| /****************************************************************************** |
| * emulate.c |
| * |
| * Generic x86 (32-bit and 64-bit) instruction decoder and emulator. |
| * |
| * Copyright (c) 2005 Keir Fraser |
| * |
| * Linux coding style, mod r/m decoder, segment base fixes, real-mode |
| * privileged instructions: |
| * |
| * Copyright (C) 2006 Qumranet |
| * Copyright 2010 Red Hat, Inc. and/or its affiliates. |
| * |
| * Avi Kivity <avi@qumranet.com> |
| * Yaniv Kamay <yaniv@qumranet.com> |
| * |
| * This work is licensed under the terms of the GNU GPL, version 2. See |
| * the COPYING file in the top-level directory. |
| * |
| * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4 |
| */ |
| |
| #include <linux/kvm_host.h> |
| #include "kvm_cache_regs.h" |
| #include <linux/module.h> |
| #include <asm/kvm_emulate.h> |
| |
| #include "x86.h" |
| #include "tss.h" |
| |
| /* |
| * Operand types |
| */ |
| #define OpNone 0ull |
| #define OpImplicit 1ull /* No generic decode */ |
| #define OpReg 2ull /* Register */ |
| #define OpMem 3ull /* Memory */ |
| #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */ |
| #define OpDI 5ull /* ES:DI/EDI/RDI */ |
| #define OpMem64 6ull /* Memory, 64-bit */ |
| #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */ |
| #define OpDX 8ull /* DX register */ |
| #define OpCL 9ull /* CL register (for shifts) */ |
| #define OpImmByte 10ull /* 8-bit sign extended immediate */ |
| #define OpOne 11ull /* Implied 1 */ |
| #define OpImm 12ull /* Sign extended immediate */ |
| #define OpMem16 13ull /* Memory operand (16-bit). */ |
| #define OpMem32 14ull /* Memory operand (32-bit). */ |
| #define OpImmU 15ull /* Immediate operand, zero extended */ |
| #define OpSI 16ull /* SI/ESI/RSI */ |
| #define OpImmFAddr 17ull /* Immediate far address */ |
| #define OpMemFAddr 18ull /* Far address in memory */ |
| #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */ |
| #define OpES 20ull /* ES */ |
| #define OpCS 21ull /* CS */ |
| #define OpSS 22ull /* SS */ |
| #define OpDS 23ull /* DS */ |
| #define OpFS 24ull /* FS */ |
| #define OpGS 25ull /* GS */ |
| #define OpMem8 26ull /* 8-bit zero extended memory operand */ |
| |
| #define OpBits 5 /* Width of operand field */ |
| #define OpMask ((1ull << OpBits) - 1) |
| |
| /* |
| * Opcode effective-address decode tables. |
| * Note that we only emulate instructions that have at least one memory |
| * operand (excluding implicit stack references). We assume that stack |
| * references and instruction fetches will never occur in special memory |
| * areas that require emulation. So, for example, 'mov <imm>,<reg>' need |
| * not be handled. |
| */ |
| |
| /* Operand sizes: 8-bit operands or specified/overridden size. */ |
| #define ByteOp (1<<0) /* 8-bit operands. */ |
| /* Destination operand type. */ |
| #define DstShift 1 |
| #define ImplicitOps (OpImplicit << DstShift) |
| #define DstReg (OpReg << DstShift) |
| #define DstMem (OpMem << DstShift) |
| #define DstAcc (OpAcc << DstShift) |
| #define DstDI (OpDI << DstShift) |
| #define DstMem64 (OpMem64 << DstShift) |
| #define DstImmUByte (OpImmUByte << DstShift) |
| #define DstDX (OpDX << DstShift) |
| #define DstMask (OpMask << DstShift) |
| /* Source operand type. */ |
| #define SrcShift 6 |
| #define SrcNone (OpNone << SrcShift) |
| #define SrcReg (OpReg << SrcShift) |
| #define SrcMem (OpMem << SrcShift) |
| #define SrcMem16 (OpMem16 << SrcShift) |
| #define SrcMem32 (OpMem32 << SrcShift) |
| #define SrcImm (OpImm << SrcShift) |
| #define SrcImmByte (OpImmByte << SrcShift) |
| #define SrcOne (OpOne << SrcShift) |
| #define SrcImmUByte (OpImmUByte << SrcShift) |
| #define SrcImmU (OpImmU << SrcShift) |
| #define SrcSI (OpSI << SrcShift) |
| #define SrcImmFAddr (OpImmFAddr << SrcShift) |
| #define SrcMemFAddr (OpMemFAddr << SrcShift) |
| #define SrcAcc (OpAcc << SrcShift) |
| #define SrcImmU16 (OpImmU16 << SrcShift) |
| #define SrcDX (OpDX << SrcShift) |
| #define SrcMem8 (OpMem8 << SrcShift) |
| #define SrcMask (OpMask << SrcShift) |
| #define BitOp (1<<11) |
| #define MemAbs (1<<12) /* Memory operand is absolute displacement */ |
| #define String (1<<13) /* String instruction (rep capable) */ |
| #define Stack (1<<14) /* Stack instruction (push/pop) */ |
| #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */ |
| #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */ |
| #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */ |
| #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */ |
| #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */ |
| #define Sse (1<<18) /* SSE Vector instruction */ |
| /* Generic ModRM decode. */ |
| #define ModRM (1<<19) |
| /* Destination is only written; never read. */ |
| #define Mov (1<<20) |
| /* Misc flags */ |
| #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */ |
| #define VendorSpecific (1<<22) /* Vendor specific instruction */ |
| #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */ |
| #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */ |
| #define Undefined (1<<25) /* No Such Instruction */ |
| #define Lock (1<<26) /* lock prefix is allowed for the instruction */ |
| #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */ |
| #define No64 (1<<28) |
| #define PageTable (1 << 29) /* instruction used to write page table */ |
| /* Source 2 operand type */ |
| #define Src2Shift (30) |
| #define Src2None (OpNone << Src2Shift) |
| #define Src2CL (OpCL << Src2Shift) |
| #define Src2ImmByte (OpImmByte << Src2Shift) |
| #define Src2One (OpOne << Src2Shift) |
| #define Src2Imm (OpImm << Src2Shift) |
| #define Src2ES (OpES << Src2Shift) |
| #define Src2CS (OpCS << Src2Shift) |
| #define Src2SS (OpSS << Src2Shift) |
| #define Src2DS (OpDS << Src2Shift) |
| #define Src2FS (OpFS << Src2Shift) |
| #define Src2GS (OpGS << Src2Shift) |
| #define Src2Mask (OpMask << Src2Shift) |
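
/*
 * Layout of the 56-bit opcode flags word assembled from the values above:
 *   bit  0        ByteOp
 *   bits 1..5     destination operand type (Op* << DstShift)
 *   bits 6..10    source operand type (Op* << SrcShift)
 *   bits 11..29   misc flags (BitOp ... PageTable)
 *   bits 30..34   second source operand type (Op* << Src2Shift)
 */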
| |
| #define X2(x...) x, x |
| #define X3(x...) X2(x), x |
| #define X4(x...) X2(x), X2(x) |
| #define X5(x...) X4(x), x |
| #define X6(x...) X4(x), X2(x) |
| #define X7(x...) X4(x), X3(x) |
| #define X8(x...) X4(x), X4(x) |
| #define X16(x...) X8(x), X8(x) |
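
/*
 * e.g. X4(op) expands to "op, op, op, op"; these macros replicate
 * identical entries in the opcode tables below.
 */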
| |
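/*
 * One entry in the opcode tables.  Which member of 'u' is valid depends
 * on the GroupMask bits in 'flags': plain entries use 'execute', Group
 * entries point at a table of 8 opcodes indexed by the ModRM reg field,
 * GroupDual at a pair of such tables (mod != 3 vs mod == 3) and Prefix
 * at a 66/f2/f3 prefix table.
 */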
| struct opcode { |
| u64 flags : 56; |
| u64 intercept : 8; |
| union { |
| int (*execute)(struct x86_emulate_ctxt *ctxt); |
| struct opcode *group; |
| struct group_dual *gdual; |
| struct gprefix *gprefix; |
| } u; |
| int (*check_perm)(struct x86_emulate_ctxt *ctxt); |
| }; |
| |
| struct group_dual { |
| struct opcode mod012[8]; |
| struct opcode mod3[8]; |
| }; |
| |
| struct gprefix { |
| struct opcode pfx_no; |
| struct opcode pfx_66; |
| struct opcode pfx_f2; |
| struct opcode pfx_f3; |
| }; |
| |
| /* EFLAGS bit definitions. */ |
| #define EFLG_ID (1<<21) |
| #define EFLG_VIP (1<<20) |
| #define EFLG_VIF (1<<19) |
| #define EFLG_AC (1<<18) |
| #define EFLG_VM (1<<17) |
| #define EFLG_RF (1<<16) |
| #define EFLG_IOPL (3<<12) |
| #define EFLG_NT (1<<14) |
| #define EFLG_OF (1<<11) |
| #define EFLG_DF (1<<10) |
| #define EFLG_IF (1<<9) |
| #define EFLG_TF (1<<8) |
| #define EFLG_SF (1<<7) |
| #define EFLG_ZF (1<<6) |
| #define EFLG_AF (1<<4) |
| #define EFLG_PF (1<<2) |
| #define EFLG_CF (1<<0) |
| |
| #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a |
| #define EFLG_RESERVED_ONE_MASK 2 |
| |
| /* |
| * Instruction emulation: |
| * Most instructions are emulated directly via a fragment of inline assembly |
| * code. This allows us to save/restore EFLAGS and thus very easily pick up |
| * any modified flags. |
| */ |
| |
| #if defined(CONFIG_X86_64) |
| #define _LO32 "k" /* force 32-bit operand */ |
| #define _STK "%%rsp" /* stack pointer */ |
| #elif defined(__i386__) |
| #define _LO32 "" /* force 32-bit operand */ |
| #define _STK "%%esp" /* stack pointer */ |
| #endif |
| |
| /* |
| * These EFLAGS bits are restored from saved value during emulation, and |
| * any changes are written back to the saved value after emulation. |
| */ |
| #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF) |
| |
| /* Before executing instruction: restore necessary bits in EFLAGS. */ |
| #define _PRE_EFLAGS(_sav, _msk, _tmp) \ |
| /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \ |
| "movl %"_sav",%"_LO32 _tmp"; " \ |
| "push %"_tmp"; " \ |
| "push %"_tmp"; " \ |
| "movl %"_msk",%"_LO32 _tmp"; " \ |
| "andl %"_LO32 _tmp",("_STK"); " \ |
| "pushf; " \ |
| "notl %"_LO32 _tmp"; " \ |
| "andl %"_LO32 _tmp",("_STK"); " \ |
| "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \ |
| "pop %"_tmp"; " \ |
| "orl %"_LO32 _tmp",("_STK"); " \ |
| "popf; " \ |
| "pop %"_sav"; " |
| |
| /* After executing instruction: write-back necessary bits in EFLAGS. */ |
| #define _POST_EFLAGS(_sav, _msk, _tmp) \ |
| /* _sav |= EFLAGS & _msk; */ \ |
| "pushf; " \ |
| "pop %"_tmp"; " \ |
| "andl %"_msk",%"_LO32 _tmp"; " \ |
| "orl %"_LO32 _tmp",%"_sav"; " |
| |
| #ifdef CONFIG_X86_64 |
| #define ON64(x) x |
| #else |
| #define ON64(x) |
| #endif |
| |
| #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \ |
| do { \ |
| __asm__ __volatile__ ( \ |
| _PRE_EFLAGS("0", "4", "2") \ |
| _op _suffix " %"_x"3,%1; " \ |
| _POST_EFLAGS("0", "4", "2") \ |
| : "=m" ((ctxt)->eflags), \ |
| "+q" (*(_dsttype*)&(ctxt)->dst.val), \ |
| "=&r" (_tmp) \ |
| : _y ((ctxt)->src.val), "i" (EFLAGS_MASK)); \ |
| } while (0) |
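
/*
 * Operand numbering in ____emulate_2op(): %0 = eflags, %1 = dst.val,
 * %2 = scratch, %3 = src.val, %4 = EFLAGS_MASK.
 */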
| |
| |
| /* Raw emulation: instruction has two explicit operands. */ |
| #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \ |
| do { \ |
| unsigned long _tmp; \ |
| \ |
| switch ((ctxt)->dst.bytes) { \ |
| case 2: \ |
| ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \ |
| break; \ |
| case 4: \ |
| ____emulate_2op(ctxt,_op,_lx,_ly,"l",u32); \ |
| break; \ |
| case 8: \ |
| ON64(____emulate_2op(ctxt,_op,_qx,_qy,"q",u64)); \ |
| break; \ |
| } \ |
| } while (0) |
| |
| #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \ |
| do { \ |
| unsigned long _tmp; \ |
| switch ((ctxt)->dst.bytes) { \ |
| case 1: \ |
| ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \ |
| break; \ |
| default: \ |
| __emulate_2op_nobyte(ctxt, _op, \ |
| _wx, _wy, _lx, _ly, _qx, _qy); \ |
| break; \ |
| } \ |
| } while (0) |
| |
| /* Source operand is byte-sized and may be restricted to just %cl. */ |
| #define emulate_2op_SrcB(ctxt, _op) \ |
| __emulate_2op(ctxt, _op, "b", "c", "b", "c", "b", "c", "b", "c") |
| |
| /* Source operand is byte, word, long or quad sized. */ |
| #define emulate_2op_SrcV(ctxt, _op) \ |
| __emulate_2op(ctxt, _op, "b", "q", "w", "r", _LO32, "r", "", "r") |
| |
| /* Source operand is word, long or quad sized. */ |
| #define emulate_2op_SrcV_nobyte(ctxt, _op) \ |
| __emulate_2op_nobyte(ctxt, _op, "w", "r", _LO32, "r", "", "r") |
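
/*
 * In the three helpers above, each (_x, _y) pair supplies an asm
 * operand-size modifier for the source ("b" byte, "w" word, "k" 32-bit,
 * "" natural width) and its register constraint ("q" byte-addressable
 * register, "r" any register, "c" the CL register for shift counts).
 */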
| |
| /* Instruction has three operands and one operand is stored in ECX register */ |
| #define __emulate_2op_cl(ctxt, _op, _suffix, _type) \ |
| do { \ |
| unsigned long _tmp; \ |
| _type _clv = (ctxt)->src2.val; \ |
| _type _srcv = (ctxt)->src.val; \ |
| _type _dstv = (ctxt)->dst.val; \ |
| \ |
| __asm__ __volatile__ ( \ |
| _PRE_EFLAGS("0", "5", "2") \ |
| _op _suffix " %4,%1 \n" \ |
| _POST_EFLAGS("0", "5", "2") \ |
| : "=m" ((ctxt)->eflags), "+r" (_dstv), "=&r" (_tmp) \ |
| : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \ |
| ); \ |
| \ |
| (ctxt)->src2.val = (unsigned long) _clv; \ |
	(ctxt)->src.val = (unsigned long) _srcv;		\
| (ctxt)->dst.val = (unsigned long) _dstv; \ |
| } while (0) |
| |
| #define emulate_2op_cl(ctxt, _op) \ |
| do { \ |
| switch ((ctxt)->dst.bytes) { \ |
| case 2: \ |
| __emulate_2op_cl(ctxt, _op, "w", u16); \ |
| break; \ |
| case 4: \ |
| __emulate_2op_cl(ctxt, _op, "l", u32); \ |
| break; \ |
| case 8: \ |
| ON64(__emulate_2op_cl(ctxt, _op, "q", ulong)); \ |
| break; \ |
| } \ |
| } while (0) |
| |
| #define __emulate_1op(ctxt, _op, _suffix) \ |
| do { \ |
| unsigned long _tmp; \ |
| \ |
| __asm__ __volatile__ ( \ |
| _PRE_EFLAGS("0", "3", "2") \ |
| _op _suffix " %1; " \ |
| _POST_EFLAGS("0", "3", "2") \ |
| : "=m" ((ctxt)->eflags), "+m" ((ctxt)->dst.val), \ |
| "=&r" (_tmp) \ |
| : "i" (EFLAGS_MASK)); \ |
| } while (0) |
| |
| /* Instruction has only one explicit operand (no source operand). */ |
| #define emulate_1op(ctxt, _op) \ |
| do { \ |
| switch ((ctxt)->dst.bytes) { \ |
| case 1: __emulate_1op(ctxt, _op, "b"); break; \ |
| case 2: __emulate_1op(ctxt, _op, "w"); break; \ |
| case 4: __emulate_1op(ctxt, _op, "l"); break; \ |
| case 8: ON64(__emulate_1op(ctxt, _op, "q")); break; \ |
| } \ |
| } while (0) |
| |
| #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \ |
| do { \ |
| unsigned long _tmp; \ |
| ulong *rax = &(ctxt)->regs[VCPU_REGS_RAX]; \ |
| ulong *rdx = &(ctxt)->regs[VCPU_REGS_RDX]; \ |
| \ |
| __asm__ __volatile__ ( \ |
| _PRE_EFLAGS("0", "5", "1") \ |
| "1: \n\t" \ |
| _op _suffix " %6; " \ |
| "2: \n\t" \ |
| _POST_EFLAGS("0", "5", "1") \ |
| ".pushsection .fixup,\"ax\" \n\t" \ |
| "3: movb $1, %4 \n\t" \ |
| "jmp 2b \n\t" \ |
| ".popsection \n\t" \ |
| _ASM_EXTABLE(1b, 3b) \ |
| : "=m" ((ctxt)->eflags), "=&r" (_tmp), \ |
| "+a" (*rax), "+d" (*rdx), "+qm"(_ex) \ |
| : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val), \ |
| "a" (*rax), "d" (*rdx)); \ |
| } while (0) |
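
/*
 * The exception table entry above redirects a fault raised by the
 * instruction at label 1 (e.g. #DE from div/idiv) to the fixup code at
 * label 3, which sets _ex instead of taking the host down.
 */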
| |
/*
 * Instruction has only one source operand, destination is implicit
 * (e.g. mul, div, imul, idiv).
 */
| #define emulate_1op_rax_rdx(ctxt, _op, _ex) \ |
| do { \ |
| switch((ctxt)->src.bytes) { \ |
| case 1: \ |
| __emulate_1op_rax_rdx(ctxt, _op, "b", _ex); \ |
| break; \ |
| case 2: \ |
| __emulate_1op_rax_rdx(ctxt, _op, "w", _ex); \ |
| break; \ |
| case 4: \ |
| __emulate_1op_rax_rdx(ctxt, _op, "l", _ex); \ |
| break; \ |
| case 8: ON64( \ |
| __emulate_1op_rax_rdx(ctxt, _op, "q", _ex)); \ |
| break; \ |
| } \ |
| } while (0) |
| |
| static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt, |
| enum x86_intercept intercept, |
| enum x86_intercept_stage stage) |
| { |
| struct x86_instruction_info info = { |
| .intercept = intercept, |
| .rep_prefix = ctxt->rep_prefix, |
| .modrm_mod = ctxt->modrm_mod, |
| .modrm_reg = ctxt->modrm_reg, |
| .modrm_rm = ctxt->modrm_rm, |
| .src_val = ctxt->src.val64, |
| .src_bytes = ctxt->src.bytes, |
| .dst_bytes = ctxt->dst.bytes, |
| .ad_bytes = ctxt->ad_bytes, |
| .next_rip = ctxt->eip, |
| }; |
| |
| return ctxt->ops->intercept(ctxt, &info, stage); |
| } |
| |
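/*
 * Address-size mask: 0xffff for 2-byte and 0xffffffff for 4-byte
 * addressing.  Not valid when ad_bytes == sizeof(unsigned long), as the
 * shift would overflow; address_mask() special-cases that first.
 */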
| static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt) |
| { |
| return (1UL << (ctxt->ad_bytes << 3)) - 1; |
| } |
| |
| /* Access/update address held in a register, based on addressing mode. */ |
| static inline unsigned long |
| address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg) |
| { |
| if (ctxt->ad_bytes == sizeof(unsigned long)) |
| return reg; |
| else |
| return reg & ad_mask(ctxt); |
| } |
| |
| static inline unsigned long |
| register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg) |
| { |
| return address_mask(ctxt, reg); |
| } |
| |
| static inline void |
| register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc) |
| { |
| if (ctxt->ad_bytes == sizeof(unsigned long)) |
| *reg += inc; |
| else |
| *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt)); |
| } |
| |
| static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) |
| { |
| register_address_increment(ctxt, &ctxt->_eip, rel); |
| } |
| |
| static u32 desc_limit_scaled(struct desc_struct *desc) |
| { |
| u32 limit = get_desc_limit(desc); |
| |
| return desc->g ? (limit << 12) | 0xfff : limit; |
| } |
| |
| static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg) |
| { |
| ctxt->has_seg_override = true; |
| ctxt->seg_override = seg; |
| } |
| |
| static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg) |
| { |
| if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) |
| return 0; |
| |
| return ctxt->ops->get_cached_segment_base(ctxt, seg); |
| } |
| |
| static unsigned seg_override(struct x86_emulate_ctxt *ctxt) |
| { |
| if (!ctxt->has_seg_override) |
| return 0; |
| |
| return ctxt->seg_override; |
| } |
| |
| static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec, |
| u32 error, bool valid) |
| { |
| ctxt->exception.vector = vec; |
| ctxt->exception.error_code = error; |
| ctxt->exception.error_code_valid = valid; |
| return X86EMUL_PROPAGATE_FAULT; |
| } |
| |
| static int emulate_db(struct x86_emulate_ctxt *ctxt) |
| { |
| return emulate_exception(ctxt, DB_VECTOR, 0, false); |
| } |
| |
| static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err) |
| { |
| return emulate_exception(ctxt, GP_VECTOR, err, true); |
| } |
| |
| static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err) |
| { |
| return emulate_exception(ctxt, SS_VECTOR, err, true); |
| } |
| |
| static int emulate_ud(struct x86_emulate_ctxt *ctxt) |
| { |
| return emulate_exception(ctxt, UD_VECTOR, 0, false); |
| } |
| |
| static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err) |
| { |
| return emulate_exception(ctxt, TS_VECTOR, err, true); |
| } |
| |
| static int emulate_de(struct x86_emulate_ctxt *ctxt) |
| { |
| return emulate_exception(ctxt, DE_VECTOR, 0, false); |
| } |
| |
| static int emulate_nm(struct x86_emulate_ctxt *ctxt) |
| { |
| return emulate_exception(ctxt, NM_VECTOR, 0, false); |
| } |
| |
| static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg) |
| { |
| u16 selector; |
| struct desc_struct desc; |
| |
| ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg); |
| return selector; |
| } |
| |
| static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector, |
| unsigned seg) |
| { |
| u16 dummy; |
| u32 base3; |
| struct desc_struct desc; |
| |
| ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg); |
| ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg); |
| } |
| |
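/*
 * Translate a segment:offset address into a linear address.  Outside of
 * 64-bit mode this applies the segment base and the usual limit, type
 * and privilege checks; in 64-bit mode only the FS/GS bases apply and
 * the address is merely checked for canonical form.
 */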
| static int __linearize(struct x86_emulate_ctxt *ctxt, |
| struct segmented_address addr, |
| unsigned size, bool write, bool fetch, |
| ulong *linear) |
| { |
| struct desc_struct desc; |
| bool usable; |
| ulong la; |
| u32 lim; |
| u16 sel; |
| unsigned cpl, rpl; |
| |
| la = seg_base(ctxt, addr.seg) + addr.ea; |
| switch (ctxt->mode) { |
| case X86EMUL_MODE_REAL: |
| break; |
| case X86EMUL_MODE_PROT64: |
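		/* #GP on a non-canonical address: bits 63:48 must sign-extend bit 47 */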
| if (((signed long)la << 16) >> 16 != la) |
| return emulate_gp(ctxt, 0); |
| break; |
| default: |
| usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, |
| addr.seg); |
| if (!usable) |
| goto bad; |
| /* code segment or read-only data segment */ |
| if (((desc.type & 8) || !(desc.type & 2)) && write) |
| goto bad; |
| /* unreadable code segment */ |
| if (!fetch && (desc.type & 8) && !(desc.type & 2)) |
| goto bad; |
| lim = desc_limit_scaled(&desc); |
| if ((desc.type & 8) || !(desc.type & 4)) { |
| /* expand-up segment */ |
| if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) |
| goto bad; |
| } else { |
			/* expand-down segment */
| if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim) |
| goto bad; |
| lim = desc.d ? 0xffffffff : 0xffff; |
| if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) |
| goto bad; |
| } |
| cpl = ctxt->ops->cpl(ctxt); |
| rpl = sel & 3; |
| cpl = max(cpl, rpl); |
| if (!(desc.type & 8)) { |
| /* data segment */ |
| if (cpl > desc.dpl) |
| goto bad; |
| } else if ((desc.type & 8) && !(desc.type & 4)) { |
| /* nonconforming code segment */ |
| if (cpl != desc.dpl) |
| goto bad; |
| } else if ((desc.type & 8) && (desc.type & 4)) { |
| /* conforming code segment */ |
| if (cpl < desc.dpl) |
| goto bad; |
| } |
| break; |
| } |
| if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8) |
| la &= (u32)-1; |
| *linear = la; |
| return X86EMUL_CONTINUE; |
| bad: |
| if (addr.seg == VCPU_SREG_SS) |
| return emulate_ss(ctxt, addr.seg); |
| else |
| return emulate_gp(ctxt, addr.seg); |
| } |
| |
| static int linearize(struct x86_emulate_ctxt *ctxt, |
| struct segmented_address addr, |
| unsigned size, bool write, |
| ulong *linear) |
| { |
| return __linearize(ctxt, addr, size, write, false, linear); |
| } |
| |
| |
| static int segmented_read_std(struct x86_emulate_ctxt *ctxt, |
| struct segmented_address addr, |
| void *data, |
| unsigned size) |
| { |
| int rc; |
| ulong linear; |
| |
| rc = linearize(ctxt, addr, size, false, &linear); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception); |
| } |
| |
| /* |
| * Fetch the next byte of the instruction being emulated which is pointed to |
| * by ctxt->_eip, then increment ctxt->_eip. |
| * |
| * Also prefetch the remaining bytes of the instruction without crossing page |
| * boundary if they are not in fetch_cache yet. |
| */ |
| static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest) |
| { |
| struct fetch_cache *fc = &ctxt->fetch; |
| int rc; |
| int size, cur_size; |
| |
| if (ctxt->_eip == fc->end) { |
| unsigned long linear; |
| struct segmented_address addr = { .seg = VCPU_SREG_CS, |
| .ea = ctxt->_eip }; |
| cur_size = fc->end - fc->start; |
| size = min(15UL - cur_size, |
| PAGE_SIZE - offset_in_page(ctxt->_eip)); |
| rc = __linearize(ctxt, addr, size, false, true, &linear); |
| if (unlikely(rc != X86EMUL_CONTINUE)) |
| return rc; |
| rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size, |
| size, &ctxt->exception); |
| if (unlikely(rc != X86EMUL_CONTINUE)) |
| return rc; |
| fc->end += size; |
| } |
| *dest = fc->data[ctxt->_eip - fc->start]; |
| ctxt->_eip++; |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int do_insn_fetch(struct x86_emulate_ctxt *ctxt, |
| void *dest, unsigned size) |
| { |
| int rc; |
| |
| /* x86 instructions are limited to 15 bytes. */ |
| if (unlikely(ctxt->_eip + size - ctxt->eip > 15)) |
| return X86EMUL_UNHANDLEABLE; |
| while (size--) { |
| rc = do_insn_fetch_byte(ctxt, dest++); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| } |
| return X86EMUL_CONTINUE; |
| } |
| |
| /* Fetch next part of the instruction being emulated. */ |
| #define insn_fetch(_type, _ctxt) \ |
| ({ unsigned long _x; \ |
| rc = do_insn_fetch(_ctxt, &_x, sizeof(_type)); \ |
| if (rc != X86EMUL_CONTINUE) \ |
| goto done; \ |
| (_type)_x; \ |
| }) |
| |
| #define insn_fetch_arr(_arr, _size, _ctxt) \ |
| ({ rc = do_insn_fetch(_ctxt, _arr, (_size)); \ |
| if (rc != X86EMUL_CONTINUE) \ |
| goto done; \ |
| }) |
| |
| /* |
| * Given the 'reg' portion of a ModRM byte, and a register block, return a |
| * pointer into the block that addresses the relevant register. |
| * @highbyte_regs specifies whether to decode AH,CH,DH,BH. |
| */ |
| static void *decode_register(u8 modrm_reg, unsigned long *regs, |
| int highbyte_regs) |
| { |
| void *p; |
| |
| p = ®s[modrm_reg]; |
| if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8) |
| p = (unsigned char *)®s[modrm_reg & 3] + 1; |
| return p; |
| } |
| |
| static int read_descriptor(struct x86_emulate_ctxt *ctxt, |
| struct segmented_address addr, |
| u16 *size, unsigned long *address, int op_bytes) |
| { |
| int rc; |
| |
| if (op_bytes == 2) |
| op_bytes = 3; |
| *address = 0; |
| rc = segmented_read_std(ctxt, addr, size, 2); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| addr.ea += 2; |
| rc = segmented_read_std(ctxt, addr, address, op_bytes); |
| return rc; |
| } |
| |
| static int test_cc(unsigned int condition, unsigned int flags) |
| { |
| int rc = 0; |
| |
| switch ((condition & 15) >> 1) { |
| case 0: /* o */ |
| rc |= (flags & EFLG_OF); |
| break; |
| case 1: /* b/c/nae */ |
| rc |= (flags & EFLG_CF); |
| break; |
| case 2: /* z/e */ |
| rc |= (flags & EFLG_ZF); |
| break; |
| case 3: /* be/na */ |
| rc |= (flags & (EFLG_CF|EFLG_ZF)); |
| break; |
| case 4: /* s */ |
| rc |= (flags & EFLG_SF); |
| break; |
| case 5: /* p/pe */ |
| rc |= (flags & EFLG_PF); |
| break; |
| case 7: /* le/ng */ |
| rc |= (flags & EFLG_ZF); |
| /* fall through */ |
| case 6: /* l/nge */ |
| rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF)); |
| break; |
| } |
| |
| /* Odd condition identifiers (lsb == 1) have inverted sense. */ |
| return (!!rc ^ (condition & 1)); |
| } |
| |
| static void fetch_register_operand(struct operand *op) |
| { |
| switch (op->bytes) { |
| case 1: |
| op->val = *(u8 *)op->addr.reg; |
| break; |
| case 2: |
| op->val = *(u16 *)op->addr.reg; |
| break; |
| case 4: |
| op->val = *(u32 *)op->addr.reg; |
| break; |
| case 8: |
| op->val = *(u64 *)op->addr.reg; |
| break; |
| } |
| } |
| |
| static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) |
| { |
| ctxt->ops->get_fpu(ctxt); |
| switch (reg) { |
| case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break; |
| case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break; |
| case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break; |
| case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break; |
| case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break; |
| case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break; |
| case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break; |
| case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break; |
| #ifdef CONFIG_X86_64 |
| case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break; |
| case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break; |
| case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break; |
| case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break; |
| case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break; |
| case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break; |
| case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break; |
| case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break; |
| #endif |
| default: BUG(); |
| } |
| ctxt->ops->put_fpu(ctxt); |
| } |
| |
| static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, |
| int reg) |
| { |
| ctxt->ops->get_fpu(ctxt); |
| switch (reg) { |
| case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break; |
| case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break; |
| case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break; |
| case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break; |
| case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break; |
| case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break; |
| case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break; |
| case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break; |
| #ifdef CONFIG_X86_64 |
| case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break; |
| case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break; |
| case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break; |
| case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break; |
| case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break; |
| case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break; |
| case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break; |
| case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break; |
| #endif |
| default: BUG(); |
| } |
| ctxt->ops->put_fpu(ctxt); |
| } |
| |
| static void decode_register_operand(struct x86_emulate_ctxt *ctxt, |
| struct operand *op) |
| { |
| unsigned reg = ctxt->modrm_reg; |
| int highbyte_regs = ctxt->rex_prefix == 0; |
| |
| if (!(ctxt->d & ModRM)) |
| reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3); |
| |
| if (ctxt->d & Sse) { |
| op->type = OP_XMM; |
| op->bytes = 16; |
| op->addr.xmm = reg; |
| read_sse_reg(ctxt, &op->vec_val, reg); |
| return; |
| } |
| |
| op->type = OP_REG; |
| if (ctxt->d & ByteOp) { |
| op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs); |
| op->bytes = 1; |
| } else { |
| op->addr.reg = decode_register(reg, ctxt->regs, 0); |
| op->bytes = ctxt->op_bytes; |
| } |
| fetch_register_operand(op); |
| op->orig_val = op->val; |
| } |
| |
| static int decode_modrm(struct x86_emulate_ctxt *ctxt, |
| struct operand *op) |
| { |
| u8 sib; |
| int index_reg = 0, base_reg = 0, scale; |
| int rc = X86EMUL_CONTINUE; |
| ulong modrm_ea = 0; |
| |
| if (ctxt->rex_prefix) { |
| ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1; /* REX.R */ |
| index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */ |
		ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REX.B */
| } |
| |
| ctxt->modrm = insn_fetch(u8, ctxt); |
| ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6; |
| ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3; |
| ctxt->modrm_rm |= (ctxt->modrm & 0x07); |
| ctxt->modrm_seg = VCPU_SREG_DS; |
| |
| if (ctxt->modrm_mod == 3) { |
| op->type = OP_REG; |
| op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; |
| op->addr.reg = decode_register(ctxt->modrm_rm, |
| ctxt->regs, ctxt->d & ByteOp); |
| if (ctxt->d & Sse) { |
| op->type = OP_XMM; |
| op->bytes = 16; |
| op->addr.xmm = ctxt->modrm_rm; |
| read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm); |
| return rc; |
| } |
| fetch_register_operand(op); |
| return rc; |
| } |
| |
| op->type = OP_MEM; |
| |
| if (ctxt->ad_bytes == 2) { |
| unsigned bx = ctxt->regs[VCPU_REGS_RBX]; |
| unsigned bp = ctxt->regs[VCPU_REGS_RBP]; |
| unsigned si = ctxt->regs[VCPU_REGS_RSI]; |
| unsigned di = ctxt->regs[VCPU_REGS_RDI]; |
| |
| /* 16-bit ModR/M decode. */ |
| switch (ctxt->modrm_mod) { |
| case 0: |
| if (ctxt->modrm_rm == 6) |
| modrm_ea += insn_fetch(u16, ctxt); |
| break; |
| case 1: |
| modrm_ea += insn_fetch(s8, ctxt); |
| break; |
| case 2: |
| modrm_ea += insn_fetch(u16, ctxt); |
| break; |
| } |
| switch (ctxt->modrm_rm) { |
| case 0: |
| modrm_ea += bx + si; |
| break; |
| case 1: |
| modrm_ea += bx + di; |
| break; |
| case 2: |
| modrm_ea += bp + si; |
| break; |
| case 3: |
| modrm_ea += bp + di; |
| break; |
| case 4: |
| modrm_ea += si; |
| break; |
| case 5: |
| modrm_ea += di; |
| break; |
| case 6: |
| if (ctxt->modrm_mod != 0) |
| modrm_ea += bp; |
| break; |
| case 7: |
| modrm_ea += bx; |
| break; |
| } |
| if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 || |
| (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0)) |
| ctxt->modrm_seg = VCPU_SREG_SS; |
| modrm_ea = (u16)modrm_ea; |
| } else { |
| /* 32/64-bit ModR/M decode. */ |
| if ((ctxt->modrm_rm & 7) == 4) { |
| sib = insn_fetch(u8, ctxt); |
| index_reg |= (sib >> 3) & 7; |
| base_reg |= sib & 7; |
| scale = sib >> 6; |
| |
| if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0) |
| modrm_ea += insn_fetch(s32, ctxt); |
| else |
| modrm_ea += ctxt->regs[base_reg]; |
| if (index_reg != 4) |
| modrm_ea += ctxt->regs[index_reg] << scale; |
| } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) { |
| if (ctxt->mode == X86EMUL_MODE_PROT64) |
| ctxt->rip_relative = 1; |
| } else |
| modrm_ea += ctxt->regs[ctxt->modrm_rm]; |
| switch (ctxt->modrm_mod) { |
| case 0: |
| if (ctxt->modrm_rm == 5) |
| modrm_ea += insn_fetch(s32, ctxt); |
| break; |
| case 1: |
| modrm_ea += insn_fetch(s8, ctxt); |
| break; |
| case 2: |
| modrm_ea += insn_fetch(s32, ctxt); |
| break; |
| } |
| } |
| op->addr.mem.ea = modrm_ea; |
| done: |
| return rc; |
| } |
| |
| static int decode_abs(struct x86_emulate_ctxt *ctxt, |
| struct operand *op) |
| { |
| int rc = X86EMUL_CONTINUE; |
| |
| op->type = OP_MEM; |
| switch (ctxt->ad_bytes) { |
| case 2: |
| op->addr.mem.ea = insn_fetch(u16, ctxt); |
| break; |
| case 4: |
| op->addr.mem.ea = insn_fetch(u32, ctxt); |
| break; |
| case 8: |
| op->addr.mem.ea = insn_fetch(u64, ctxt); |
| break; |
| } |
| done: |
| return rc; |
| } |
| |
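/*
 * For bit instructions (bt/bts/btr/btc) with a register bit offset, the
 * offset may address memory beyond the destination operand: adjust the
 * effective address by the sign-extended offset divided by 8, then keep
 * only the sub-word bit index in src.val.
 */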
| static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt) |
| { |
| long sv = 0, mask; |
| |
| if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) { |
| mask = ~(ctxt->dst.bytes * 8 - 1); |
| |
| if (ctxt->src.bytes == 2) |
| sv = (s16)ctxt->src.val & (s16)mask; |
| else if (ctxt->src.bytes == 4) |
| sv = (s32)ctxt->src.val & (s32)mask; |
| |
| ctxt->dst.addr.mem.ea += (sv >> 3); |
| } |
| |
| /* only subword offset */ |
| ctxt->src.val &= (ctxt->dst.bytes << 3) - 1; |
| } |
| |
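/*
 * Memory reads go through the mem_read cache so that an instruction
 * which has to be restarted (e.g. after an exit to userspace) replays
 * the same data instead of re-reading guest memory.
 */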
| static int read_emulated(struct x86_emulate_ctxt *ctxt, |
| unsigned long addr, void *dest, unsigned size) |
| { |
| int rc; |
| struct read_cache *mc = &ctxt->mem_read; |
| |
| while (size) { |
| int n = min(size, 8u); |
| size -= n; |
| if (mc->pos < mc->end) |
| goto read_cached; |
| |
| rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n, |
| &ctxt->exception); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| mc->end += n; |
| |
| read_cached: |
| memcpy(dest, mc->data + mc->pos, n); |
| mc->pos += n; |
| dest += n; |
| addr += n; |
| } |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int segmented_read(struct x86_emulate_ctxt *ctxt, |
| struct segmented_address addr, |
| void *data, |
| unsigned size) |
| { |
| int rc; |
| ulong linear; |
| |
| rc = linearize(ctxt, addr, size, false, &linear); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| return read_emulated(ctxt, linear, data, size); |
| } |
| |
| static int segmented_write(struct x86_emulate_ctxt *ctxt, |
| struct segmented_address addr, |
| const void *data, |
| unsigned size) |
| { |
| int rc; |
| ulong linear; |
| |
| rc = linearize(ctxt, addr, size, true, &linear); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| return ctxt->ops->write_emulated(ctxt, linear, data, size, |
| &ctxt->exception); |
| } |
| |
| static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt, |
| struct segmented_address addr, |
| const void *orig_data, const void *data, |
| unsigned size) |
| { |
| int rc; |
| ulong linear; |
| |
| rc = linearize(ctxt, addr, size, true, &linear); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data, |
| size, &ctxt->exception); |
| } |
| |
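/*
 * String PIO read-ahead: for a rep-prefixed IN, batch as many port
 * reads as fit in the buffer (and in the current page at the
 * destination) into a single pio_in_emulated call, then satisfy
 * subsequent iterations from the cache.
 */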
| static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, |
| unsigned int size, unsigned short port, |
| void *dest) |
| { |
| struct read_cache *rc = &ctxt->io_read; |
| |
| if (rc->pos == rc->end) { /* refill pio read ahead */ |
| unsigned int in_page, n; |
| unsigned int count = ctxt->rep_prefix ? |
| address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1; |
| in_page = (ctxt->eflags & EFLG_DF) ? |
| offset_in_page(ctxt->regs[VCPU_REGS_RDI]) : |
| PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]); |
| n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size, |
| count); |
| if (n == 0) |
| n = 1; |
| rc->pos = rc->end = 0; |
| if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n)) |
| return 0; |
| rc->end = n * size; |
| } |
| |
| memcpy(dest, rc->data + rc->pos, size); |
| rc->pos += size; |
| return 1; |
| } |
| |
| static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt, |
| u16 index, struct desc_struct *desc) |
| { |
| struct desc_ptr dt; |
| ulong addr; |
| |
| ctxt->ops->get_idt(ctxt, &dt); |
| |
| if (dt.size < index * 8 + 7) |
| return emulate_gp(ctxt, index << 3 | 0x2); |
| |
| addr = dt.address + index * 8; |
| return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, |
| &ctxt->exception); |
| } |
| |
| static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, |
| u16 selector, struct desc_ptr *dt) |
| { |
| struct x86_emulate_ops *ops = ctxt->ops; |
| |
| if (selector & 1 << 2) { |
| struct desc_struct desc; |
| u16 sel; |
| |
		memset(dt, 0, sizeof *dt);
| if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR)) |
| return; |
| |
| dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */ |
| dt->address = get_desc_base(&desc); |
| } else |
| ops->get_gdt(ctxt, dt); |
| } |
| |
/* allowed only for 8-byte segment descriptors */
| static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt, |
| u16 selector, struct desc_struct *desc) |
| { |
| struct desc_ptr dt; |
| u16 index = selector >> 3; |
| ulong addr; |
| |
| get_descriptor_table_ptr(ctxt, selector, &dt); |
| |
| if (dt.size < index * 8 + 7) |
| return emulate_gp(ctxt, selector & 0xfffc); |
| |
| addr = dt.address + index * 8; |
| return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, |
| &ctxt->exception); |
| } |
| |
/* allowed only for 8-byte segment descriptors */
| static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, |
| u16 selector, struct desc_struct *desc) |
| { |
| struct desc_ptr dt; |
| u16 index = selector >> 3; |
| ulong addr; |
| |
| get_descriptor_table_ptr(ctxt, selector, &dt); |
| |
| if (dt.size < index * 8 + 7) |
| return emulate_gp(ctxt, selector & 0xfffc); |
| |
| addr = dt.address + index * 8; |
| return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc, |
| &ctxt->exception); |
| } |
| |
| /* Does not support long mode */ |
| static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, |
| u16 selector, int seg) |
| { |
| struct desc_struct seg_desc; |
| u8 dpl, rpl, cpl; |
| unsigned err_vec = GP_VECTOR; |
| u32 err_code = 0; |
| bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */ |
| int ret; |
| |
| memset(&seg_desc, 0, sizeof seg_desc); |
| |
| if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) |
| || ctxt->mode == X86EMUL_MODE_REAL) { |
| /* set real mode segment descriptor */ |
| set_desc_base(&seg_desc, selector << 4); |
| set_desc_limit(&seg_desc, 0xffff); |
| seg_desc.type = 3; |
| seg_desc.p = 1; |
| seg_desc.s = 1; |
| if (ctxt->mode == X86EMUL_MODE_VM86) |
| seg_desc.dpl = 3; |
| goto load; |
| } |
| |
| /* NULL selector is not valid for TR, CS and SS */ |
| if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR) |
| && null_selector) |
| goto exception; |
| |
| /* TR should be in GDT only */ |
| if (seg == VCPU_SREG_TR && (selector & (1 << 2))) |
| goto exception; |
| |
| if (null_selector) /* for NULL selector skip all following checks */ |
| goto load; |
| |
| ret = read_segment_descriptor(ctxt, selector, &seg_desc); |
| if (ret != X86EMUL_CONTINUE) |
| return ret; |
| |
| err_code = selector & 0xfffc; |
| err_vec = GP_VECTOR; |
| |
	/* can't load a system descriptor into a code or data segment register */
| if (seg <= VCPU_SREG_GS && !seg_desc.s) |
| goto exception; |
| |
| if (!seg_desc.p) { |
| err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR; |
| goto exception; |
| } |
| |
| rpl = selector & 3; |
| dpl = seg_desc.dpl; |
| cpl = ctxt->ops->cpl(ctxt); |
| |
| switch (seg) { |
| case VCPU_SREG_SS: |
| /* |
| * segment is not a writable data segment or segment |
| * selector's RPL != CPL or segment selector's RPL != CPL |
| */ |
| if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl) |
| goto exception; |
| break; |
| case VCPU_SREG_CS: |
| if (!(seg_desc.type & 8)) |
| goto exception; |
| |
| if (seg_desc.type & 4) { |
| /* conforming */ |
| if (dpl > cpl) |
| goto exception; |
| } else { |
| /* nonconforming */ |
| if (rpl > cpl || dpl != cpl) |
| goto exception; |
| } |
| /* CS(RPL) <- CPL */ |
| selector = (selector & 0xfffc) | cpl; |
| break; |
| case VCPU_SREG_TR: |
| if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9)) |
| goto exception; |
| break; |
| case VCPU_SREG_LDTR: |
| if (seg_desc.s || seg_desc.type != 2) |
| goto exception; |
| break; |
| default: /* DS, ES, FS, or GS */ |
| /* |
| * segment is not a data or readable code segment or |
| * ((segment is a data or nonconforming code segment) |
| * and (both RPL and CPL > DPL)) |
| */ |
| if ((seg_desc.type & 0xa) == 0x8 || |
| (((seg_desc.type & 0xc) != 0xc) && |
| (rpl > dpl && cpl > dpl))) |
| goto exception; |
| break; |
| } |
| |
| if (seg_desc.s) { |
| /* mark segment as accessed */ |
| seg_desc.type |= 1; |
| ret = write_segment_descriptor(ctxt, selector, &seg_desc); |
| if (ret != X86EMUL_CONTINUE) |
| return ret; |
| } |
| load: |
| ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg); |
| return X86EMUL_CONTINUE; |
| exception: |
| emulate_exception(ctxt, err_vec, err_code, true); |
| return X86EMUL_PROPAGATE_FAULT; |
| } |
| |
| static void write_register_operand(struct operand *op) |
| { |
| /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */ |
| switch (op->bytes) { |
| case 1: |
| *(u8 *)op->addr.reg = (u8)op->val; |
| break; |
| case 2: |
| *(u16 *)op->addr.reg = (u16)op->val; |
| break; |
| case 4: |
| *op->addr.reg = (u32)op->val; |
| break; /* 64b: zero-extend */ |
| case 8: |
| *op->addr.reg = op->val; |
| break; |
| } |
| } |
| |
| static int writeback(struct x86_emulate_ctxt *ctxt) |
| { |
| int rc; |
| |
| switch (ctxt->dst.type) { |
| case OP_REG: |
| write_register_operand(&ctxt->dst); |
| break; |
| case OP_MEM: |
| if (ctxt->lock_prefix) |
| rc = segmented_cmpxchg(ctxt, |
| ctxt->dst.addr.mem, |
| &ctxt->dst.orig_val, |
| &ctxt->dst.val, |
| ctxt->dst.bytes); |
| else |
| rc = segmented_write(ctxt, |
| ctxt->dst.addr.mem, |
| &ctxt->dst.val, |
| ctxt->dst.bytes); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| break; |
| case OP_XMM: |
| write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm); |
| break; |
| case OP_NONE: |
| /* no writeback */ |
| break; |
| default: |
| break; |
| } |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_push(struct x86_emulate_ctxt *ctxt) |
| { |
| struct segmented_address addr; |
| |
| register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes); |
| addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]); |
| addr.seg = VCPU_SREG_SS; |
| |
| /* Disable writeback. */ |
| ctxt->dst.type = OP_NONE; |
| return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes); |
| } |
| |
| static int emulate_pop(struct x86_emulate_ctxt *ctxt, |
| void *dest, int len) |
| { |
| int rc; |
| struct segmented_address addr; |
| |
| addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]); |
| addr.seg = VCPU_SREG_SS; |
| rc = segmented_read(ctxt, addr, dest, len); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| |
| register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len); |
| return rc; |
| } |
| |
| static int em_pop(struct x86_emulate_ctxt *ctxt) |
| { |
| return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); |
| } |
| |
| static int emulate_popf(struct x86_emulate_ctxt *ctxt, |
| void *dest, int len) |
| { |
| int rc; |
| unsigned long val, change_mask; |
| int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; |
| int cpl = ctxt->ops->cpl(ctxt); |
| |
| rc = emulate_pop(ctxt, &val, len); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| |
| change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF |
| | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID; |
| |
| switch(ctxt->mode) { |
| case X86EMUL_MODE_PROT64: |
| case X86EMUL_MODE_PROT32: |
| case X86EMUL_MODE_PROT16: |
| if (cpl == 0) |
| change_mask |= EFLG_IOPL; |
| if (cpl <= iopl) |
| change_mask |= EFLG_IF; |
| break; |
| case X86EMUL_MODE_VM86: |
| if (iopl < 3) |
| return emulate_gp(ctxt, 0); |
| change_mask |= EFLG_IF; |
| break; |
| default: /* real mode */ |
| change_mask |= (EFLG_IOPL | EFLG_IF); |
| break; |
| } |
| |
| *(unsigned long *)dest = |
| (ctxt->eflags & ~change_mask) | (val & change_mask); |
| |
| return rc; |
| } |
| |
| static int em_popf(struct x86_emulate_ctxt *ctxt) |
| { |
| ctxt->dst.type = OP_REG; |
| ctxt->dst.addr.reg = &ctxt->eflags; |
| ctxt->dst.bytes = ctxt->op_bytes; |
| return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes); |
| } |
| |
| static int em_push_sreg(struct x86_emulate_ctxt *ctxt) |
| { |
| int seg = ctxt->src2.val; |
| |
| ctxt->src.val = get_segment_selector(ctxt, seg); |
| |
| return em_push(ctxt); |
| } |
| |
| static int em_pop_sreg(struct x86_emulate_ctxt *ctxt) |
| { |
| int seg = ctxt->src2.val; |
| unsigned long selector; |
| int rc; |
| |
| rc = emulate_pop(ctxt, &selector, ctxt->op_bytes); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| |
| rc = load_segment_descriptor(ctxt, (u16)selector, seg); |
| return rc; |
| } |
| |
| static int em_pusha(struct x86_emulate_ctxt *ctxt) |
| { |
| unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP]; |
| int rc = X86EMUL_CONTINUE; |
| int reg = VCPU_REGS_RAX; |
| |
| while (reg <= VCPU_REGS_RDI) { |
		ctxt->src.val = (reg == VCPU_REGS_RSP) ?
				old_esp : ctxt->regs[reg];
| |
| rc = em_push(ctxt); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| |
| ++reg; |
| } |
| |
| return rc; |
| } |
| |
| static int em_pushf(struct x86_emulate_ctxt *ctxt) |
| { |
| ctxt->src.val = (unsigned long)ctxt->eflags; |
| return em_push(ctxt); |
| } |
| |
| static int em_popa(struct x86_emulate_ctxt *ctxt) |
| { |
| int rc = X86EMUL_CONTINUE; |
| int reg = VCPU_REGS_RDI; |
| |
| while (reg >= VCPU_REGS_RAX) { |
| if (reg == VCPU_REGS_RSP) { |
| register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], |
| ctxt->op_bytes); |
| --reg; |
| } |
| |
| rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes); |
| if (rc != X86EMUL_CONTINUE) |
| break; |
| --reg; |
| } |
| return rc; |
| } |
| |
| int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) |
| { |
| struct x86_emulate_ops *ops = ctxt->ops; |
| int rc; |
| struct desc_ptr dt; |
| gva_t cs_addr; |
| gva_t eip_addr; |
| u16 cs, eip; |
| |
| /* TODO: Add limit checks */ |
| ctxt->src.val = ctxt->eflags; |
| rc = em_push(ctxt); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| |
| ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC); |
| |
| ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS); |
| rc = em_push(ctxt); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| |
| ctxt->src.val = ctxt->_eip; |
| rc = em_push(ctxt); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| |
| ops->get_idt(ctxt, &dt); |
| |
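	/* A real-mode IVT entry is 4 bytes: a 16-bit offset, then a 16-bit segment. */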
| eip_addr = dt.address + (irq << 2); |
| cs_addr = dt.address + (irq << 2) + 2; |
| |
| rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| |
| rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| |
| rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| |
| ctxt->_eip = eip; |
| |
| return rc; |
| } |
| |
| static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq) |
| { |
| switch(ctxt->mode) { |
| case X86EMUL_MODE_REAL: |
| return emulate_int_real(ctxt, irq); |
| case X86EMUL_MODE_VM86: |
| case X86EMUL_MODE_PROT16: |
| case X86EMUL_MODE_PROT32: |
| case X86EMUL_MODE_PROT64: |
| default: |
		/* Protected mode interrupts are not implemented yet */
| return X86EMUL_UNHANDLEABLE; |
| } |
| } |
| |
| static int emulate_iret_real(struct x86_emulate_ctxt *ctxt) |
| { |
| int rc = X86EMUL_CONTINUE; |
| unsigned long temp_eip = 0; |
| unsigned long temp_eflags = 0; |
| unsigned long cs = 0; |
| unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF | |
| EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF | |
| EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */ |
| unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP; |
| |
| /* TODO: Add stack limit check */ |
| |
| rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes); |
| |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| |
| if (temp_eip & ~0xffff) |
| return emulate_gp(ctxt, 0); |
| |
| rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); |
| |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| |
| rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes); |
| |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| |
| rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); |
| |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| |
| ctxt->_eip = temp_eip; |
| |
| |
| if (ctxt->op_bytes == 4) |
| ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask)); |
| else if (ctxt->op_bytes == 2) { |
| ctxt->eflags &= ~0xffff; |
| ctxt->eflags |= temp_eflags; |
| } |
| |
| ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */ |
| ctxt->eflags |= EFLG_RESERVED_ONE_MASK; |
| |
| return rc; |
| } |
| |
| static int em_iret(struct x86_emulate_ctxt *ctxt) |
| { |
| switch(ctxt->mode) { |
| case X86EMUL_MODE_REAL: |
| return emulate_iret_real(ctxt); |
| case X86EMUL_MODE_VM86: |
| case X86EMUL_MODE_PROT16: |
| case X86EMUL_MODE_PROT32: |
| case X86EMUL_MODE_PROT64: |
| default: |
		/* iret from protected mode is not implemented yet */
| return X86EMUL_UNHANDLEABLE; |
| } |
| } |
| |
| static int em_jmp_far(struct x86_emulate_ctxt *ctxt) |
| { |
| int rc; |
| unsigned short sel; |
| |
| memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); |
| |
| rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| |
| ctxt->_eip = 0; |
| memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes); |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_grp2(struct x86_emulate_ctxt *ctxt) |
| { |
| switch (ctxt->modrm_reg) { |
| case 0: /* rol */ |
| emulate_2op_SrcB(ctxt, "rol"); |
| break; |
| case 1: /* ror */ |
| emulate_2op_SrcB(ctxt, "ror"); |
| break; |
| case 2: /* rcl */ |
| emulate_2op_SrcB(ctxt, "rcl"); |
| break; |
| case 3: /* rcr */ |
| emulate_2op_SrcB(ctxt, "rcr"); |
| break; |
| case 4: /* sal/shl */ |
| case 6: /* sal/shl */ |
| emulate_2op_SrcB(ctxt, "sal"); |
| break; |
| case 5: /* shr */ |
| emulate_2op_SrcB(ctxt, "shr"); |
| break; |
| case 7: /* sar */ |
| emulate_2op_SrcB(ctxt, "sar"); |
| break; |
| } |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_not(struct x86_emulate_ctxt *ctxt) |
| { |
| ctxt->dst.val = ~ctxt->dst.val; |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_neg(struct x86_emulate_ctxt *ctxt) |
| { |
| emulate_1op(ctxt, "neg"); |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_mul_ex(struct x86_emulate_ctxt *ctxt) |
| { |
| u8 ex = 0; |
| |
| emulate_1op_rax_rdx(ctxt, "mul", ex); |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_imul_ex(struct x86_emulate_ctxt *ctxt) |
| { |
| u8 ex = 0; |
| |
| emulate_1op_rax_rdx(ctxt, "imul", ex); |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_div_ex(struct x86_emulate_ctxt *ctxt) |
| { |
| u8 de = 0; |
| |
| emulate_1op_rax_rdx(ctxt, "div", de); |
| if (de) |
| return emulate_de(ctxt); |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_idiv_ex(struct x86_emulate_ctxt *ctxt) |
| { |
| u8 de = 0; |
| |
| emulate_1op_rax_rdx(ctxt, "idiv", de); |
| if (de) |
| return emulate_de(ctxt); |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_grp45(struct x86_emulate_ctxt *ctxt) |
| { |
| int rc = X86EMUL_CONTINUE; |
| |
| switch (ctxt->modrm_reg) { |
| case 0: /* inc */ |
| emulate_1op(ctxt, "inc"); |
| break; |
| case 1: /* dec */ |
| emulate_1op(ctxt, "dec"); |
| break; |
| case 2: /* call near abs */ { |
| long int old_eip; |
| old_eip = ctxt->_eip; |
| ctxt->_eip = ctxt->src.val; |
| ctxt->src.val = old_eip; |
| rc = em_push(ctxt); |
| break; |
| } |
| case 4: /* jmp abs */ |
| ctxt->_eip = ctxt->src.val; |
| break; |
| case 5: /* jmp far */ |
| rc = em_jmp_far(ctxt); |
| break; |
| case 6: /* push */ |
| rc = em_push(ctxt); |
| break; |
| } |
| return rc; |
| } |
| |
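/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit destination; on a match,
 * store ECX:EBX there and set ZF, otherwise load EDX:EAX from the
 * destination and clear ZF.
 */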
| static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt) |
| { |
| u64 old = ctxt->dst.orig_val64; |
| |
| if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) || |
| ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) { |
| ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0); |
| ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32); |
| ctxt->eflags &= ~EFLG_ZF; |
| } else { |
| ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) | |
| (u32) ctxt->regs[VCPU_REGS_RBX]; |
| |
| ctxt->eflags |= EFLG_ZF; |
| } |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_ret(struct x86_emulate_ctxt *ctxt) |
| { |
| ctxt->dst.type = OP_REG; |
| ctxt->dst.addr.reg = &ctxt->_eip; |
| ctxt->dst.bytes = ctxt->op_bytes; |
| return em_pop(ctxt); |
| } |
| |
| static int em_ret_far(struct x86_emulate_ctxt *ctxt) |
| { |
| int rc; |
| unsigned long cs; |
| |
| rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| if (ctxt->op_bytes == 4) |
| ctxt->_eip = (u32)ctxt->_eip; |
| rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); |
| return rc; |
| } |
| |
| static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) |
| { |
| /* Save real source value, then compare EAX against destination. */ |
| ctxt->src.orig_val = ctxt->src.val; |
| ctxt->src.val = ctxt->regs[VCPU_REGS_RAX]; |
| emulate_2op_SrcV(ctxt, "cmp"); |
| |
| if (ctxt->eflags & EFLG_ZF) { |
| /* Success: write back to memory. */ |
| ctxt->dst.val = ctxt->src.orig_val; |
| } else { |
| /* Failure: write the value we saw to EAX. */ |
| ctxt->dst.type = OP_REG; |
| ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX]; |
| } |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_lseg(struct x86_emulate_ctxt *ctxt) |
| { |
| int seg = ctxt->src2.val; |
| unsigned short sel; |
| int rc; |
| |
| memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); |
| |
| rc = load_segment_descriptor(ctxt, sel, seg); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| |
| ctxt->dst.val = ctxt->src.val; |
| return rc; |
| } |
| |
| static void |
| setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, |
| struct desc_struct *cs, struct desc_struct *ss) |
| { |
| u16 selector; |
| |
| memset(cs, 0, sizeof(struct desc_struct)); |
| ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS); |
| memset(ss, 0, sizeof(struct desc_struct)); |
| |
| cs->l = 0; /* will be adjusted later */ |
| set_desc_base(cs, 0); /* flat segment */ |
| cs->g = 1; /* 4kb granularity */ |
| set_desc_limit(cs, 0xfffff); /* 4GB limit */ |
| cs->type = 0x0b; /* Read, Execute, Accessed */ |
| cs->s = 1; |
| cs->dpl = 0; /* will be adjusted later */ |
| cs->p = 1; |
| cs->d = 1; |
| |
| set_desc_base(ss, 0); /* flat segment */ |
| set_desc_limit(ss, 0xfffff); /* 4GB limit */ |
| ss->g = 1; /* 4kb granularity */ |
| ss->s = 1; |
| ss->type = 0x03; /* Read/Write, Accessed */ |
| ss->d = 1; /* 32bit stack segment */ |
| ss->dpl = 0; |
| ss->p = 1; |
| } |
| |
| static bool vendor_intel(struct x86_emulate_ctxt *ctxt) |
| { |
| u32 eax, ebx, ecx, edx; |
| |
| eax = ecx = 0; |
| return ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx) |
| && ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx |
| && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx |
| && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx; |
| } |
| |
| static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) |
| { |
| struct x86_emulate_ops *ops = ctxt->ops; |
| u32 eax, ebx, ecx, edx; |
| |
| /* |
	 * syscall should always be enabled in long mode, so only become
	 * vendor specific (via cpuid) if other modes are active...
| */ |
| if (ctxt->mode == X86EMUL_MODE_PROT64) |
| return true; |
| |
| eax = 0x00000000; |
| ecx = 0x00000000; |
| if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) { |
| /* |
| * Intel ("GenuineIntel") |
		 * remark: Intel CPUs only support "syscall" in 64-bit long
		 * mode.  A 64-bit guest running a 32-bit compat app will
		 * #UD!  While this behaviour could be fixed up (by
		 * emulating the AMD response), AMD CPUs can't be made to
		 * behave like Intel's.
| */ |
| if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && |
| ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && |
| edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx) |
| return false; |
| |
| /* AMD ("AuthenticAMD") */ |
| if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx && |
| ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx && |
| edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) |
| return true; |
| |
| /* AMD ("AMDisbetter!") */ |
| if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx && |
| ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx && |
| edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx) |
| return true; |
| } |
| |
| /* default: (not Intel, not AMD), apply Intel's stricter rules... */ |
| return false; |
| } |
| |
| static int em_syscall(struct x86_emulate_ctxt *ctxt) |
| { |
| struct x86_emulate_ops *ops = ctxt->ops; |
| struct desc_struct cs, ss; |
| u64 msr_data; |
| u16 cs_sel, ss_sel; |
| u64 efer = 0; |
| |
| /* syscall is not available in real mode */ |
| if (ctxt->mode == X86EMUL_MODE_REAL || |
| ctxt->mode == X86EMUL_MODE_VM86) |
| return emulate_ud(ctxt); |
| |
	if (!em_syscall_is_enabled(ctxt))
| return emulate_ud(ctxt); |
| |
| ops->get_msr(ctxt, MSR_EFER, &efer); |
| setup_syscalls_segments(ctxt, &cs, &ss); |
| |
| if (!(efer & EFER_SCE)) |
| return emulate_ud(ctxt); |
| |
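	/*
	 * SYSCALL: the CS selector comes from STAR[47:32]; SS is
	 * architecturally CS + 8.
	 */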
| ops->get_msr(ctxt, MSR_STAR, &msr_data); |
| msr_data >>= 32; |
| cs_sel = (u16)(msr_data & 0xfffc); |
| ss_sel = (u16)(msr_data + 8); |
| |
| if (efer & EFER_LMA) { |
| cs.d = 0; |
| cs.l = 1; |
| } |
| ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); |
| ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); |
| |
| ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip; |
| if (efer & EFER_LMA) { |
| #ifdef CONFIG_X86_64 |
| ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF; |
| |
| ops->get_msr(ctxt, |
| ctxt->mode == X86EMUL_MODE_PROT64 ? |
| MSR_LSTAR : MSR_CSTAR, &msr_data); |
| ctxt->_eip = msr_data; |
| |
| ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); |
| ctxt->eflags &= ~(msr_data | EFLG_RF); |
| #endif |
| } else { |
| /* legacy mode */ |
| ops->get_msr(ctxt, MSR_STAR, &msr_data); |
| ctxt->_eip = (u32)msr_data; |
| |
| ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF); |
| } |
| |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_sysenter(struct x86_emulate_ctxt *ctxt) |
| { |
| struct x86_emulate_ops *ops = ctxt->ops; |
| struct desc_struct cs, ss; |
| u64 msr_data; |
| u16 cs_sel, ss_sel; |
| u64 efer = 0; |
| |
| ops->get_msr(ctxt, MSR_EFER, &efer); |
| /* inject #GP if in real mode */ |
| if (ctxt->mode == X86EMUL_MODE_REAL) |
| return emulate_gp(ctxt, 0); |
| |
| /* |
| * Not recognized on AMD in compat mode (but is recognized in legacy |
| * mode). |
| */ |
| if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA) |
| && !vendor_intel(ctxt)) |
| return emulate_ud(ctxt); |
| |
| /* |
| * XXX sysenter/sysexit have not been tested in 64-bit mode, so |
| * inject a #UD. |
| */ |
| if (ctxt->mode == X86EMUL_MODE_PROT64) |
| return emulate_ud(ctxt); |
| |
| setup_syscalls_segments(ctxt, &cs, &ss); |
| |
| ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); |
| switch (ctxt->mode) { |
| case X86EMUL_MODE_PROT32: |
| if ((msr_data & 0xfffc) == 0x0) |
| return emulate_gp(ctxt, 0); |
| break; |
| case X86EMUL_MODE_PROT64: |
| /* Not reached: 64-bit mode was rejected above. */ |
| if (msr_data == 0x0) |
| return emulate_gp(ctxt, 0); |
| break; |
| } |
| |
| ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF); |
| cs_sel = (u16)msr_data; |
| cs_sel &= ~SELECTOR_RPL_MASK; |
| ss_sel = cs_sel + 8; |
| ss_sel &= ~SELECTOR_RPL_MASK; |
| if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) { |
| cs.d = 0; |
| cs.l = 1; |
| } |
| |
| ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); |
| ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); |
| |
| ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); |
| ctxt->_eip = msr_data; |
| |
| ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); |
| ctxt->regs[VCPU_REGS_RSP] = msr_data; |
| |
| return X86EMUL_CONTINUE; |
| } |
| |
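| /* |
| * Illustrative sketch (hypothetical helper) of the selector layout |
| * assumed by em_sysenter() above: MSR_IA32_SYSENTER_CS supplies the |
| * kernel CS, SS is the next GDT entry, and both RPLs are cleared |
| * because SYSENTER always enters at CPL 0. |
| */ |
| static inline void sysenter_selectors(u64 sysenter_cs, u16 *cs_sel, |
| u16 *ss_sel) |
| { |
| *cs_sel = (u16)sysenter_cs & ~SELECTOR_RPL_MASK; |
| *ss_sel = (*cs_sel + 8) & ~SELECTOR_RPL_MASK; |
| } |
| |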
| static int em_sysexit(struct x86_emulate_ctxt *ctxt) |
| { |
| struct x86_emulate_ops *ops = ctxt->ops; |
| struct desc_struct cs, ss; |
| u64 msr_data; |
| int usermode; |
| u16 cs_sel = 0, ss_sel = 0; |
| |
| /* inject #GP if in real mode or Virtual 8086 mode */ |
| if (ctxt->mode == X86EMUL_MODE_REAL || |
| ctxt->mode == X86EMUL_MODE_VM86) |
| return emulate_gp(ctxt, 0); |
| |
| setup_syscalls_segments(ctxt, &cs, &ss); |
| |
| if (ctxt->rex_prefix & 0x8) |
| usermode = X86EMUL_MODE_PROT64; |
| else |
| usermode = X86EMUL_MODE_PROT32; |
| |
| cs.dpl = 3; |
| ss.dpl = 3; |
| ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); |
| switch (usermode) { |
| case X86EMUL_MODE_PROT32: |
| cs_sel = (u16)(msr_data + 16); |
| if ((msr_data & 0xfffc) == 0x0) |
| return emulate_gp(ctxt, 0); |
| ss_sel = (u16)(msr_data + 24); |
| break; |
| case X86EMUL_MODE_PROT64: |
| cs_sel = (u16)(msr_data + 32); |
| if (msr_data == 0x0) |
| return emulate_gp(ctxt, 0); |
| ss_sel = cs_sel + 8; |
| cs.d = 0; |
| cs.l = 1; |
| break; |
| } |
| cs_sel |= SELECTOR_RPL_MASK; |
| ss_sel |= SELECTOR_RPL_MASK; |
| |
| ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); |
| ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); |
| |
| ctxt->_eip = ctxt->regs[VCPU_REGS_RDX]; |
| ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX]; |
| |
| return X86EMUL_CONTINUE; |
| } |
| |
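| /* |
| * Illustrative sketch (hypothetical helper) of the fixed GDT offsets |
| * that em_sysexit() above derives from MSR_IA32_SYSENTER_CS, with RPL |
| * forced to 3 for the user-mode return: a 32-bit exit uses |
| * SYSENTER_CS + 16 for CS and + 24 for SS; a 64-bit exit uses + 32 |
| * for CS and + 40 (CS + 8) for SS. |
| */ |
| static inline u16 sysexit_user_cs(u64 sysenter_cs, bool to_64bit) |
| { |
| return (u16)(sysenter_cs + (to_64bit ? 32 : 16)) | SELECTOR_RPL_MASK; |
| } |
| |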
| static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) |
| { |
| int iopl; |
| if (ctxt->mode == X86EMUL_MODE_REAL) |
| return false; |
| if (ctxt->mode == X86EMUL_MODE_VM86) |
| return true; |
| iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; |
| return ctxt->ops->cpl(ctxt) > iopl; |
| } |
| |
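| /* |
| * Worked example (illustrative helper) for emulator_bad_iopl() above: |
| * IOPL lives in EFLAGS bits 13:12.  With eflags = 0x3202 the field |
| * value is 3, so even CPL 3 code may touch I/O ports directly; with |
| * the more common IOPL of 0, any CPL above 0 must pass the TSS bitmap |
| * check below instead. |
| */ |
| static inline int eflags_iopl(unsigned long eflags) |
| { |
| return (eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; |
| } |
| |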
| static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, |
| u16 port, u16 len) |
| { |
| struct x86_emulate_ops *ops = ctxt->ops; |
| struct desc_struct tr_seg; |
| u32 base3; |
| int r; |
| u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7; |
| unsigned mask = (1 << len) - 1; |
| unsigned long base; |
| |
| ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR); |
| if (!tr_seg.p) |
| return false; |
| /* The TSS must at least cover the I/O map base at offset 102. */ |
| if (desc_limit_scaled(&tr_seg) < 103) |
| return false; |
| base = get_desc_base(&tr_seg); |
| #ifdef CONFIG_X86_64 |
| base |= ((u64)base3) << 32; |
| #endif |
| r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL); |
| if (r != X86EMUL_CONTINUE) |
| return false; |
| if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) |
| return false; |
| r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL); |
| if (r != X86EMUL_CONTINUE) |
| return false; |
| if ((perm >> bit_idx) & mask) |
| return false; |
| return true; |
| } |
| |
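| /* |
| * Worked example (illustrative helper) of the I/O permission bitmap |
| * lookup in emulator_io_port_access_allowed() above: for port 0x3f9 |
| * and len 2, the byte offset into the bitmap is 0x3f9 / 8 = 127 and |
| * bit_idx is 0x3f9 & 7 = 1, so the access is allowed only if bits 1 |
| * and 2 of the 16-bit word read at that offset are both clear. |
| */ |
| static inline bool io_bitmap_allows(u16 perm, u16 port, u16 len) |
| { |
| unsigned mask = (1 << len) - 1; |
| |
| return !((perm >> (port & 0x7)) & mask); |
| } |
| |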
| static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt, |
| u16 port, u16 len) |
| { |
| if (ctxt->perm_ok) |
| return true; |
| |
| if (emulator_bad_iopl(ctxt)) |
| if (!emulator_io_port_access_allowed(ctxt, port, len)) |
| return false; |
| |
| ctxt->perm_ok = true; |
| |
| return true; |
| } |
| |
| static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, |
| struct tss_segment_16 *tss) |
| { |
| tss->ip = ctxt->_eip; |
| tss->flag = ctxt->eflags; |
| tss->ax = ctxt->regs[VCPU_REGS_RAX]; |
| tss->cx = ctxt->regs[VCPU_REGS_RCX]; |
| tss->dx = ctxt->regs[VCPU_REGS_RDX]; |
| tss->bx = ctxt->regs[VCPU_REGS_RBX]; |
| tss->sp = ctxt->regs[VCPU_REGS_RSP]; |
| tss->bp = ctxt->regs[VCPU_REGS_RBP]; |
| tss->si = ctxt->regs[VCPU_REGS_RSI]; |
| tss->di = ctxt->regs[VCPU_REGS_RDI]; |
| |
| tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); |
| tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); |
| tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); |
| tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); |
| tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR); |
| } |
| |
| static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, |
| struct tss_segment_16 *tss) |
| { |
| int ret; |
| |
| ctxt->_eip = tss->ip; |
| ctxt->eflags = tss->flag | 2; /* bit 1 of EFLAGS is always set */ |
| ctxt->regs[VCPU_REGS_RAX] = tss->ax; |
| ctxt->regs[VCPU_REGS_RCX] = tss->cx; |
| ctxt->regs[VCPU_REGS_RDX] = tss->dx; |
| ctxt->regs[VCPU_REGS_RBX] = tss->bx; |
| ctxt->regs[VCPU_REGS_RSP] = tss->sp; |
| ctxt->regs[VCPU_REGS_RBP] = tss->bp; |
| ctxt->regs[VCPU_REGS_RSI] = tss->si; |
| ctxt->regs[VCPU_REGS_RDI] = tss->di; |
| |
| /* |
| * The SDM says that segment selectors are loaded before segment |
| * descriptors. |
| */ |
| set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); |
| set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); |
| set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); |
| set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); |
| set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); |
| |
| /* |
| * Now load the segment descriptors.  If a fault happens at this |
| * stage, it is handled in the context of the new task. |
| */ |
| ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR); |
| if (ret != X86EMUL_CONTINUE) |
| return ret; |
| ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES); |
| if (ret != X86EMUL_CONTINUE) |
| return ret; |
| ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS); |
| if (ret != X86EMUL_CONTINUE) |
| return ret; |
| ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS); |
| if (ret != X86EMUL_CONTINUE) |
| return ret; |
| ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS); |
| if (ret != X86EMUL_CONTINUE) |
| return ret; |
| |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int task_switch_16(struct x86_emulate_ctxt *ctxt, |
| u16 tss_selector, u16 old_tss_sel, |
| ulong old_tss_base, struct desc_struct *new_desc) |
| { |
| struct x86_emulate_ops *ops = ctxt->ops; |
| struct tss_segment_16 tss_seg; |
| int ret; |
| u32 new_tss_base = get_desc_base(new_desc); |
| |
| ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, |
| &ctxt->exception); |
| if (ret != X86EMUL_CONTINUE) |
| /* FIXME: need to provide precise fault address */ |
| return ret; |
| |
| save_state_to_tss16(ctxt, &tss_seg); |
| |
| ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, |
| &ctxt->exception); |
| if (ret != X86EMUL_CONTINUE) |
| /* FIXME: need to provide precise fault address */ |
| return ret; |
| |
| ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, |
| &ctxt->exception); |
| if (ret != X86EMUL_CONTINUE) |
| /* FIXME: need to provide precise fault address */ |
| return ret; |
| |
| if (old_tss_sel != 0xffff) { |
| tss_seg.prev_task_link = old_tss_sel; |
| |
| ret = ops->write_std(ctxt, new_tss_base, |
| &tss_seg.prev_task_link, |
| sizeof tss_seg.prev_task_link, |
| &ctxt->exception); |
| if (ret != X86EMUL_CONTINUE) |
| /* FIXME: need to provide precise fault address */ |
| return ret; |
| } |
| |
| return load_state_from_tss16(ctxt, &tss_seg); |
| } |
| |
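| /* |
| * Note (illustrative helper, mirrors the checks in task_switch_16() |
| * and task_switch_32()): 0xffff serves as the "no back link" sentinel |
| * for old_tss_sel.  Only CALL- and gate-initiated switches keep the |
| * real selector, so that the prev_task_link field written into the |
| * new TSS lets a later IRET with EFLAGS.NT set return to the old task. |
| */ |
| static inline bool tss_needs_back_link(u16 old_tss_sel) |
| { |
| return old_tss_sel != 0xffff; |
| } |
| |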
| static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, |
| struct tss_segment_32 *tss) |
| { |
| tss->cr3 = ctxt->ops->get_cr(ctxt, 3); |
| tss->eip = ctxt->_eip; |
| tss->eflags = ctxt->eflags; |
| tss->eax = ctxt->regs[VCPU_REGS_RAX]; |
| tss->ecx = ctxt->regs[VCPU_REGS_RCX]; |
| tss->edx = ctxt->regs[VCPU_REGS_RDX]; |
| tss->ebx = ctxt->regs[VCPU_REGS_RBX]; |
| tss->esp = ctxt->regs[VCPU_REGS_RSP]; |
| tss->ebp = ctxt->regs[VCPU_REGS_RBP]; |
| tss->esi = ctxt->regs[VCPU_REGS_RSI]; |
| tss->edi = ctxt->regs[VCPU_REGS_RDI]; |
| |
| tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); |
| tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); |
| tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); |
| tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); |
| tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS); |
| tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS); |
| tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR); |
| } |
| |
| static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, |
| struct tss_segment_32 *tss) |
| { |
| int ret; |
| |
| if (ctxt->ops->set_cr(ctxt, 3, tss->cr3)) |
| return emulate_gp(ctxt, 0); |
| ctxt->_eip = tss->eip; |
| ctxt->eflags = tss->eflags | 2; /* bit 1 of EFLAGS is always set */ |
| |
| /* General purpose registers */ |
| ctxt->regs[VCPU_REGS_RAX] = tss->eax; |
| ctxt->regs[VCPU_REGS_RCX] = tss->ecx; |
| ctxt->regs[VCPU_REGS_RDX] = tss->edx; |
| ctxt->regs[VCPU_REGS_RBX] = tss->ebx; |
| ctxt->regs[VCPU_REGS_RSP] = tss->esp; |
| ctxt->regs[VCPU_REGS_RBP] = tss->ebp; |
| ctxt->regs[VCPU_REGS_RSI] = tss->esi; |
| ctxt->regs[VCPU_REGS_RDI] = tss->edi; |
| |
| /* |
| * The SDM says that segment selectors are loaded before segment |
| * descriptors. |
| */ |
| set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); |
| set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); |
| set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); |
| set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); |
| set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); |
| set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS); |
| set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS); |
| |
| /* |
| * If we're switching between Protected Mode and VM86, we need to make |
| * sure to update the mode before loading the segment descriptors so |
| * that the selectors are interpreted correctly. |
| * |
| * rflags must reach the vcpu struct immediately because it |
| * influences the CPL, which is checked at least when loading the |
| * segment descriptors and when pushing an error code onto the new |
| * kernel stack. |
| * |
| * TODO Introduce a separate ctxt->ops->set_cpl callback |
| */ |
| if (ctxt->eflags & X86_EFLAGS_VM) |
| ctxt->mode = X86EMUL_MODE_VM86; |
| else |
| ctxt->mode = X86EMUL_MODE_PROT32; |
| |
| ctxt->ops->set_rflags(ctxt, ctxt->eflags); |
| |
| /* |
| * Now load the segment descriptors.  If a fault happens at this |
| * stage, it is handled in the context of the new task. |
| */ |
| ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); |
| if (ret != X86EMUL_CONTINUE) |
| return ret; |
| ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES); |
| if (ret != X86EMUL_CONTINUE) |
| return ret; |
| ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS); |
| if (ret != X86EMUL_CONTINUE) |
| return ret; |
| ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS); |
| if (ret != X86EMUL_CONTINUE) |
| return ret; |
| ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS); |
| if (ret != X86EMUL_CONTINUE) |
| return ret; |
| ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS); |
| if (ret != X86EMUL_CONTINUE) |
| return ret; |
| ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS); |
| if (ret != X86EMUL_CONTINUE) |
| return ret; |
| |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int task_switch_32(struct x86_emulate_ctxt *ctxt, |
| u16 tss_selector, u16 old_tss_sel, |
| ulong old_tss_base, struct desc_struct *new_desc) |
| { |
| struct x86_emulate_ops *ops = ctxt->ops; |
| struct tss_segment_32 tss_seg; |
| int ret; |
| u32 new_tss_base = get_desc_base(new_desc); |
| |
| ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, |
| &ctxt->exception); |
| if (ret != X86EMUL_CONTINUE) |
| /* FIXME: need to provide precise fault address */ |
| return ret; |
| |
| save_state_to_tss32(ctxt, &tss_seg); |
| |
| ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, |
| &ctxt->exception); |
| if (ret != X86EMUL_CONTINUE) |
| /* FIXME: need to provide precise fault address */ |
| return ret; |
| |
| ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, |
| &ctxt->exception); |
| if (ret != X86EMUL_CONTINUE) |
| /* FIXME: need to provide precise fault address */ |
| return ret; |
| |
| if (old_tss_sel != 0xffff) { |
| tss_seg.prev_task_link = old_tss_sel; |
| |
| ret = ops->write_std(ctxt, new_tss_base, |
| &tss_seg.prev_task_link, |
| sizeof tss_seg.prev_task_link, |
| &ctxt->exception); |
| if (ret != X86EMUL_CONTINUE) |
| /* FIXME: need to provide precise fault address */ |
| return ret; |
| } |
| |
| return load_state_from_tss32(ctxt, &tss_seg); |
| } |
| |
| static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, |
| u16 tss_selector, int idt_index, int reason, |
| bool has_error_code, u32 error_code) |
| { |
| struct x86_emulate_ops *ops = ctxt->ops; |
| struct desc_struct curr_tss_desc, next_tss_desc; |
| int ret; |
| u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR); |
| ulong old_tss_base = |
| ops->get_cached_segment_base(ctxt, VCPU_SREG_TR); |
| u32 desc_limit; |
| |
| /* FIXME: old_tss_base == ~0 ? */ |
| |
| ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc); |
| if (ret != X86EMUL_CONTINUE) |
| return ret; |
| ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc); |
| if (ret != X86EMUL_CONTINUE) |
| return ret; |
| |
| /* FIXME: check that next_tss_desc is tss */ |
| |
| /* |
| * Check privileges. The three cases are task switch caused by... |
| * |
| * 1. jmp/call/int to task gate: Check against DPL of the task gate |
| * 2. Exception/IRQ/iret: No check is performed |
| * 3. jmp/call to TSS: Check against DPL of the TSS |
| */ |
| if (reason == TASK_SWITCH_GATE) { |
| if (idt_index != -1) { |
| /* Software interrupts */ |
| struct desc_struct task_gate_desc; |
| int dpl; |
| |
| ret = read_interrupt_descriptor(ctxt, idt_index, |
| &task_gate_desc); |
| if (ret != X86EMUL_CONTINUE) |
| return ret; |
| |
| dpl = task_gate_desc.dpl; |
| if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) |
| return emulate_gp(ctxt, (idt_index << 3) | 0x2); |
| } |
| } else if (reason != TASK_SWITCH_IRET) { |
| int dpl = next_tss_desc.dpl; |
| if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) |
| return emulate_gp(ctxt, tss_selector); |
| } |
| |
| desc_limit = desc_limit_scaled(&next_tss_desc); |
| if (!next_tss_desc.p || |
| ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || |
| desc_limit < 0x2b)) { |
| emulate_ts(ctxt, tss_selector & 0xfffc); |
| return X86EMUL_PROPAGATE_FAULT; |
| } |
| |
| if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { |
| curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */ |
| write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc); |
| } |
| |
| if (reason == TASK_SWITCH_IRET) |
| ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT; |
| |
| /* |
| * Set the back link to the previous task only for CALL- and |
| * gate-initiated switches, which run with NT set in eflags.  Note |
| * that old_tss_sel is not used after this point. |
| */ |
| if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE) |
| old_tss_sel = 0xffff; |
| |
| if (next_tss_desc.type & 8) |
| ret = task_switch_32(ctxt, tss_selector, old_tss_sel, |
| old_tss_base, &next_tss_desc); |
| else |
| ret = task_switch_16(ctxt, tss_selector, old_tss_sel, |
| old_tss_base, &next_tss_desc); |
| if (ret != X86EMUL_CONTINUE) |
| return ret; |
| |
| if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) |
| ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT; |
| |
| if (reason != TASK_SWITCH_IRET) { |
| next_tss_desc.type |= (1 << 1); /* set busy flag */ |
| write_segment_descriptor(ctxt, tss_selector, &next_tss_desc); |
| } |
| |
| ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS); |
| ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR); |
| |
| if (has_error_code) { |
| ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2; |
| ctxt->lock_prefix = 0; |
| ctxt->src.val = (unsigned long) error_code; |
| ret = em_push(ctxt); |
| } |
| |
| return ret; |
| } |
| |
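| /* |
| * Illustrative decoding (hypothetical helpers) of the system-descriptor |
| * type field tested in emulator_do_task_switch() above: bit 3 |
| * distinguishes a 32-bit TSS from a 16-bit one, and bit 1 is the busy |
| * flag that is cleared on the outgoing TSS and set on the incoming one. |
| */ |
| static inline bool tss_desc_is_32bit(struct desc_struct *desc) |
| { |
| return desc->type & 8; |
| } |
| |
| static inline bool tss_desc_is_busy(struct desc_struct *desc) |
| { |
| return desc->type & (1 << 1); |
| } |
| |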
| int emulator_task_switch(struct x86_emulate_ctxt *ctxt, |
| u16 tss_selector, int idt_index, int reason, |
| bool has_error_code, u32 error_code) |
| { |
| int rc; |
| |
| ctxt->_eip = ctxt->eip; |
| ctxt->dst.type = OP_NONE; |
| |
| rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason, |
| has_error_code, error_code); |
| |
| if (rc == X86EMUL_CONTINUE) |
| ctxt->eip = ctxt->_eip; |
| |
| return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; |
| } |
| |
| static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg, |
| int reg, struct operand *op) |
| { |
| int df = (ctxt->eflags & EFLG_DF) ? -1 : 1; |
| |
| register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes); |
| op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]); |
| op->addr.mem.seg = seg; |
| } |
| |
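| /* |
| * Example (illustrative helper, same computation as string_addr_inc() |
| * above): with EFLG_DF clear a 4-byte string op advances the index |
| * register by +4 per iteration, and with DF set by -4. |
| */ |
| static inline int string_step(unsigned long eflags, unsigned bytes) |
| { |
| return (eflags & EFLG_DF) ? -(int)bytes : (int)bytes; |
| } |
| |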
| static int em_das(struct x86_emulate_ctxt *ctxt) |
| { |
| u8 al, old_al; |
| bool af, cf, old_cf; |
| |
| cf = ctxt->eflags & X86_EFLAGS_CF; |
| al = ctxt->dst.val; |
| |
| old_al = al; |
| old_cf = cf; |
| cf = false; |
| af = ctxt->eflags & X86_EFLAGS_AF; |
| if ((al & 0x0f) > 9 || af) { |
| al -= 6; |
| cf = old_cf | (al >= 250); |
| af = true; |
| } else { |
| af = false; |
| } |
| if (old_al > 0x99 || old_cf) { |
| al -= 0x60; |
| cf = true; |
| } |
| |
| ctxt->dst.val = al; |
| /* Set PF, ZF, SF */ |
| ctxt->src.type = OP_IMM; |
| ctxt->src.val = 0; |
| ctxt->src.bytes = 1; |
| emulate_2op_SrcV(ctxt, "or"); |
| ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF); |
| if (cf) |
| ctxt->eflags |= X86_EFLAGS_CF; |
| if (af) |
| ctxt->eflags |= X86_EFLAGS_AF; |
| return X86EMUL_CONTINUE; |
| } |
| |
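| /* |
| * Worked example (illustrative helper) for em_das() above: after the |
| * packed BCD subtraction 0x31 - 0x07 the CPU leaves AL = 0x2a with AF |
| * set; the low nibble 0xa > 9 triggers the "al -= 6" step, giving the |
| * correct BCD result 0x24. |
| */ |
| static inline u8 das_low_nibble_adjust(u8 al, bool af) |
| { |
| return ((al & 0x0f) > 9 || af) ? al - 6 : al; |
| } |
| |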
| static int em_call(struct x86_emulate_ctxt *ctxt) |
| { |
| long rel = ctxt->src.val; |
| |
| ctxt->src.val = (unsigned long)ctxt->_eip; |
| jmp_rel(ctxt, rel); |
| return em_push(ctxt); |
| } |
| |
| static int em_call_far(struct x86_emulate_ctxt *ctxt) |
| { |
| u16 sel, old_cs; |
| ulong old_eip; |
| int rc; |
| |
| old_cs = get_segment_selector(ctxt, VCPU_SREG_CS); |
| old_eip = ctxt->_eip; |
| |
| memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); |
| if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS)) |
| return X86EMUL_CONTINUE; |
| |
| ctxt->_eip = 0; |
| memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes); |
| |
| ctxt->src.val = old_cs; |
| rc = em_push(ctxt); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| |
| ctxt->src.val = old_eip; |
| return em_push(ctxt); |
| } |
| |
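| /* |
| * Illustrative sketch (hypothetical helper) of the far-pointer operand |
| * layout consumed by em_call_far() above: the offset comes first, |
| * op_bytes wide, and the 16-bit segment selector follows it, so a |
| * 32-bit "lcall" operand is laid out as <offset:4><selector:2>. |
| */ |
| static inline u16 far_ptr_selector(const u8 *valptr, unsigned op_bytes) |
| { |
| u16 sel; |
| |
| memcpy(&sel, valptr + op_bytes, sizeof(sel)); |
| return sel; |
| } |
| |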
| static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) |
| { |
| int rc; |
| |
| ctxt->dst.type = OP_REG; |
| ctxt->dst.addr.reg = &ctxt->_eip; |
| ctxt->dst.bytes = ctxt->op_bytes; |
| rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); |
| if (rc != X86EMUL_CONTINUE) |
| return rc; |
| register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val); |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_add(struct x86_emulate_ctxt *ctxt) |
| { |
| emulate_2op_SrcV(ctxt, "add"); |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_or(struct x86_emulate_ctxt *ctxt) |
| { |
| emulate_2op_SrcV(ctxt, "or"); |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_adc(struct x86_emulate_ctxt *ctxt) |
| { |
| emulate_2op_SrcV(ctxt, "adc"); |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_sbb(struct x86_emulate_ctxt *ctxt) |
| { |
| emulate_2op_SrcV(ctxt, "sbb"); |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_and(struct x86_emulate_ctxt *ctxt) |
| { |
| emulate_2op_SrcV(ctxt, "and"); |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_sub(struct x86_emulate_ctxt *ctxt) |
| { |
| emulate_2op_SrcV(ctxt, "sub"); |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_xor(struct x86_emulate_ctxt *ctxt) |
| { |
| emulate_2op_SrcV(ctxt, "xor"); |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_cmp(struct x86_emulate_ctxt *ctxt) |
| { |
| emulate_2op_SrcV(ctxt, "cmp"); |
| /* Disable writeback. */ |
| ctxt->dst.type = OP_NONE; |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_test(struct x86_emulate_ctxt *ctxt) |
| { |
| emulate_2op_SrcV(ctxt, "test"); |
| /* Disable writeback. */ |
| ctxt->dst.type = OP_NONE; |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_xchg(struct x86_emulate_ctxt *ctxt) |
| { |
| /* Write back the register source. */ |
| ctxt->src.val = ctxt->dst.val; |
| write_register_operand(&ctxt->src); |
| |
| /* Write back the memory destination with implicit LOCK prefix. */ |
| ctxt->dst.val = ctxt->src.orig_val; |
| ctxt->lock_prefix = 1; |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_imul(struct x86_emulate_ctxt *ctxt) |
| { |
| emulate_2op_SrcV_nobyte(ctxt, "imul"); |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_imul_3op(struct x86_emulate_ctxt *ctxt) |
| { |
| ctxt->dst.val = ctxt->src2.val; |
| return em_imul(ctxt); |
| } |
| |
| static int em_cwd(struct x86_emulate_ctxt *ctxt) |
| { |
| ctxt->dst.type = OP_REG; |
| ctxt->dst.bytes = ctxt->src.bytes; |
| ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX]; |
| ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1); |
| |
| return X86EMUL_CONTINUE; |
| } |
| |
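| /* |
| * Worked example (illustrative helper) of the sign-extension trick in |
| * em_cwd() above, for a 2-byte source: src >> 15 is 0 or 1; |
| * subtracting 1 yields all-ones or 0, and the final bitwise NOT flips |
| * that to 0x0000 (positive AX) or 0xffff (negative AX), which is the |
| * value CWD puts in DX. |
| */ |
| static inline u16 cwd_high_word(u16 ax) |
| { |
| return ~((u16)((ax >> 15) - 1)); |
| } |
| |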
| static int em_rdtsc(struct x86_emulate_ctxt *ctxt) |
| { |
| u64 tsc = 0; |
| |
| ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc); |
| ctxt->regs[VCPU_REGS_RAX] = (u32)tsc; |
| ctxt->regs[VCPU_REGS_RDX] = tsc >> 32; |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_rdpmc(struct x86_emulate_ctxt *ctxt) |
| { |
| u64 pmc; |
| |
| if (ctxt->ops->read_pmc(ctxt, ctxt->regs[VCPU_REGS_RCX], &pmc)) |
| return emulate_gp(ctxt, 0); |
| ctxt->regs[VCPU_REGS_RAX] = (u32)pmc; |
| ctxt->regs[VCPU_REGS_RDX] = pmc >> 32; |
| return X86EMUL_CONTINUE; |
| } |
| |
| static int em_mov(struct x86_emulate_ctxt *ctxt) |
| { |
| ctxt->dst.val = ctxt->src.val; |
| return X86EMUL_CONTINUE; |
| } |
| |
| |