1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <linux/module.h>
26 #include <asm/kvm_emulate.h>
27 #include <linux/stringify.h>
35 #define OpNone 0ull
36 #define OpImplicit 1ull /* No generic decode */
37 #define OpReg 2ull /* Register */
38 #define OpMem 3ull /* Memory */
39 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
40 #define OpDI 5ull /* ES:DI/EDI/RDI */
41 #define OpMem64 6ull /* Memory, 64-bit */
42 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
43 #define OpDX 8ull /* DX register */
44 #define OpCL 9ull /* CL register (for shifts) */
45 #define OpImmByte 10ull /* 8-bit sign extended immediate */
46 #define OpOne 11ull /* Implied 1 */
47 #define OpImm 12ull /* Sign extended up to 32-bit immediate */
48 #define OpMem16 13ull /* Memory operand (16-bit). */
49 #define OpMem32 14ull /* Memory operand (32-bit). */
50 #define OpImmU 15ull /* Immediate operand, zero extended */
51 #define OpSI 16ull /* SI/ESI/RSI */
52 #define OpImmFAddr 17ull /* Immediate far address */
53 #define OpMemFAddr 18ull /* Far address in memory */
54 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
55 #define OpES 20ull /* ES */
56 #define OpCS 21ull /* CS */
57 #define OpSS 22ull /* SS */
58 #define OpDS 23ull /* DS */
59 #define OpFS 24ull /* FS */
60 #define OpGS 25ull /* GS */
61 #define OpMem8 26ull /* 8-bit zero extended memory operand */
62 #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
63 #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
64 #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
65 #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
67 #define OpBits 5 /* Width of operand field */
68 #define OpMask ((1ull << OpBits) - 1)
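/*
 * Illustrative sketch (editor's addition, not in the original file):
 * the decoder recovers each operand type from the packed 64-bit flags
 * word by shifting and masking with the Dst/Src/Src2 shifts defined
 * below, e.g.:
 *
 *	unsigned dst_op  = (ctxt->d >> DstShift)  & OpMask;
 *	unsigned src_op  = (ctxt->d >> SrcShift)  & OpMask;
 *	unsigned src2_op = (ctxt->d >> Src2Shift) & OpMask;
 */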
71 * Opcode effective-address decode tables.
72 * Note that we only emulate instructions that have at least one memory
73 * operand (excluding implicit stack references). We assume that stack
74 * references and instruction fetches will never occur in special memory
75 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
76 * not be handled.
77 */
79 /* Operand sizes: 8-bit operands or specified/overridden size. */
80 #define ByteOp (1<<0) /* 8-bit operands. */
81 /* Destination operand type. */
82 #define DstShift 1
83 #define ImplicitOps (OpImplicit << DstShift)
84 #define DstReg (OpReg << DstShift)
85 #define DstMem (OpMem << DstShift)
86 #define DstAcc (OpAcc << DstShift)
87 #define DstDI (OpDI << DstShift)
88 #define DstMem64 (OpMem64 << DstShift)
89 #define DstMem16 (OpMem16 << DstShift)
90 #define DstImmUByte (OpImmUByte << DstShift)
91 #define DstDX (OpDX << DstShift)
92 #define DstAccLo (OpAccLo << DstShift)
93 #define DstMask (OpMask << DstShift)
94 /* Source operand type. */
95 #define SrcShift 6
96 #define SrcNone (OpNone << SrcShift)
97 #define SrcReg (OpReg << SrcShift)
98 #define SrcMem (OpMem << SrcShift)
99 #define SrcMem16 (OpMem16 << SrcShift)
100 #define SrcMem32 (OpMem32 << SrcShift)
101 #define SrcImm (OpImm << SrcShift)
102 #define SrcImmByte (OpImmByte << SrcShift)
103 #define SrcOne (OpOne << SrcShift)
104 #define SrcImmUByte (OpImmUByte << SrcShift)
105 #define SrcImmU (OpImmU << SrcShift)
106 #define SrcSI (OpSI << SrcShift)
107 #define SrcXLat (OpXLat << SrcShift)
108 #define SrcImmFAddr (OpImmFAddr << SrcShift)
109 #define SrcMemFAddr (OpMemFAddr << SrcShift)
110 #define SrcAcc (OpAcc << SrcShift)
111 #define SrcImmU16 (OpImmU16 << SrcShift)
112 #define SrcImm64 (OpImm64 << SrcShift)
113 #define SrcDX (OpDX << SrcShift)
114 #define SrcMem8 (OpMem8 << SrcShift)
115 #define SrcAccHi (OpAccHi << SrcShift)
116 #define SrcMask (OpMask << SrcShift)
117 #define BitOp (1<<11)
118 #define MemAbs (1<<12) /* Memory operand is absolute displacement */
119 #define String (1<<13) /* String instruction (rep capable) */
120 #define Stack (1<<14) /* Stack instruction (push/pop) */
121 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
122 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
123 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
124 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
125 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
126 #define Escape (5<<15) /* Escape to coprocessor instruction */
127 #define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
128 #define Sse (1<<18) /* SSE Vector instruction */
129 /* Generic ModRM decode. */
130 #define ModRM (1<<19)
131 /* Destination is only written; never read. */
132 #define Mov (1<<20)
133 /* Misc flags */
134 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
135 #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
136 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
137 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
138 #define Undefined (1<<25) /* No Such Instruction */
139 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
140 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
141 #define No64 (1<<28)
142 #define PageTable (1 << 29) /* instruction used to write page table */
143 #define NotImpl (1 << 30) /* instruction is not implemented */
144 /* Source 2 operand type */
145 #define Src2Shift (31)
146 #define Src2None (OpNone << Src2Shift)
147 #define Src2Mem (OpMem << Src2Shift)
148 #define Src2CL (OpCL << Src2Shift)
149 #define Src2ImmByte (OpImmByte << Src2Shift)
150 #define Src2One (OpOne << Src2Shift)
151 #define Src2Imm (OpImm << Src2Shift)
152 #define Src2ES (OpES << Src2Shift)
153 #define Src2CS (OpCS << Src2Shift)
154 #define Src2SS (OpSS << Src2Shift)
155 #define Src2DS (OpDS << Src2Shift)
156 #define Src2FS (OpFS << Src2Shift)
157 #define Src2GS (OpGS << Src2Shift)
158 #define Src2Mask (OpMask << Src2Shift)
159 #define Mmx ((u64)1 << 40) /* MMX Vector instruction */
160 #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
161 #define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
162 #define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
163 #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
164 #define NoWrite ((u64)1 << 45) /* No writeback */
165 #define SrcWrite ((u64)1 << 46) /* Write back src operand */
166 #define NoMod ((u64)1 << 47) /* Mod field is ignored */
167 #define Intercept ((u64)1 << 48) /* Has valid intercept field */
168 #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
169 #define NoBigReal ((u64)1 << 50) /* No big real mode */
170 #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
171 #define NearBranch ((u64)1 << 52) /* Near branches */
172 #define No16 ((u64)1 << 53) /* No 16 bit operand */
174 #define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
176 #define X2(x...) x, x
177 #define X3(x...) X2(x), x
178 #define X4(x...) X2(x), X2(x)
179 #define X5(x...) X4(x), x
180 #define X6(x...) X4(x), X2(x)
181 #define X7(x...) X4(x), X3(x)
182 #define X8(x...) X4(x), X4(x)
183 #define X16(x...) X8(x), X8(x)
185 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
186 #define FASTOP_SIZE 8
189 * fastop functions have a special calling convention:
190 *
191 * dst:    rax        (in/out)
192 * src:    rdx        (in/out)
193 * src2:   rcx        (in)
194 * flags:  rflags     (in/out)
195 * ex: rsi (in:fastop pointer, out:zero if exception)
197 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
198 * different operand sizes can be reached by calculation, rather than a jump
199 * table (which would be bigger than the code).
201 * fastop functions are declared as taking a never-defined fastop parameter,
202 * so they can't be called from C directly.
203 */
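/*
 * Illustrative sketch (editor's addition): size dispatch mirrors the
 * fastop() helper declared further down. Because the stubs are laid
 * out FASTOP_SIZE bytes apart in b/w/l/q order, a non-byte operation
 * with ctxt->dst.bytes == 2/4/8 is reached by plain pointer
 * arithmetic, e.g. using em_add as an example:
 *
 *	void (*fop)(struct fastop *) = em_add;
 *	if (!(ctxt->d & ByteOp))
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 */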
205 struct fastop;
207 struct opcode {
208 u64 flags : 56;
209 u64 intercept : 8;
210 union {
211 int (*execute)(struct x86_emulate_ctxt *ctxt);
212 const struct opcode *group;
213 const struct group_dual *gdual;
214 const struct gprefix *gprefix;
215 const struct escape *esc;
216 const struct instr_dual *idual;
217 void (*fastop)(struct fastop *fake);
218 } u;
219 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
220 };
222 struct group_dual {
223 struct opcode mod012[8];
224 struct opcode mod3[8];
225 };
227 struct gprefix {
228 struct opcode pfx_no;
229 struct opcode pfx_66;
230 struct opcode pfx_f2;
231 struct opcode pfx_f3;
232 };
234 struct escape {
235 struct opcode op[8];
236 struct opcode high[64];
237 };
239 struct instr_dual {
240 struct opcode mod012;
241 struct opcode mod3;
242 };
244 /* EFLAGS bit definitions. */
245 #define EFLG_ID (1<<21)
246 #define EFLG_VIP (1<<20)
247 #define EFLG_VIF (1<<19)
248 #define EFLG_AC (1<<18)
249 #define EFLG_VM (1<<17)
250 #define EFLG_RF (1<<16)
251 #define EFLG_IOPL (3<<12)
252 #define EFLG_NT (1<<14)
253 #define EFLG_OF (1<<11)
254 #define EFLG_DF (1<<10)
255 #define EFLG_IF (1<<9)
256 #define EFLG_TF (1<<8)
257 #define EFLG_SF (1<<7)
258 #define EFLG_ZF (1<<6)
259 #define EFLG_AF (1<<4)
260 #define EFLG_PF (1<<2)
261 #define EFLG_CF (1<<0)
263 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
264 #define EFLG_RESERVED_ONE_MASK 2
266 enum x86_transfer_type {
267 X86_TRANSFER_NONE,
268 X86_TRANSFER_CALL_JMP,
269 X86_TRANSFER_RET,
270 X86_TRANSFER_TASK_SWITCH,
271 };
273 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
275 if (!(ctxt->regs_valid & (1 << nr))) {
276 ctxt->regs_valid |= 1 << nr;
277 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
278 }
279 return ctxt->_regs[nr];
280 }
282 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
284 ctxt->regs_valid |= 1 << nr;
285 ctxt->regs_dirty |= 1 << nr;
286 return &ctxt->_regs[nr];
289 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
291 reg_read(ctxt, nr);
292 return reg_write(ctxt, nr);
293 }
295 static void writeback_registers(struct x86_emulate_ctxt *ctxt)
299 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
300 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
303 static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
305 ctxt->regs_dirty = 0;
306 ctxt->regs_valid = 0;
307 }
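/*
 * Illustrative sketch (editor's addition): typical life cycle of this
 * GPR cache around one emulated instruction:
 *
 *	invalidate_registers(ctxt);            // start with a cold cache
 *	*reg_rmw(ctxt, VCPU_REGS_RAX) += 1;    // fill cache + mark dirty
 *	writeback_registers(ctxt);             // flush only dirty GPRs
 */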
310 * These EFLAGS bits are restored from saved value during emulation, and
311 * any changes are written back to the saved value after emulation.
313 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
321 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
323 #define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
324 #define FOP_RET "ret \n\t"
326 #define FOP_START(op) \
327 extern void em_##op(struct fastop *fake); \
328 asm(".pushsection .text, \"ax\" \n\t" \
329 ".global em_" #op " \n\t" \
330 FOP_ALIGN \
331 "em_" #op ": \n\t"
333 #define FOP_END \
334 ".popsection")
336 #define FOPNOP() FOP_ALIGN FOP_RET
338 #define FOP1E(op, dst) \
339 FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET
341 #define FOP1EEX(op, dst) \
342 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
344 #define FASTOP1(op) \
345 FOP_START(op) \
346 FOP1E(op##b, al) \
347 FOP1E(op##w, ax) \
348 FOP1E(op##l, eax) \
349 ON64(FOP1E(op##q, rax)) \
350 FOP_END
352 /* 1-operand, using src2 (for MUL/DIV r/m) */
353 #define FASTOP1SRC2(op, name) \
358 ON64(FOP1E(op, rcx)) \
361 /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
362 #define FASTOP1SRC2EX(op, name) \
367 ON64(FOP1EEX(op, rcx)) \
370 #define FOP2E(op, dst, src) \
371 FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET
373 #define FASTOP2(op) \
375 FOP2E(op##b, al, dl) \
376 FOP2E(op##w, ax, dx) \
377 FOP2E(op##l, eax, edx) \
378 ON64(FOP2E(op##q, rax, rdx)) \
381 /* 2 operand, word only */
382 #define FASTOP2W(op) \
385 FOP2E(op##w, ax, dx) \
386 FOP2E(op##l, eax, edx) \
387 ON64(FOP2E(op##q, rax, rdx)) \
390 /* 2 operand, src is CL */
391 #define FASTOP2CL(op) \
393 FOP2E(op##b, al, cl) \
394 FOP2E(op##w, ax, cl) \
395 FOP2E(op##l, eax, cl) \
396 ON64(FOP2E(op##q, rax, cl)) \
399 /* 2 operand, src and dest are reversed */
400 #define FASTOP2R(op, name) \
402 FOP2E(op##b, dl, al) \
403 FOP2E(op##w, dx, ax) \
404 FOP2E(op##l, edx, eax) \
405 ON64(FOP2E(op##q, rdx, rax)) \
408 #define FOP3E(op, dst, src, src2) \
409 FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
411 /* 3-operand, word-only, src2=cl */
412 #define FASTOP3WCL(op) \
415 FOP3E(op##w, ax, dx, cl) \
416 FOP3E(op##l, eax, edx, cl) \
417 ON64(FOP3E(op##q, rax, rdx, cl)) \
420 /* Special case for SETcc - 1 instruction per cc */
421 #define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"
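/*
 * Editor's note: each FOP_SETCC entry is a fixed 4-byte stub
 * ("setcc %al; ret"), so the em_setcc table can be indexed by
 * condition code without a jump table; see test_cc() below, which
 * computes em_setcc + 4 * (condition & 0xf).
 */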
423 asm(".global kvm_fastop_exception \n"
424 "kvm_fastop_exception: xor %esi, %esi; ret");
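/*
 * Editor's note: FOP1EEX attaches an exception-table entry to the
 * faulting instruction, redirecting e.g. a fault from DIV/IDIV to
 * kvm_fastop_exception above; clearing %esi reports "ex == 0" per the
 * fastop calling convention, which the fastop() caller can then turn
 * into a guest #DE.
 */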
445 FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
448 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
449 enum x86_intercept intercept,
450 enum x86_intercept_stage stage)
452 struct x86_instruction_info info = {
453 .intercept = intercept,
454 .rep_prefix = ctxt->rep_prefix,
455 .modrm_mod = ctxt->modrm_mod,
456 .modrm_reg = ctxt->modrm_reg,
457 .modrm_rm = ctxt->modrm_rm,
458 .src_val = ctxt->src.val64,
459 .dst_val = ctxt->dst.val64,
460 .src_bytes = ctxt->src.bytes,
461 .dst_bytes = ctxt->dst.bytes,
462 .ad_bytes = ctxt->ad_bytes,
463 .next_rip = ctxt->eip,
464 };
466 return ctxt->ops->intercept(ctxt, &info, stage);
467 }
469 static void assign_masked(ulong *dest, ulong src, ulong mask)
471 *dest = (*dest & ~mask) | (src & mask);
474 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
476 return (1UL << (ctxt->ad_bytes << 3)) - 1;
479 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
481 u16 sel;
482 struct desc_struct ss;
484 if (ctxt->mode == X86EMUL_MODE_PROT64)
485 return ~0UL;
486 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
487 return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
490 static int stack_size(struct x86_emulate_ctxt *ctxt)
492 return (__fls(stack_mask(ctxt)) + 1) >> 3;
493 }
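/*
 * Editor's example: with a 32-bit stack segment (ss.d == 1),
 * stack_mask() is ~0U >> 0 == 0xffffffff and stack_size() is
 * (__fls(0xffffffff) + 1) >> 3 == 4 bytes; a 16-bit SS yields 0xffff
 * and 2 bytes.
 */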
495 /* Access/update address held in a register, based on addressing mode. */
496 static inline unsigned long
497 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
499 if (ctxt->ad_bytes == sizeof(unsigned long))
500 return reg;
501 else
502 return reg & ad_mask(ctxt);
503 }
505 static inline unsigned long
506 register_address(struct x86_emulate_ctxt *ctxt, int reg)
508 return address_mask(ctxt, reg_read(ctxt, reg));
511 static void masked_increment(ulong *reg, ulong mask, int inc)
513 assign_masked(reg, *reg + inc, mask);
517 register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
521 if (ctxt->ad_bytes == sizeof(unsigned long))
522 mask = ~0UL;
523 else
524 mask = ad_mask(ctxt);
525 masked_increment(reg_rmw(ctxt, reg), mask, inc);
528 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
530 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
533 static u32 desc_limit_scaled(struct desc_struct *desc)
535 u32 limit = get_desc_limit(desc);
537 return desc->g ? (limit << 12) | 0xfff : limit;
538 }
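/*
 * Editor's example: with g == 1 a raw limit of 0xfffff scales to
 * (0xfffff << 12) | 0xfff == 0xffffffff, i.e. a page-granular
 * descriptor can cover the full 4GB.
 */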
540 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
542 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
543 return 0;
545 return ctxt->ops->get_cached_segment_base(ctxt, seg);
546 }
548 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
549 u32 error, bool valid)
552 ctxt->exception.vector = vec;
553 ctxt->exception.error_code = error;
554 ctxt->exception.error_code_valid = valid;
555 return X86EMUL_PROPAGATE_FAULT;
558 static int emulate_db(struct x86_emulate_ctxt *ctxt)
560 return emulate_exception(ctxt, DB_VECTOR, 0, false);
563 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
565 return emulate_exception(ctxt, GP_VECTOR, err, true);
568 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
570 return emulate_exception(ctxt, SS_VECTOR, err, true);
573 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
575 return emulate_exception(ctxt, UD_VECTOR, 0, false);
578 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
580 return emulate_exception(ctxt, TS_VECTOR, err, true);
583 static int emulate_de(struct x86_emulate_ctxt *ctxt)
585 return emulate_exception(ctxt, DE_VECTOR, 0, false);
588 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
590 return emulate_exception(ctxt, NM_VECTOR, 0, false);
593 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
596 struct desc_struct desc;
598 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
600 return selector;
601 }
602 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
607 struct desc_struct desc;
609 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
610 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
614 * x86 defines three classes of vector instructions: explicitly
615 * aligned, explicitly unaligned, and the rest, which change behaviour
616 * depending on whether they're AVX encoded or not.
618 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
619 * subject to the same check.
621 static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
623 if (likely(size < 16))
624 return false;
626 if (ctxt->d & Aligned)
627 return true;
628 else if (ctxt->d & Unaligned)
629 return false;
630 else if (ctxt->d & Avx)
631 return false;
632 else
633 return true;
634 }
636 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
637 struct segmented_address addr,
638 unsigned *max_size, unsigned size,
639 bool write, bool fetch,
640 enum x86emul_mode mode, ulong *linear)
642 struct desc_struct desc;
648 la = seg_base(ctxt, addr.seg) + addr.ea;
651 case X86EMUL_MODE_PROT64:
652 if (is_noncanonical_address(la))
655 *max_size = min_t(u64, ~0u, (1ull << 48) - la);
656 if (size > *max_size)
657 goto bad;
658 break;
659 default:
660 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
661 addr.seg);
662 if (!usable)
663 goto bad;
664 /* code segment in protected mode or read-only data segment */
665 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
666 || !(desc.type & 2)) && write)
667 goto bad;
668 /* unreadable code segment */
669 if (!fetch && (desc.type & 8) && !(desc.type & 2))
670 goto bad;
671 lim = desc_limit_scaled(&desc);
672 if (!(desc.type & 8) && (desc.type & 4)) {
673 /* expand-down segment */
676 lim = desc.d ? 0xffffffff : 0xffff;
680 *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
681 if (size > *max_size)
682 goto bad;
683 break;
684 }
686 if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
687 return emulate_gp(ctxt, 0);
688 *linear = la;
689 return X86EMUL_CONTINUE;
690 bad:
691 if (addr.seg == VCPU_SREG_SS)
692 return emulate_ss(ctxt, 0);
694 return emulate_gp(ctxt, 0);
697 static int linearize(struct x86_emulate_ctxt *ctxt,
698 struct segmented_address addr,
699 unsigned size, bool write,
703 return __linearize(ctxt, addr, &max_size, size, write, false,
707 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
708 enum x86emul_mode mode)
713 struct segmented_address addr = { .seg = VCPU_SREG_CS,
714 .ea = dst };
716 if (ctxt->op_bytes != sizeof(unsigned long))
717 addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
718 rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
719 if (rc == X86EMUL_CONTINUE)
720 ctxt->_eip = addr.ea;
724 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
726 return assign_eip(ctxt, dst, ctxt->mode);
729 static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
730 const struct desc_struct *cs_desc)
732 enum x86emul_mode mode = ctxt->mode;
735 if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) {
736 u64 efer = 0;
738 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
739 if (efer & EFER_LMA)
740 mode = X86EMUL_MODE_PROT64;
741 }
743 if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
744 mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
745 return assign_eip(ctxt, dst, mode);
748 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
750 return assign_eip_near(ctxt, ctxt->_eip + rel);
753 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
754 struct segmented_address addr,
761 rc = linearize(ctxt, addr, size, false, &linear);
762 if (rc != X86EMUL_CONTINUE)
763 return rc;
764 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
768 * Prefetch the remaining bytes of the instruction without crossing a
769 * page boundary, if they are not already in fetch_cache.
770 */
771 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
774 unsigned size, max_size;
775 unsigned long linear;
776 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
777 struct segmented_address addr = { .seg = VCPU_SREG_CS,
778 .ea = ctxt->eip + cur_size };
781 * We do not know exactly how many bytes will be needed, and
782 * __linearize is expensive, so fetch as much as possible. We
783 * just have to avoid going beyond the 15 byte limit, the end
784 * of the segment, or the end of the page.
786 * __linearize is called with size 0 so that it does not do any
787 * boundary check itself. Instead, we use max_size to check
788 * against op_size.
789 */
790 rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
792 if (unlikely(rc != X86EMUL_CONTINUE))
795 size = min_t(unsigned, 15UL ^ cur_size, max_size);
796 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
799 * One instruction can only straddle two pages,
800 * and one has been loaded at the beginning of
801 * x86_decode_insn. So, if we still do not have enough
802 * bytes, we must have hit the 15-byte limit.
803 */
804 if (unlikely(size < op_size))
805 return emulate_gp(ctxt, 0);
807 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
808 size, &ctxt->exception);
809 if (unlikely(rc != X86EMUL_CONTINUE))
811 ctxt->fetch.end += size;
812 return X86EMUL_CONTINUE;
815 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
818 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
820 if (unlikely(done_size < size))
821 return __do_insn_fetch_bytes(ctxt, size - done_size);
823 return X86EMUL_CONTINUE;
826 /* Fetch next part of the instruction being emulated. */
827 #define insn_fetch(_type, _ctxt) \
828 ({ _type _x; \
830 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
831 if (rc != X86EMUL_CONTINUE) \
832 goto done; \
833 ctxt->_eip += sizeof(_type); \
834 _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
835 ctxt->fetch.ptr += sizeof(_type); \
836 _x; \
837 })
839 #define insn_fetch_arr(_arr, _size, _ctxt) \
840 ({ \
841 rc = do_insn_fetch_bytes(_ctxt, _size); \
842 if (rc != X86EMUL_CONTINUE) \
843 goto done; \
844 ctxt->_eip += (_size); \
845 memcpy(_arr, ctxt->fetch.ptr, _size); \
846 ctxt->fetch.ptr += (_size); \
847 })
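/*
 * Illustrative sketch (editor's addition): the decoder consumes the
 * instruction stream through these macros; both expect an "int rc"
 * and a "done:" label in the enclosing function, e.g.:
 *
 *	ctxt->b = insn_fetch(u8, ctxt);      // opcode byte
 *	ctxt->modrm = insn_fetch(u8, ctxt);  // ModRM byte, if any
 */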
850 * Given the 'reg' portion of a ModRM byte, and a register block, return a
851 * pointer into the block that addresses the relevant register.
852 * byteop (with no REX prefix) selects decoding regs 4..7 as AH,CH,DH,BH.
853 */
854 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
858 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
860 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
861 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
862 else
863 p = reg_rmw(ctxt, modrm_reg);
864 return p;
865 }
867 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
868 struct segmented_address addr,
869 u16 *size, unsigned long *address, int op_bytes)
876 rc = segmented_read_std(ctxt, addr, size, 2);
877 if (rc != X86EMUL_CONTINUE)
878 return rc;
879 addr.ea += 2;
880 rc = segmented_read_std(ctxt, addr, address, op_bytes);
881 return rc;
882 }
894 FASTOP1SRC2(mul, mul_ex);
895 FASTOP1SRC2(imul, imul_ex);
896 FASTOP1SRC2EX(div, div_ex);
897 FASTOP1SRC2EX(idiv, idiv_ex);
926 FASTOP2R(cmp, cmp_r);
928 static u8 test_cc(unsigned int condition, unsigned long flags)
931 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
933 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
934 asm("push %[flags]; popf; call *%[fastop]"
935 : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
939 static void fetch_register_operand(struct operand *op)
943 op->val = *(u8 *)op->addr.reg;
946 op->val = *(u16 *)op->addr.reg;
949 op->val = *(u32 *)op->addr.reg;
952 op->val = *(u64 *)op->addr.reg;
957 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
959 ctxt->ops->get_fpu(ctxt);
961 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
962 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
963 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
964 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
965 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
966 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
967 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
968 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
970 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
971 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
972 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
973 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
974 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
975 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
976 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
977 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
981 ctxt->ops->put_fpu(ctxt);
984 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
987 ctxt->ops->get_fpu(ctxt);
989 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
990 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
991 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
992 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
993 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
994 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
995 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
996 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
998 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
999 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
1000 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
1001 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
1002 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
1003 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
1004 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
1005 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
1009 ctxt->ops->put_fpu(ctxt);
1012 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1014 ctxt->ops->get_fpu(ctxt);
1016 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1017 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1018 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1019 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1020 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1021 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1022 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1023 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1026 ctxt->ops->put_fpu(ctxt);
1029 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1031 ctxt->ops->get_fpu(ctxt);
1033 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1034 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1035 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1036 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1037 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1038 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1039 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1040 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1043 ctxt->ops->put_fpu(ctxt);
1046 static int em_fninit(struct x86_emulate_ctxt *ctxt)
1048 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1049 return emulate_nm(ctxt);
1051 ctxt->ops->get_fpu(ctxt);
1052 asm volatile("fninit");
1053 ctxt->ops->put_fpu(ctxt);
1054 return X86EMUL_CONTINUE;
1057 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1061 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1062 return emulate_nm(ctxt);
1064 ctxt->ops->get_fpu(ctxt);
1065 asm volatile("fnstcw %0": "+m"(fcw));
1066 ctxt->ops->put_fpu(ctxt);
1068 ctxt->dst.val = fcw;
1070 return X86EMUL_CONTINUE;
1073 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1077 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1078 return emulate_nm(ctxt);
1080 ctxt->ops->get_fpu(ctxt);
1081 asm volatile("fnstsw %0": "+m"(fsw));
1082 ctxt->ops->put_fpu(ctxt);
1084 ctxt->dst.val = fsw;
1086 return X86EMUL_CONTINUE;
1089 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1092 unsigned reg = ctxt->modrm_reg;
1094 if (!(ctxt->d & ModRM))
1095 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1097 if (ctxt->d & Sse) {
1101 read_sse_reg(ctxt, &op->vec_val, reg);
1104 if (ctxt->d & Mmx) {
1113 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1114 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1116 fetch_register_operand(op);
1117 op->orig_val = op->val;
1120 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1122 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1123 ctxt->modrm_seg = VCPU_SREG_SS;
1126 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1130 int index_reg, base_reg, scale;
1131 int rc = X86EMUL_CONTINUE;
1134 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1135 index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1136 base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1138 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1139 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1140 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1141 ctxt->modrm_seg = VCPU_SREG_DS;
1143 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1145 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1146 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1148 if (ctxt->d & Sse) {
1151 op->addr.xmm = ctxt->modrm_rm;
1152 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1155 if (ctxt->d & Mmx) {
1158 op->addr.mm = ctxt->modrm_rm & 7;
1161 fetch_register_operand(op);
1167 if (ctxt->ad_bytes == 2) {
1168 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1169 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1170 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1171 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1173 /* 16-bit ModR/M decode. */
1174 switch (ctxt->modrm_mod) {
1176 if (ctxt->modrm_rm == 6)
1177 modrm_ea += insn_fetch(u16, ctxt);
1180 modrm_ea += insn_fetch(s8, ctxt);
1183 modrm_ea += insn_fetch(u16, ctxt);
1186 switch (ctxt->modrm_rm) {
1188 modrm_ea += bx + si;
1191 modrm_ea += bx + di;
1194 modrm_ea += bp + si;
1197 modrm_ea += bp + di;
1206 if (ctxt->modrm_mod != 0)
1213 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1214 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1215 ctxt->modrm_seg = VCPU_SREG_SS;
1216 modrm_ea = (u16)modrm_ea;
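/*
 * Editor's example: ModRM byte 0x42 in 16-bit mode decodes as
 * mod=01 rm=010, i.e. [bp+si+disp8]; the code above sums bp+si,
 * fetches the s8 displacement, and (because BP is involved, rm == 2)
 * defaults the segment to SS.
 */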
1218 /* 32/64-bit ModR/M decode. */
1219 if ((ctxt->modrm_rm & 7) == 4) {
1220 sib = insn_fetch(u8, ctxt);
1221 index_reg |= (sib >> 3) & 7;
1222 base_reg |= sib & 7;
1225 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1226 modrm_ea += insn_fetch(s32, ctxt);
1228 modrm_ea += reg_read(ctxt, base_reg);
1229 adjust_modrm_seg(ctxt, base_reg);
1232 modrm_ea += reg_read(ctxt, index_reg) << scale;
1233 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1234 modrm_ea += insn_fetch(s32, ctxt);
1235 if (ctxt->mode == X86EMUL_MODE_PROT64)
1236 ctxt->rip_relative = 1;
1238 base_reg = ctxt->modrm_rm;
1239 modrm_ea += reg_read(ctxt, base_reg);
1240 adjust_modrm_seg(ctxt, base_reg);
1242 switch (ctxt->modrm_mod) {
1244 modrm_ea += insn_fetch(s8, ctxt);
1247 modrm_ea += insn_fetch(s32, ctxt);
1251 op->addr.mem.ea = modrm_ea;
1252 if (ctxt->ad_bytes != 8)
1253 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1259 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1262 int rc = X86EMUL_CONTINUE;
1265 switch (ctxt->ad_bytes) {
1267 op->addr.mem.ea = insn_fetch(u16, ctxt);
1270 op->addr.mem.ea = insn_fetch(u32, ctxt);
1273 op->addr.mem.ea = insn_fetch(u64, ctxt);
1280 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1284 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1285 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1287 if (ctxt->src.bytes == 2)
1288 sv = (s16)ctxt->src.val & (s16)mask;
1289 else if (ctxt->src.bytes == 4)
1290 sv = (s32)ctxt->src.val & (s32)mask;
1292 sv = (s64)ctxt->src.val & (s64)mask;
1294 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1295 ctxt->dst.addr.mem.ea + (sv >> 3));
1298 /* only subword offset */
1299 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
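/*
 * Editor's example: for "bts %ecx, mem" with ECX == 35 and a 4-byte
 * destination, mask == ~31 and sv == 32, so the effective address
 * advances by sv >> 3 == 4 bytes and the remaining in-word bit offset
 * becomes 35 & 31 == 3.
 */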
1302 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1303 unsigned long addr, void *dest, unsigned size)
1306 struct read_cache *mc = &ctxt->mem_read;
1308 if (mc->pos < mc->end)
1309 goto read_cached;
1311 WARN_ON((mc->end + size) >= sizeof(mc->data));
1313 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1315 if (rc != X86EMUL_CONTINUE)
1316 return rc;
1318 mc->end += size;
1320 read_cached:
1321 memcpy(dest, mc->data + mc->pos, size);
1322 mc->pos += size;
1323 return X86EMUL_CONTINUE;
1326 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1327 struct segmented_address addr,
1334 rc = linearize(ctxt, addr, size, false, &linear);
1335 if (rc != X86EMUL_CONTINUE)
1337 return read_emulated(ctxt, linear, data, size);
1340 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1341 struct segmented_address addr,
1348 rc = linearize(ctxt, addr, size, true, &linear);
1349 if (rc != X86EMUL_CONTINUE)
1351 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1355 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1356 struct segmented_address addr,
1357 const void *orig_data, const void *data,
1363 rc = linearize(ctxt, addr, size, true, &linear);
1364 if (rc != X86EMUL_CONTINUE)
1366 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1367 size, &ctxt->exception);
1370 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1371 unsigned int size, unsigned short port,
1374 struct read_cache *rc = &ctxt->io_read;
1376 if (rc->pos == rc->end) { /* refill pio read ahead */
1377 unsigned int in_page, n;
1378 unsigned int count = ctxt->rep_prefix ?
1379 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1380 in_page = (ctxt->eflags & EFLG_DF) ?
1381 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1382 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1383 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1386 rc->pos = rc->end = 0;
1387 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1388 return 0;
1389 rc->end = n * size;
1390 }
1392 if (ctxt->rep_prefix && (ctxt->d & String) &&
1393 !(ctxt->eflags & EFLG_DF)) {
1394 ctxt->dst.data = rc->data + rc->pos;
1395 ctxt->dst.type = OP_MEM_STR;
1396 ctxt->dst.count = (rc->end - rc->pos) / size;
1397 rc->pos = rc->end;
1398 } else {
1399 memcpy(dest, rc->data + rc->pos, size);
1400 rc->pos += size;
1401 }
1402 return 1;
1403 }
1405 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1406 u16 index, struct desc_struct *desc)
1411 ctxt->ops->get_idt(ctxt, &dt);
1413 if (dt.size < index * 8 + 7)
1414 return emulate_gp(ctxt, index << 3 | 0x2);
1416 addr = dt.address + index * 8;
1417 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1418 &ctxt->exception);
1419 }
1421 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1422 u16 selector, struct desc_ptr *dt)
1424 const struct x86_emulate_ops *ops = ctxt->ops;
1427 if (selector & 1 << 2) {
1428 struct desc_struct desc;
1431 memset (dt, 0, sizeof *dt);
1432 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1433 VCPU_SREG_LDTR))
1434 return;
1436 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1437 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1438 } else
1439 ops->get_gdt(ctxt, dt);
1440 }
1442 /* allowed just for 8-byte segment descriptors */
1443 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1444 u16 selector, struct desc_struct *desc,
1448 u16 index = selector >> 3;
1451 get_descriptor_table_ptr(ctxt, selector, &dt);
1453 if (dt.size < index * 8 + 7)
1454 return emulate_gp(ctxt, selector & 0xfffc);
1456 *desc_addr_p = addr = dt.address + index * 8;
1457 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1458 &ctxt->exception);
1459 }
1461 /* allowed just for 8-byte segment descriptors */
1462 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1463 u16 selector, struct desc_struct *desc)
1466 u16 index = selector >> 3;
1469 get_descriptor_table_ptr(ctxt, selector, &dt);
1471 if (dt.size < index * 8 + 7)
1472 return emulate_gp(ctxt, selector & 0xfffc);
1474 addr = dt.address + index * 8;
1475 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1476 &ctxt->exception);
1477 }
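/*
 * Editor's note: a selector decomposes as index = selector >> 3,
 * table indicator = selector & (1 << 2) (LDT if set), RPL =
 * selector & 3; hence the "dt.size < index * 8 + 7" bounds check on
 * the 8-byte descriptor and the "selector & 0xfffc" error code that
 * strips the RPL bits.
 */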
1479 /* Does not support long mode */
1480 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1481 u16 selector, int seg, u8 cpl,
1482 enum x86_transfer_type transfer,
1483 struct desc_struct *desc)
1485 struct desc_struct seg_desc, old_desc;
1487 unsigned err_vec = GP_VECTOR;
1489 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1495 memset(&seg_desc, 0, sizeof seg_desc);
1497 if (ctxt->mode == X86EMUL_MODE_REAL) {
1498 /* set real mode segment descriptor (keep limit etc. for
1499 * unreal mode) */
1500 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1501 set_desc_base(&seg_desc, selector << 4);
1502 goto load;
1503 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1504 /* VM86 needs a clean new segment descriptor */
1505 set_desc_base(&seg_desc, selector << 4);
1506 set_desc_limit(&seg_desc, 0xffff);
1507 seg_desc.type = 3;
1508 seg_desc.p = 1;
1509 seg_desc.s = 1;
1510 seg_desc.dpl = 3;
1511 goto load;
1512 }
1514 rpl = selector & 3;
1516 /* NULL selector is not valid for TR, CS and SS (except for long mode) */
1517 if ((seg == VCPU_SREG_CS
1518 || (seg == VCPU_SREG_SS
1519 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1520 || seg == VCPU_SREG_TR)
1521 && null_selector)
1522 goto exception;
1524 /* TR should be in GDT only */
1525 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1528 if (null_selector) /* for NULL selector skip all following checks */
1531 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1532 if (ret != X86EMUL_CONTINUE)
1535 err_code = selector & 0xfffc;
1536 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1539 /* can't load system descriptor into segment selector */
1540 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1541 if (transfer == X86_TRANSFER_CALL_JMP)
1542 return X86EMUL_UNHANDLEABLE;
1543 goto exception;
1544 }
1546 if (!seg_desc.p) {
1547 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1548 goto exception;
1549 }
1551 dpl = seg_desc.dpl;
1553 switch (seg) {
1554 case VCPU_SREG_SS:
1555 /*
1556 * segment is not a writable data segment, or the segment
1557 * selector's RPL != CPL, or DPL != CPL
1558 */
1559 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1560 goto exception;
1561 break;
1562 case VCPU_SREG_CS:
1563 if (!(seg_desc.type & 8))
1564 goto exception;
1566 if (seg_desc.type & 4) {
1567 /* conforming */
1568 if (dpl > cpl)
1569 goto exception;
1570 } else {
1571 /* nonconforming */
1572 if (rpl > cpl || dpl != cpl)
1573 goto exception;
1574 }
1575 /* in long-mode d/b must be clear if l is set */
1576 if (seg_desc.d && seg_desc.l) {
1577 u64 efer = 0;
1579 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1580 if (efer & EFER_LMA)
1581 goto exception;
1582 }
1584 /* CS(RPL) <- CPL */
1585 selector = (selector & 0xfffc) | cpl;
1586 break;
1587 case VCPU_SREG_TR:
1588 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1589 goto exception;
1590 old_desc = seg_desc;
1591 seg_desc.type |= 2; /* busy */
1592 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1593 sizeof(seg_desc), &ctxt->exception);
1594 if (ret != X86EMUL_CONTINUE)
1595 return ret;
1596 break;
1597 case VCPU_SREG_LDTR:
1598 if (seg_desc.s || seg_desc.type != 2)
1599 goto exception;
1600 break;
1601 default: /* DS, ES, FS, or GS */
1603 * segment is not a data or readable code segment or
1604 * ((segment is a data or nonconforming code segment)
1605 * and (both RPL and CPL > DPL))
1607 if ((seg_desc.type & 0xa) == 0x8 ||
1608 (((seg_desc.type & 0xc) != 0xc) &&
1609 (rpl > dpl && cpl > dpl)))
1610 goto exception;
1611 break;
1612 }
1614 if (seg_desc.s) {
1615 /* mark segment as accessed */
1616 seg_desc.type |= 1;
1617 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1618 if (ret != X86EMUL_CONTINUE)
1619 return ret;
1620 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1621 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1622 sizeof(base3), &ctxt->exception);
1623 if (ret != X86EMUL_CONTINUE)
1624 return ret;
1625 if (is_noncanonical_address(get_desc_base(&seg_desc) |
1626 ((u64)base3 << 32)))
1627 return emulate_gp(ctxt, 0);
1629 load:
1630 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1633 return X86EMUL_CONTINUE;
1634 exception:
1635 return emulate_exception(ctxt, err_vec, err_code, true);
1636 }
1638 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1639 u16 selector, int seg)
1641 u8 cpl = ctxt->ops->cpl(ctxt);
1642 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1643 X86_TRANSFER_NONE, NULL);
1646 static void write_register_operand(struct operand *op)
1648 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1649 switch (op->bytes) {
1651 *(u8 *)op->addr.reg = (u8)op->val;
1654 *(u16 *)op->addr.reg = (u16)op->val;
1657 *op->addr.reg = (u32)op->val;
1658 break; /* 64b: zero-extend */
1660 *op->addr.reg = op->val;
1665 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1669 write_register_operand(op);
1672 if (ctxt->lock_prefix)
1673 return segmented_cmpxchg(ctxt,
1679 return segmented_write(ctxt,
1685 return segmented_write(ctxt,
1688 op->bytes * op->count);
1691 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1694 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1702 return X86EMUL_CONTINUE;
1705 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1707 struct segmented_address addr;
1709 rsp_increment(ctxt, -bytes);
1710 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1711 addr.seg = VCPU_SREG_SS;
1713 return segmented_write(ctxt, addr, data, bytes);
1716 static int em_push(struct x86_emulate_ctxt *ctxt)
1718 /* Disable writeback. */
1719 ctxt->dst.type = OP_NONE;
1720 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1723 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1724 void *dest, int len)
1727 struct segmented_address addr;
1729 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1730 addr.seg = VCPU_SREG_SS;
1731 rc = segmented_read(ctxt, addr, dest, len);
1732 if (rc != X86EMUL_CONTINUE)
1735 rsp_increment(ctxt, len);
1739 static int em_pop(struct x86_emulate_ctxt *ctxt)
1741 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1744 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1745 void *dest, int len)
1748 unsigned long val, change_mask;
1749 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1750 int cpl = ctxt->ops->cpl(ctxt);
1752 rc = emulate_pop(ctxt, &val, len);
1753 if (rc != X86EMUL_CONTINUE)
1756 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1757 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
1759 switch(ctxt->mode) {
1760 case X86EMUL_MODE_PROT64:
1761 case X86EMUL_MODE_PROT32:
1762 case X86EMUL_MODE_PROT16:
1763 if (cpl == 0)
1764 change_mask |= EFLG_IOPL;
1765 if (cpl <= iopl)
1766 change_mask |= EFLG_IF;
1767 break;
1768 case X86EMUL_MODE_VM86:
1769 if (iopl < 3)
1770 return emulate_gp(ctxt, 0);
1771 change_mask |= EFLG_IF;
1772 break;
1773 default: /* real mode */
1774 change_mask |= (EFLG_IOPL | EFLG_IF);
1778 *(unsigned long *)dest =
1779 (ctxt->eflags & ~change_mask) | (val & change_mask);
1784 static int em_popf(struct x86_emulate_ctxt *ctxt)
1786 ctxt->dst.type = OP_REG;
1787 ctxt->dst.addr.reg = &ctxt->eflags;
1788 ctxt->dst.bytes = ctxt->op_bytes;
1789 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1792 static int em_enter(struct x86_emulate_ctxt *ctxt)
1795 unsigned frame_size = ctxt->src.val;
1796 unsigned nesting_level = ctxt->src2.val & 31;
1799 if (nesting_level)
1800 return X86EMUL_UNHANDLEABLE;
1802 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1803 rc = push(ctxt, &rbp, stack_size(ctxt));
1804 if (rc != X86EMUL_CONTINUE)
1806 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1808 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1809 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1811 return X86EMUL_CONTINUE;
1814 static int em_leave(struct x86_emulate_ctxt *ctxt)
1816 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1818 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1821 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1823 int seg = ctxt->src2.val;
1825 ctxt->src.val = get_segment_selector(ctxt, seg);
1826 if (ctxt->op_bytes == 4) {
1827 rsp_increment(ctxt, -2);
1828 ctxt->op_bytes = 2;
1829 }
1831 return em_push(ctxt);
1834 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1836 int seg = ctxt->src2.val;
1837 unsigned long selector;
1840 rc = emulate_pop(ctxt, &selector, 2);
1841 if (rc != X86EMUL_CONTINUE)
1844 if (ctxt->modrm_reg == VCPU_SREG_SS)
1845 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1846 if (ctxt->op_bytes > 2)
1847 rsp_increment(ctxt, ctxt->op_bytes - 2);
1849 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1853 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1855 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1856 int rc = X86EMUL_CONTINUE;
1857 int reg = VCPU_REGS_RAX;
1859 while (reg <= VCPU_REGS_RDI) {
1860 (reg == VCPU_REGS_RSP) ?
1861 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
1864 if (rc != X86EMUL_CONTINUE)
1873 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1875 ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
1876 return em_push(ctxt);
1879 static int em_popa(struct x86_emulate_ctxt *ctxt)
1881 int rc = X86EMUL_CONTINUE;
1882 int reg = VCPU_REGS_RDI;
1884 while (reg >= VCPU_REGS_RAX) {
1885 if (reg == VCPU_REGS_RSP) {
1886 rsp_increment(ctxt, ctxt->op_bytes);
1890 rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
1891 if (rc != X86EMUL_CONTINUE)
1898 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1900 const struct x86_emulate_ops *ops = ctxt->ops;
1907 /* TODO: Add limit checks */
1908 ctxt->src.val = ctxt->eflags;
1910 if (rc != X86EMUL_CONTINUE)
1913 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1915 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1917 if (rc != X86EMUL_CONTINUE)
1920 ctxt->src.val = ctxt->_eip;
1922 if (rc != X86EMUL_CONTINUE)
1925 ops->get_idt(ctxt, &dt);
1927 eip_addr = dt.address + (irq << 2);
1928 cs_addr = dt.address + (irq << 2) + 2;
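/*
 * Editor's note: each real-mode IVT entry is 4 bytes, offset first
 * and then segment, so vector 'irq' lives at dt.address + irq * 4.
 */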
1930 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1931 if (rc != X86EMUL_CONTINUE)
1934 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1935 if (rc != X86EMUL_CONTINUE)
1938 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1939 if (rc != X86EMUL_CONTINUE)
1947 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1951 invalidate_registers(ctxt);
1952 rc = __emulate_int_real(ctxt, irq);
1953 if (rc == X86EMUL_CONTINUE)
1954 writeback_registers(ctxt);
1958 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1960 switch(ctxt->mode) {
1961 case X86EMUL_MODE_REAL:
1962 return __emulate_int_real(ctxt, irq);
1963 case X86EMUL_MODE_VM86:
1964 case X86EMUL_MODE_PROT16:
1965 case X86EMUL_MODE_PROT32:
1966 case X86EMUL_MODE_PROT64:
1968 /* Protected mode interrupts are not implemented yet */
1969 return X86EMUL_UNHANDLEABLE;
1973 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1975 int rc = X86EMUL_CONTINUE;
1976 unsigned long temp_eip = 0;
1977 unsigned long temp_eflags = 0;
1978 unsigned long cs = 0;
1979 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1980 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1981 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1982 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1984 /* TODO: Add stack limit check */
1986 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1988 if (rc != X86EMUL_CONTINUE)
1991 if (temp_eip & ~0xffff)
1992 return emulate_gp(ctxt, 0);
1994 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1996 if (rc != X86EMUL_CONTINUE)
1999 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2001 if (rc != X86EMUL_CONTINUE)
2004 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2006 if (rc != X86EMUL_CONTINUE)
2009 ctxt->_eip = temp_eip;
2012 if (ctxt->op_bytes == 4)
2013 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2014 else if (ctxt->op_bytes == 2) {
2015 ctxt->eflags &= ~0xffff;
2016 ctxt->eflags |= temp_eflags;
2019 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2020 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
2025 static int em_iret(struct x86_emulate_ctxt *ctxt)
2027 switch(ctxt->mode) {
2028 case X86EMUL_MODE_REAL:
2029 return emulate_iret_real(ctxt);
2030 case X86EMUL_MODE_VM86:
2031 case X86EMUL_MODE_PROT16:
2032 case X86EMUL_MODE_PROT32:
2033 case X86EMUL_MODE_PROT64:
2035 /* iret from protected mode is not implemented yet */
2036 return X86EMUL_UNHANDLEABLE;
2040 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2043 unsigned short sel, old_sel;
2044 struct desc_struct old_desc, new_desc;
2045 const struct x86_emulate_ops *ops = ctxt->ops;
2046 u8 cpl = ctxt->ops->cpl(ctxt);
2048 /* Assignment of RIP may only fail in 64-bit mode */
2049 if (ctxt->mode == X86EMUL_MODE_PROT64)
2050 ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2053 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2055 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2056 X86_TRANSFER_CALL_JMP,
2058 if (rc != X86EMUL_CONTINUE)
2061 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2062 if (rc != X86EMUL_CONTINUE) {
2063 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2064 /* assigning eip failed; restore the old cs */
2065 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2071 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2073 return assign_eip_near(ctxt, ctxt->src.val);
2076 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2081 old_eip = ctxt->_eip;
2082 rc = assign_eip_near(ctxt, ctxt->src.val);
2083 if (rc != X86EMUL_CONTINUE)
2085 ctxt->src.val = old_eip;
2090 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2092 u64 old = ctxt->dst.orig_val64;
2094 if (ctxt->dst.bytes == 16)
2095 return X86EMUL_UNHANDLEABLE;
2097 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2098 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2099 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2100 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2101 ctxt->eflags &= ~EFLG_ZF;
2103 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2104 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2106 ctxt->eflags |= EFLG_ZF;
2108 return X86EMUL_CONTINUE;
2111 static int em_ret(struct x86_emulate_ctxt *ctxt)
2116 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2117 if (rc != X86EMUL_CONTINUE)
2120 return assign_eip_near(ctxt, eip);
2123 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2126 unsigned long eip, cs;
2128 int cpl = ctxt->ops->cpl(ctxt);
2129 struct desc_struct old_desc, new_desc;
2130 const struct x86_emulate_ops *ops = ctxt->ops;
2132 if (ctxt->mode == X86EMUL_MODE_PROT64)
2133 ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2136 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2137 if (rc != X86EMUL_CONTINUE)
2139 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2140 if (rc != X86EMUL_CONTINUE)
2142 /* Outer-privilege level return is not implemented */
2143 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2144 return X86EMUL_UNHANDLEABLE;
2145 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2148 if (rc != X86EMUL_CONTINUE)
2150 rc = assign_eip_far(ctxt, eip, &new_desc);
2151 if (rc != X86EMUL_CONTINUE) {
2152 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2153 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2158 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2162 rc = em_ret_far(ctxt);
2163 if (rc != X86EMUL_CONTINUE)
2165 rsp_increment(ctxt, ctxt->src.val);
2166 return X86EMUL_CONTINUE;
2169 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2171 /* Save real source value, then compare EAX against destination. */
2172 ctxt->dst.orig_val = ctxt->dst.val;
2173 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2174 ctxt->src.orig_val = ctxt->src.val;
2175 ctxt->src.val = ctxt->dst.orig_val;
2176 fastop(ctxt, em_cmp);
2178 if (ctxt->eflags & EFLG_ZF) {
2179 /* Success: write back to memory. */
2180 ctxt->dst.val = ctxt->src.orig_val;
2182 /* Failure: write the value we saw to EAX. */
2183 ctxt->dst.type = OP_REG;
2184 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2185 ctxt->dst.val = ctxt->dst.orig_val;
2187 return X86EMUL_CONTINUE;
2190 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2192 int seg = ctxt->src2.val;
2196 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2198 rc = load_segment_descriptor(ctxt, sel, seg);
2199 if (rc != X86EMUL_CONTINUE)
2202 ctxt->dst.val = ctxt->src.val;
2207 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2208 struct desc_struct *cs, struct desc_struct *ss)
2210 cs->l = 0; /* will be adjusted later */
2211 set_desc_base(cs, 0); /* flat segment */
2212 cs->g = 1; /* 4kb granularity */
2213 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2214 cs->type = 0x0b; /* Read, Execute, Accessed */
2216 cs->dpl = 0; /* will be adjusted later */
2221 set_desc_base(ss, 0); /* flat segment */
2222 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2223 ss->g = 1; /* 4kb granularity */
2225 ss->type = 0x03; /* Read/Write, Accessed */
2226 ss->d = 1; /* 32bit stack segment */
2233 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2235 u32 eax, ebx, ecx, edx;
2238 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2239 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2240 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2241 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2244 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2246 const struct x86_emulate_ops *ops = ctxt->ops;
2247 u32 eax, ebx, ecx, edx;
2250 * syscall should always be enabled in long mode, so we only need to
2251 * become vendor-specific (via cpuid) if other modes are active...
2253 if (ctxt->mode == X86EMUL_MODE_PROT64)
2258 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2260 * Intel ("GenuineIntel")
2261 * remark: Intel CPUs only support "syscall" in 64-bit long
2262 * mode, so even a 64-bit guest running a 32-bit compat app
2263 * will #UD. While this behaviour could be fixed by emulating
2264 * the AMD response, AMD CPUs can't be made to behave like
2265 * Intel ones.
2267 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2268 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2269 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2272 /* AMD ("AuthenticAMD") */
2273 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2274 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2275 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2278 /* AMD ("AMDisbetter!") */
2279 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2280 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2281 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2284 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2288 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2290 const struct x86_emulate_ops *ops = ctxt->ops;
2291 struct desc_struct cs, ss;
2296 /* syscall is not available in real mode */
2297 if (ctxt->mode == X86EMUL_MODE_REAL ||
2298 ctxt->mode == X86EMUL_MODE_VM86)
2299 return emulate_ud(ctxt);
2301 if (!(em_syscall_is_enabled(ctxt)))
2302 return emulate_ud(ctxt);
2304 ops->get_msr(ctxt, MSR_EFER, &efer);
2305 setup_syscalls_segments(ctxt, &cs, &ss);
2307 if (!(efer & EFER_SCE))
2308 return emulate_ud(ctxt);
2310 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2311 msr_data >>= 32;
2312 cs_sel = (u16)(msr_data & 0xfffc);
2313 ss_sel = (u16)(msr_data + 8);
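/*
 * Editor's note: after the shift above, msr_data holds STAR[47:32];
 * SYSCALL takes CS directly from it and SS from it + 8, with the low
 * two (RPL) bits forced clear for CS.
 */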
2315 if (efer & EFER_LMA) {
2319 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2320 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2322 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2323 if (efer & EFER_LMA) {
2324 #ifdef CONFIG_X86_64
2325 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2328 ctxt->mode == X86EMUL_MODE_PROT64 ?
2329 MSR_LSTAR : MSR_CSTAR, &msr_data);
2330 ctxt->_eip = msr_data;
2332 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2333 ctxt->eflags &= ~msr_data;
2334 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
2338 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2339 ctxt->_eip = (u32)msr_data;
2341 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2344 return X86EMUL_CONTINUE;
2347 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2349 const struct x86_emulate_ops *ops = ctxt->ops;
2350 struct desc_struct cs, ss;
2355 ops->get_msr(ctxt, MSR_EFER, &efer);
2356 /* inject #GP if in real mode */
2357 if (ctxt->mode == X86EMUL_MODE_REAL)
2358 return emulate_gp(ctxt, 0);
2361 * Not recognized on AMD in compat mode (but is recognized in legacy
2362 * mode).
2363 */
2364 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2365 && !vendor_intel(ctxt))
2366 return emulate_ud(ctxt);
2368 /* sysenter/sysexit have not been tested in 64bit mode. */
2369 if (ctxt->mode == X86EMUL_MODE_PROT64)
2370 return X86EMUL_UNHANDLEABLE;
2372 setup_syscalls_segments(ctxt, &cs, &ss);
2374 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2375 switch (ctxt->mode) {
2376 case X86EMUL_MODE_PROT32:
2377 if ((msr_data & 0xfffc) == 0x0)
2378 return emulate_gp(ctxt, 0);
2379 break;
2380 case X86EMUL_MODE_PROT64:
2381 if (msr_data == 0x0)
2382 return emulate_gp(ctxt, 0);
2388 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2389 cs_sel = (u16)msr_data;
2390 cs_sel &= ~SELECTOR_RPL_MASK;
2391 ss_sel = cs_sel + 8;
2392 ss_sel &= ~SELECTOR_RPL_MASK;
2393 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
2398 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2399 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2401 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2402 ctxt->_eip = msr_data;
2404 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2405 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
2407 return X86EMUL_CONTINUE;
2410 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2412 const struct x86_emulate_ops *ops = ctxt->ops;
2413 struct desc_struct cs, ss;
2414 u64 msr_data, rcx, rdx;
2416 u16 cs_sel = 0, ss_sel = 0;
2418 /* inject #GP if in real mode or Virtual 8086 mode */
2419 if (ctxt->mode == X86EMUL_MODE_REAL ||
2420 ctxt->mode == X86EMUL_MODE_VM86)
2421 return emulate_gp(ctxt, 0);
2423 setup_syscalls_segments(ctxt, &cs, &ss);
2425 if ((ctxt->rex_prefix & 0x8) != 0x0)
2426 usermode = X86EMUL_MODE_PROT64;
2428 usermode = X86EMUL_MODE_PROT32;
2430 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2431 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2435 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2437 case X86EMUL_MODE_PROT32:
2438 cs_sel = (u16)(msr_data + 16);
2439 if ((msr_data & 0xfffc) == 0x0)
2440 return emulate_gp(ctxt, 0);
2441 ss_sel = (u16)(msr_data + 24);
2445 case X86EMUL_MODE_PROT64:
2446 cs_sel = (u16)(msr_data + 32);
2447 if (msr_data == 0x0)
2448 return emulate_gp(ctxt, 0);
2449 ss_sel = cs_sel + 8;
2452 if (is_noncanonical_address(rcx) ||
2453 is_noncanonical_address(rdx))
2454 return emulate_gp(ctxt, 0);
2457 cs_sel |= SELECTOR_RPL_MASK;
2458 ss_sel |= SELECTOR_RPL_MASK;
2460 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2461 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2464 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2466 return X86EMUL_CONTINUE;
2469 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2472 if (ctxt->mode == X86EMUL_MODE_REAL)
2474 if (ctxt->mode == X86EMUL_MODE_VM86)
2476 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2477 return ctxt->ops->cpl(ctxt) > iopl;
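/*
 * Check the I/O permission bitmap in the TSS: the 16-bit bitmap base is
 * stored at TSS offset 102, and each I/O port is represented by one bit
 * (1 = access denied). A multi-byte access is allowed only if every bit
 * covering [port, port + len) is clear.
 */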
2480 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2483 const struct x86_emulate_ops *ops = ctxt->ops;
2484 struct desc_struct tr_seg;
2487 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2488 unsigned mask = (1 << len) - 1;
2491 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2494 if (desc_limit_scaled(&tr_seg) < 103)
2496 base = get_desc_base(&tr_seg);
2497 #ifdef CONFIG_X86_64
2498 base |= ((u64)base3) << 32;
2500 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2501 if (r != X86EMUL_CONTINUE)
2503 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2505 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2506 if (r != X86EMUL_CONTINUE)
2508 if ((perm >> bit_idx) & mask)
static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2519 if (emulator_bad_iopl(ctxt))
2520 if (!emulator_io_port_access_allowed(ctxt, port, len))
2523 ctxt->perm_ok = true;
2528 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2529 struct tss_segment_16 *tss)
2531 tss->ip = ctxt->_eip;
2532 tss->flag = ctxt->eflags;
2533 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2534 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2535 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2536 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2537 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2538 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2539 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2540 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2542 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2543 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2544 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2545 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2546 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2549 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2550 struct tss_segment_16 *tss)
2555 ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;	/* bit 1 of EFLAGS is always set */
2557 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2558 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2559 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2560 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2561 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2562 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2563 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2564 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.
2570 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2571 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2572 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2573 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2574 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	 * Now load segment descriptors. If a fault happens at this stage,
	 * it is handled in the context of the new task.
2582 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2583 X86_TRANSFER_TASK_SWITCH, NULL);
2584 if (ret != X86EMUL_CONTINUE)
2586 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2587 X86_TRANSFER_TASK_SWITCH, NULL);
2588 if (ret != X86EMUL_CONTINUE)
2590 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2591 X86_TRANSFER_TASK_SWITCH, NULL);
2592 if (ret != X86EMUL_CONTINUE)
2594 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2595 X86_TRANSFER_TASK_SWITCH, NULL);
2596 if (ret != X86EMUL_CONTINUE)
2598 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2599 X86_TRANSFER_TASK_SWITCH, NULL);
2600 if (ret != X86EMUL_CONTINUE)
2603 return X86EMUL_CONTINUE;
2606 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2607 u16 tss_selector, u16 old_tss_sel,
2608 ulong old_tss_base, struct desc_struct *new_desc)
2610 const struct x86_emulate_ops *ops = ctxt->ops;
2611 struct tss_segment_16 tss_seg;
2613 u32 new_tss_base = get_desc_base(new_desc);
2615 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2617 if (ret != X86EMUL_CONTINUE)
2620 save_state_to_tss16(ctxt, &tss_seg);
2622 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2624 if (ret != X86EMUL_CONTINUE)
2627 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2629 if (ret != X86EMUL_CONTINUE)
2632 if (old_tss_sel != 0xffff) {
2633 tss_seg.prev_task_link = old_tss_sel;
2635 ret = ops->write_std(ctxt, new_tss_base,
2636 &tss_seg.prev_task_link,
2637 sizeof tss_seg.prev_task_link,
2639 if (ret != X86EMUL_CONTINUE)
2643 return load_state_from_tss16(ctxt, &tss_seg);
2646 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2647 struct tss_segment_32 *tss)
	/* CR3 and the LDT selector are intentionally not saved */
2650 tss->eip = ctxt->_eip;
2651 tss->eflags = ctxt->eflags;
2652 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2653 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2654 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2655 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2656 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2657 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2658 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2659 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2661 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2662 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2663 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2664 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2665 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2666 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2669 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2670 struct tss_segment_32 *tss)
2675 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2676 return emulate_gp(ctxt, 0);
2677 ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;	/* bit 1 of EFLAGS is always set */
2680 /* General purpose registers */
2681 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2682 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2683 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2684 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2685 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2686 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2687 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2688 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
	 * SDM says that segment selectors are loaded before segment
	 * descriptors. This is important because CPL checks will
	 * use CS.RPL.
2695 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2696 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2697 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2698 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2699 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2700 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2701 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2704 * If we're switching between Protected Mode and VM86, we need to make
2705 * sure to update the mode before loading the segment descriptors so
2706 * that the selectors are interpreted correctly.
2708 if (ctxt->eflags & X86_EFLAGS_VM) {
2709 ctxt->mode = X86EMUL_MODE_VM86;
2712 ctxt->mode = X86EMUL_MODE_PROT32;
	 * Now load segment descriptors. If a fault happens at this stage,
	 * it is handled in the context of the new task.
2720 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2721 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
2722 if (ret != X86EMUL_CONTINUE)
2724 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2725 X86_TRANSFER_TASK_SWITCH, NULL);
2726 if (ret != X86EMUL_CONTINUE)
2728 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2729 X86_TRANSFER_TASK_SWITCH, NULL);
2730 if (ret != X86EMUL_CONTINUE)
2732 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2733 X86_TRANSFER_TASK_SWITCH, NULL);
2734 if (ret != X86EMUL_CONTINUE)
2736 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2737 X86_TRANSFER_TASK_SWITCH, NULL);
2738 if (ret != X86EMUL_CONTINUE)
2740 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2741 X86_TRANSFER_TASK_SWITCH, NULL);
2742 if (ret != X86EMUL_CONTINUE)
2744 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2745 X86_TRANSFER_TASK_SWITCH, NULL);
2746 if (ret != X86EMUL_CONTINUE)
2749 return X86EMUL_CONTINUE;
2752 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2753 u16 tss_selector, u16 old_tss_sel,
2754 ulong old_tss_base, struct desc_struct *new_desc)
2756 const struct x86_emulate_ops *ops = ctxt->ops;
2757 struct tss_segment_32 tss_seg;
2759 u32 new_tss_base = get_desc_base(new_desc);
2760 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2761 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2763 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2765 if (ret != X86EMUL_CONTINUE)
2768 save_state_to_tss32(ctxt, &tss_seg);
2770 /* Only GP registers and segment selectors are saved */
2771 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2772 ldt_sel_offset - eip_offset, &ctxt->exception);
2773 if (ret != X86EMUL_CONTINUE)
2776 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2778 if (ret != X86EMUL_CONTINUE)
2781 if (old_tss_sel != 0xffff) {
2782 tss_seg.prev_task_link = old_tss_sel;
2784 ret = ops->write_std(ctxt, new_tss_base,
2785 &tss_seg.prev_task_link,
2786 sizeof tss_seg.prev_task_link,
2788 if (ret != X86EMUL_CONTINUE)
2792 return load_state_from_tss32(ctxt, &tss_seg);
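/*
 * Emulate a hardware task switch: read the old and new TSS descriptors,
 * perform the privilege and limit checks, save the outgoing state into
 * the old TSS, load the incoming state from the new one, and maintain
 * the busy bit, EFLAGS.NT, the TSS back link, CR0.TS and TR according
 * to the switch reason (jmp, call, interrupt/gate or iret).
 */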
2795 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2796 u16 tss_selector, int idt_index, int reason,
2797 bool has_error_code, u32 error_code)
2799 const struct x86_emulate_ops *ops = ctxt->ops;
2800 struct desc_struct curr_tss_desc, next_tss_desc;
2802 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2803 ulong old_tss_base =
2804 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2808 /* FIXME: old_tss_base == ~0 ? */
2810 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2811 if (ret != X86EMUL_CONTINUE)
2813 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2814 if (ret != X86EMUL_CONTINUE)
	/* FIXME: check that next_tss_desc is a TSS descriptor */
2820 * Check privileges. The three cases are task switch caused by...
2822 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2823 * 2. Exception/IRQ/iret: No check is performed
2824 * 3. jmp/call to TSS/task-gate: No check is performed since the
2825 * hardware checks it before exiting.
2827 if (reason == TASK_SWITCH_GATE) {
2828 if (idt_index != -1) {
2829 /* Software interrupts */
2830 struct desc_struct task_gate_desc;
2833 ret = read_interrupt_descriptor(ctxt, idt_index,
2835 if (ret != X86EMUL_CONTINUE)
2838 dpl = task_gate_desc.dpl;
2839 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2840 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2844 desc_limit = desc_limit_scaled(&next_tss_desc);
2845 if (!next_tss_desc.p ||
2846 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2847 desc_limit < 0x2b)) {
2848 return emulate_ts(ctxt, tss_selector & 0xfffc);
2851 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2852 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2853 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2856 if (reason == TASK_SWITCH_IRET)
2857 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
	/* Set the back link to the previous task only if the NT bit is set
	   in EFLAGS; note that old_tss_sel is not used after this point. */
2861 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2862 old_tss_sel = 0xffff;
2864 if (next_tss_desc.type & 8)
2865 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2866 old_tss_base, &next_tss_desc);
2868 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2869 old_tss_base, &next_tss_desc);
2870 if (ret != X86EMUL_CONTINUE)
2873 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2874 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2876 if (reason != TASK_SWITCH_IRET) {
2877 next_tss_desc.type |= (1 << 1); /* set busy flag */
2878 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2881 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2882 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2884 if (has_error_code) {
2885 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2886 ctxt->lock_prefix = 0;
2887 ctxt->src.val = (unsigned long) error_code;
2888 ret = em_push(ctxt);
2894 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2895 u16 tss_selector, int idt_index, int reason,
2896 bool has_error_code, u32 error_code)
2900 invalidate_registers(ctxt);
2901 ctxt->_eip = ctxt->eip;
2902 ctxt->dst.type = OP_NONE;
2904 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2905 has_error_code, error_code);
2907 if (rc == X86EMUL_CONTINUE) {
2908 ctxt->eip = ctxt->_eip;
2909 writeback_registers(ctxt);
2912 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
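/*
 * Advance a string operand's index register (SI/DI) by the number of
 * elements processed, moving backwards when EFLAGS.DF is set, and
 * recompute the operand's effective address.
 */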
2915 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2918 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2920 register_address_increment(ctxt, reg, df * op->bytes);
2921 op->addr.mem.ea = register_address(ctxt, reg);
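/*
 * DAS adjusts AL after a packed-BCD subtraction: if the low nibble is
 * above 9 (or AF is set), subtract 6 and set AF; if the original AL was
 * above 0x99 (or CF was set), subtract 0x60 and set CF. For example,
 * 0x47 - 0x38 leaves AL = 0x0f with AF set; DAS turns that into the
 * correct BCD result 0x09.
 */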
2924 static int em_das(struct x86_emulate_ctxt *ctxt)
2927 bool af, cf, old_cf;
2929 cf = ctxt->eflags & X86_EFLAGS_CF;
2935 af = ctxt->eflags & X86_EFLAGS_AF;
2936 if ((al & 0x0f) > 9 || af) {
2938 cf = old_cf | (al >= 250);
2943 if (old_al > 0x99 || old_cf) {
2949 /* Set PF, ZF, SF */
2950 ctxt->src.type = OP_IMM;
2952 ctxt->src.bytes = 1;
2953 fastop(ctxt, em_or);
2954 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2956 ctxt->eflags |= X86_EFLAGS_CF;
2958 ctxt->eflags |= X86_EFLAGS_AF;
2959 return X86EMUL_CONTINUE;
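/*
 * AAM splits AL by the immediate base (10 for the canonical D4 0A
 * encoding): AH = AL / imm8 and AL = AL % imm8, with #DE on a zero
 * divisor. E.g. with AL = 0x25 (37), AAM leaves AH = 3 and AL = 7.
 */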
2962 static int em_aam(struct x86_emulate_ctxt *ctxt)
2966 if (ctxt->src.val == 0)
2967 return emulate_de(ctxt);
2969 al = ctxt->dst.val & 0xff;
2970 ah = al / ctxt->src.val;
2971 al %= ctxt->src.val;
2973 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2975 /* Set PF, ZF, SF */
2976 ctxt->src.type = OP_IMM;
2978 ctxt->src.bytes = 1;
2979 fastop(ctxt, em_or);
2981 return X86EMUL_CONTINUE;
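/*
 * AAD is the inverse of AAM: AL = (AL + AH * imm8) & 0xff and AH = 0,
 * again with 10 as the canonical base (D5 0A). E.g. AH = 3, AL = 7
 * becomes AL = 0x25 (37).
 */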
2984 static int em_aad(struct x86_emulate_ctxt *ctxt)
2986 u8 al = ctxt->dst.val & 0xff;
2987 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2989 al = (al + (ah * ctxt->src.val)) & 0xff;
2991 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2993 /* Set PF, ZF, SF */
2994 ctxt->src.type = OP_IMM;
2996 ctxt->src.bytes = 1;
2997 fastop(ctxt, em_or);
2999 return X86EMUL_CONTINUE;
3002 static int em_call(struct x86_emulate_ctxt *ctxt)
3005 long rel = ctxt->src.val;
3007 ctxt->src.val = (unsigned long)ctxt->_eip;
3008 rc = jmp_rel(ctxt, rel);
3009 if (rc != X86EMUL_CONTINUE)
3011 return em_push(ctxt);
3014 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3019 struct desc_struct old_desc, new_desc;
3020 const struct x86_emulate_ops *ops = ctxt->ops;
3021 int cpl = ctxt->ops->cpl(ctxt);
3023 old_eip = ctxt->_eip;
3024 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3026 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3027 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3028 X86_TRANSFER_CALL_JMP, &new_desc);
3029 if (rc != X86EMUL_CONTINUE)
3032 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3033 if (rc != X86EMUL_CONTINUE)
3036 ctxt->src.val = old_cs;
3038 if (rc != X86EMUL_CONTINUE)
3041 ctxt->src.val = old_eip;
	/* If we failed, we tainted the memory, but the very least we should
	   restore is the old stack pointer. */
3045 if (rc != X86EMUL_CONTINUE)
3049 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3054 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3059 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3060 if (rc != X86EMUL_CONTINUE)
3062 rc = assign_eip_near(ctxt, eip);
3063 if (rc != X86EMUL_CONTINUE)
3065 rsp_increment(ctxt, ctxt->src.val);
3066 return X86EMUL_CONTINUE;
3069 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3071 /* Write back the register source. */
3072 ctxt->src.val = ctxt->dst.val;
3073 write_register_operand(&ctxt->src);
3075 /* Write back the memory destination with implicit LOCK prefix. */
3076 ctxt->dst.val = ctxt->src.orig_val;
3077 ctxt->lock_prefix = 1;
3078 return X86EMUL_CONTINUE;
3081 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3083 ctxt->dst.val = ctxt->src2.val;
3084 return fastop(ctxt, em_imul);
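/*
 * CWD/CDQ/CQO sign-extend the accumulator into DX/EDX/RDX. Shifting the
 * sign bit down to bit 0 gives 0 or 1; subtracting 1 and complementing
 * turns that into all-zeroes or all-ones respectively.
 */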
3087 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3089 ctxt->dst.type = OP_REG;
3090 ctxt->dst.bytes = ctxt->src.bytes;
3091 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3092 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3094 return X86EMUL_CONTINUE;
3097 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3101 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3102 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3103 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3104 return X86EMUL_CONTINUE;
3107 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3111 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3112 return emulate_gp(ctxt, 0);
3113 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3114 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3115 return X86EMUL_CONTINUE;
3118 static int em_mov(struct x86_emulate_ctxt *ctxt)
3120 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3121 return X86EMUL_CONTINUE;
3124 #define FFL(x) bit(X86_FEATURE_##x)
3126 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3128 u32 ebx, ecx, edx, eax = 1;
	 * Check that MOVBE is set in the guest-visible CPUID leaf.
3134 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3135 if (!(ecx & FFL(MOVBE)))
3136 return emulate_ud(ctxt);
3138 switch (ctxt->op_bytes) {
		/*
		 * From the MOVBE definition: "...When the operand size is 16
		 * bits, the upper word of the destination register remains
		 * unchanged ..."
		 *
		 * Casting either ->valptr or ->val to u16 would break strict
		 * aliasing rules, so the operation is done almost by hand.
		 */
3148 tmp = (u16)ctxt->src.val;
3149 ctxt->dst.val &= ~0xffffUL;
3150 ctxt->dst.val |= (unsigned long)swab16(tmp);
3153 ctxt->dst.val = swab32((u32)ctxt->src.val);
3156 ctxt->dst.val = swab64(ctxt->src.val);
3161 return X86EMUL_CONTINUE;
3164 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3166 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3167 return emulate_gp(ctxt, 0);
3169 /* Disable writeback. */
3170 ctxt->dst.type = OP_NONE;
3171 return X86EMUL_CONTINUE;
3174 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3178 if (ctxt->mode == X86EMUL_MODE_PROT64)
3179 val = ctxt->src.val & ~0ULL;
3181 val = ctxt->src.val & ~0U;
3183 /* #UD condition is already handled. */
3184 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3185 return emulate_gp(ctxt, 0);
3187 /* Disable writeback. */
3188 ctxt->dst.type = OP_NONE;
3189 return X86EMUL_CONTINUE;
3192 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3196 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3197 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3198 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3199 return emulate_gp(ctxt, 0);
3201 return X86EMUL_CONTINUE;
3204 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3208 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3209 return emulate_gp(ctxt, 0);
3211 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3212 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3213 return X86EMUL_CONTINUE;
3216 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3218 if (ctxt->modrm_reg > VCPU_SREG_GS)
3219 return emulate_ud(ctxt);
3221 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3222 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3223 ctxt->dst.bytes = 2;
3224 return X86EMUL_CONTINUE;
3227 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3229 u16 sel = ctxt->src.val;
3231 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3232 return emulate_ud(ctxt);
3234 if (ctxt->modrm_reg == VCPU_SREG_SS)
3235 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3237 /* Disable writeback. */
3238 ctxt->dst.type = OP_NONE;
3239 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3242 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3244 u16 sel = ctxt->src.val;
3246 /* Disable writeback. */
3247 ctxt->dst.type = OP_NONE;
3248 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3251 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3253 u16 sel = ctxt->src.val;
3255 /* Disable writeback. */
3256 ctxt->dst.type = OP_NONE;
3257 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3260 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3265 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3266 if (rc == X86EMUL_CONTINUE)
3267 ctxt->ops->invlpg(ctxt, linear);
3268 /* Disable writeback. */
3269 ctxt->dst.type = OP_NONE;
3270 return X86EMUL_CONTINUE;
3273 static int em_clts(struct x86_emulate_ctxt *ctxt)
3277 cr0 = ctxt->ops->get_cr(ctxt, 0);
3279 ctxt->ops->set_cr(ctxt, 0, cr0);
3280 return X86EMUL_CONTINUE;
3283 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3285 int rc = ctxt->ops->fix_hypercall(ctxt);
3287 if (rc != X86EMUL_CONTINUE)
3290 /* Let the processor re-execute the fixed hypercall */
3291 ctxt->_eip = ctxt->eip;
3292 /* Disable writeback. */
3293 ctxt->dst.type = OP_NONE;
3294 return X86EMUL_CONTINUE;
3297 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3298 void (*get)(struct x86_emulate_ctxt *ctxt,
3299 struct desc_ptr *ptr))
3301 struct desc_ptr desc_ptr;
3303 if (ctxt->mode == X86EMUL_MODE_PROT64)
3305 get(ctxt, &desc_ptr);
3306 if (ctxt->op_bytes == 2) {
3308 desc_ptr.address &= 0x00ffffff;
3310 /* Disable writeback. */
3311 ctxt->dst.type = OP_NONE;
3312 return segmented_write(ctxt, ctxt->dst.addr.mem,
3313 &desc_ptr, 2 + ctxt->op_bytes);
3316 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3318 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3321 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3323 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3326 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3328 struct desc_ptr desc_ptr;
3331 if (ctxt->mode == X86EMUL_MODE_PROT64)
3333 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3334 &desc_ptr.size, &desc_ptr.address,
3336 if (rc != X86EMUL_CONTINUE)
3338 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3339 is_noncanonical_address(desc_ptr.address))
3340 return emulate_gp(ctxt, 0);
3342 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3344 ctxt->ops->set_idt(ctxt, &desc_ptr);
3345 /* Disable writeback. */
3346 ctxt->dst.type = OP_NONE;
3347 return X86EMUL_CONTINUE;
3350 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3352 return em_lgdt_lidt(ctxt, true);
3355 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3359 rc = ctxt->ops->fix_hypercall(ctxt);
3361 /* Disable writeback. */
3362 ctxt->dst.type = OP_NONE;
3366 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3368 return em_lgdt_lidt(ctxt, false);
3371 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3373 if (ctxt->dst.type == OP_MEM)
3374 ctxt->dst.bytes = 2;
3375 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3376 return X86EMUL_CONTINUE;
3379 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3381 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3382 | (ctxt->src.val & 0x0f));
3383 ctxt->dst.type = OP_NONE;
3384 return X86EMUL_CONTINUE;
3387 static int em_loop(struct x86_emulate_ctxt *ctxt)
3389 int rc = X86EMUL_CONTINUE;
3391 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
	/* LOOP if CX != 0; LOOPNE/LOOPE (0xe0/0xe1) additionally test ZF */
	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3394 rc = jmp_rel(ctxt, ctxt->src.val);
3399 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3401 int rc = X86EMUL_CONTINUE;
3403 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3404 rc = jmp_rel(ctxt, ctxt->src.val);
3409 static int em_in(struct x86_emulate_ctxt *ctxt)
3411 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3413 return X86EMUL_IO_NEEDED;
3415 return X86EMUL_CONTINUE;
3418 static int em_out(struct x86_emulate_ctxt *ctxt)
3420 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3422 /* Disable writeback. */
3423 ctxt->dst.type = OP_NONE;
3424 return X86EMUL_CONTINUE;
3427 static int em_cli(struct x86_emulate_ctxt *ctxt)
3429 if (emulator_bad_iopl(ctxt))
3430 return emulate_gp(ctxt, 0);
3432 ctxt->eflags &= ~X86_EFLAGS_IF;
3433 return X86EMUL_CONTINUE;
3436 static int em_sti(struct x86_emulate_ctxt *ctxt)
3438 if (emulator_bad_iopl(ctxt))
3439 return emulate_gp(ctxt, 0);
3441 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3442 ctxt->eflags |= X86_EFLAGS_IF;
3443 return X86EMUL_CONTINUE;
3446 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3448 u32 eax, ebx, ecx, edx;
3450 eax = reg_read(ctxt, VCPU_REGS_RAX);
3451 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3452 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3453 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3454 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3455 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3456 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3457 return X86EMUL_CONTINUE;
3460 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3464 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3465 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3467 ctxt->eflags &= ~0xffUL;
3468 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3469 return X86EMUL_CONTINUE;
3472 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3474 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3475 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3476 return X86EMUL_CONTINUE;
3479 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3481 switch (ctxt->op_bytes) {
3482 #ifdef CONFIG_X86_64
3484 asm("bswap %0" : "+r"(ctxt->dst.val));
3488 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3491 return X86EMUL_CONTINUE;
3494 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3496 /* emulating clflush regardless of cpuid */
3497 return X86EMUL_CONTINUE;
3500 static bool valid_cr(int nr)
3512 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3514 if (!valid_cr(ctxt->modrm_reg))
3515 return emulate_ud(ctxt);
3517 return X86EMUL_CONTINUE;
3520 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3522 u64 new_val = ctxt->src.val64;
3523 int cr = ctxt->modrm_reg;
3526 static u64 cr_reserved_bits[] = {
3527 0xffffffff00000000ULL,
3528 0, 0, 0, /* CR3 checked later */
3535 return emulate_ud(ctxt);
3537 if (new_val & cr_reserved_bits[cr])
3538 return emulate_gp(ctxt, 0);
3543 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3544 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3545 return emulate_gp(ctxt, 0);
3547 cr4 = ctxt->ops->get_cr(ctxt, 4);
3548 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3550 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3551 !(cr4 & X86_CR4_PAE))
3552 return emulate_gp(ctxt, 0);
3559 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3560 if (efer & EFER_LMA)
3561 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
3564 return emulate_gp(ctxt, 0);
3569 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3571 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3572 return emulate_gp(ctxt, 0);
3578 return X86EMUL_CONTINUE;
3581 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3585 ctxt->ops->get_dr(ctxt, 7, &dr7);
	/* Check if DR7.GD (general detect) is set */
3588 return dr7 & (1 << 13);
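/*
 * DR4/DR5 are aliases of DR6/DR7 unless CR4.DE is set, in which case
 * accessing them raises #UD. Additionally, when DR7.GD is set, any
 * debug-register access raises #DB with DR6.BD set.
 */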
3591 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3593 int dr = ctxt->modrm_reg;
3597 return emulate_ud(ctxt);
3599 cr4 = ctxt->ops->get_cr(ctxt, 4);
3600 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3601 return emulate_ud(ctxt);
3603 if (check_dr7_gd(ctxt)) {
3606 ctxt->ops->get_dr(ctxt, 6, &dr6);
3608 dr6 |= DR6_BD | DR6_RTM;
3609 ctxt->ops->set_dr(ctxt, 6, dr6);
3610 return emulate_db(ctxt);
3613 return X86EMUL_CONTINUE;
3616 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3618 u64 new_val = ctxt->src.val64;
3619 int dr = ctxt->modrm_reg;
3621 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3622 return emulate_gp(ctxt, 0);
3624 return check_dr_read(ctxt);
3627 static int check_svme(struct x86_emulate_ctxt *ctxt)
3631 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3633 if (!(efer & EFER_SVME))
3634 return emulate_ud(ctxt);
3636 return X86EMUL_CONTINUE;
3639 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3641 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3643 /* Valid physical address? */
3644 if (rax & 0xffff000000000000ULL)
3645 return emulate_gp(ctxt, 0);
3647 return check_svme(ctxt);
3650 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3652 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3654 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3655 return emulate_ud(ctxt);
3657 return X86EMUL_CONTINUE;
3660 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3662 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3663 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3665 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3666 ctxt->ops->check_pmc(ctxt, rcx))
3667 return emulate_gp(ctxt, 0);
3669 return X86EMUL_CONTINUE;
3672 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3674 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
3676 return emulate_gp(ctxt, 0);
3678 return X86EMUL_CONTINUE;
3681 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3683 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
3685 return emulate_gp(ctxt, 0);
3687 return X86EMUL_CONTINUE;
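/*
 * Shorthand for building the decode tables below: D() declares flags
 * only, N marks an unimplemented entry, I()/F() attach an em_*/fastop
 * handler, G()/GD()/E()/GP()/ID() redirect through a group, group-dual,
 * escape, SIMD-prefix or instruction-dual table, and the *I*/*IP
 * variants additionally record an intercept and/or permission check.
 * The 2bv/F6ALU wrappers emit the byte-sized form followed by the
 * word/long-sized one(s).
 */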
3690 #define D(_y) { .flags = (_y) }
3691 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3692 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3693 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3694 #define N D(NotImpl)
3695 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3696 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3697 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3698 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
3699 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3700 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3701 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3702 #define II(_f, _e, _i) \
3703 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3704 #define IIP(_f, _e, _i, _p) \
3705 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3706 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3707 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3709 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3710 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3711 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3712 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3713 #define I2bvIP(_f, _e, _i, _p) \
3714 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3716 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3717 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3718 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3720 static const struct opcode group7_rm0[] = {
3722 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3726 static const struct opcode group7_rm1[] = {
3727 DI(SrcNone | Priv, monitor),
3728 DI(SrcNone | Priv, mwait),
3732 static const struct opcode group7_rm3[] = {
3733 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3734 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
3735 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3736 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3737 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3738 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3739 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3740 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3743 static const struct opcode group7_rm7[] = {
3745 DIP(SrcNone, rdtscp, check_rdtsc),
3749 static const struct opcode group1[] = {
3751 F(Lock | PageTable, em_or),
3754 F(Lock | PageTable, em_and),
3760 static const struct opcode group1A[] = {
3761 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3764 static const struct opcode group2[] = {
3765 F(DstMem | ModRM, em_rol),
3766 F(DstMem | ModRM, em_ror),
3767 F(DstMem | ModRM, em_rcl),
3768 F(DstMem | ModRM, em_rcr),
3769 F(DstMem | ModRM, em_shl),
3770 F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl),	/* ModRM reg /6 is an undocumented alias of /4 (shl) */
3772 F(DstMem | ModRM, em_sar),
3775 static const struct opcode group3[] = {
3776 F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test),	/* ModRM reg /1 is an undocumented alias of /0 (test) */
3778 F(DstMem | SrcNone | Lock, em_not),
3779 F(DstMem | SrcNone | Lock, em_neg),
3780 F(DstXacc | Src2Mem, em_mul_ex),
3781 F(DstXacc | Src2Mem, em_imul_ex),
3782 F(DstXacc | Src2Mem, em_div_ex),
3783 F(DstXacc | Src2Mem, em_idiv_ex),
3786 static const struct opcode group4[] = {
3787 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3788 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3792 static const struct opcode group5[] = {
3793 F(DstMem | SrcNone | Lock, em_inc),
3794 F(DstMem | SrcNone | Lock, em_dec),
3795 I(SrcMem | NearBranch, em_call_near_abs),
3796 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3797 I(SrcMem | NearBranch, em_jmp_abs),
3798 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
3799 I(SrcMem | Stack, em_push), D(Undefined),
3802 static const struct opcode group6[] = {
3805 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3806 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3810 static const struct group_dual group7 = { {
3811 II(Mov | DstMem, em_sgdt, sgdt),
3812 II(Mov | DstMem, em_sidt, sidt),
3813 II(SrcMem | Priv, em_lgdt, lgdt),
3814 II(SrcMem | Priv, em_lidt, lidt),
3815 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3816 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3817 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3821 N, EXT(0, group7_rm3),
3822 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3823 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3827 static const struct opcode group8[] = {
3829 F(DstMem | SrcImmByte | NoWrite, em_bt),
3830 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3831 F(DstMem | SrcImmByte | Lock, em_btr),
3832 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3835 static const struct group_dual group9 = { {
3836 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3838 N, N, N, N, N, N, N, N,
3841 static const struct opcode group11[] = {
3842 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3846 static const struct gprefix pfx_0f_ae_7 = {
3847 I(SrcMem | ByteOp, em_clflush), N, N, N,
3850 static const struct group_dual group15 = { {
3851 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3853 N, N, N, N, N, N, N, N,
3856 static const struct gprefix pfx_0f_6f_0f_7f = {
3857 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3860 static const struct instr_dual instr_dual_0f_2b = {
3864 static const struct gprefix pfx_0f_2b = {
3865 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
3868 static const struct gprefix pfx_0f_28_0f_29 = {
3869 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3872 static const struct gprefix pfx_0f_e7 = {
3873 N, I(Sse, em_mov), N, N,
3876 static const struct escape escape_d9 = { {
3877 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
3880 N, N, N, N, N, N, N, N,
3882 N, N, N, N, N, N, N, N,
3884 N, N, N, N, N, N, N, N,
3886 N, N, N, N, N, N, N, N,
3888 N, N, N, N, N, N, N, N,
3890 N, N, N, N, N, N, N, N,
3892 N, N, N, N, N, N, N, N,
3894 N, N, N, N, N, N, N, N,
3897 static const struct escape escape_db = { {
3898 N, N, N, N, N, N, N, N,
3901 N, N, N, N, N, N, N, N,
3903 N, N, N, N, N, N, N, N,
3905 N, N, N, N, N, N, N, N,
3907 N, N, N, N, N, N, N, N,
3909 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3911 N, N, N, N, N, N, N, N,
3913 N, N, N, N, N, N, N, N,
3915 N, N, N, N, N, N, N, N,
3918 static const struct escape escape_dd = { {
3919 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
3922 N, N, N, N, N, N, N, N,
3924 N, N, N, N, N, N, N, N,
3926 N, N, N, N, N, N, N, N,
3928 N, N, N, N, N, N, N, N,
3930 N, N, N, N, N, N, N, N,
3932 N, N, N, N, N, N, N, N,
3934 N, N, N, N, N, N, N, N,
3936 N, N, N, N, N, N, N, N,
3939 static const struct instr_dual instr_dual_0f_c3 = {
3940 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
3943 static const struct opcode opcode_table[256] = {
3945 F6ALU(Lock, em_add),
3946 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3947 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3949 F6ALU(Lock | PageTable, em_or),
3950 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3953 F6ALU(Lock, em_adc),
3954 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3955 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3957 F6ALU(Lock, em_sbb),
3958 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3959 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3961 F6ALU(Lock | PageTable, em_and), N, N,
3963 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3965 F6ALU(Lock, em_xor), N, N,
3967 F6ALU(NoWrite, em_cmp), N, N,
3969 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3971 X8(I(SrcReg | Stack, em_push)),
3973 X8(I(DstReg | Stack, em_pop)),
3975 I(ImplicitOps | Stack | No64, em_pusha),
3976 I(ImplicitOps | Stack | No64, em_popa),
	N, D(DstReg | SrcMem32 | ModRM | Mov), /* movsxd (x86/64) */
3980 I(SrcImm | Mov | Stack, em_push),
3981 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3982 I(SrcImmByte | Mov | Stack, em_push),
3983 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3984 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3985 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3987 X16(D(SrcImmByte | NearBranch)),
3989 G(ByteOp | DstMem | SrcImm, group1),
3990 G(DstMem | SrcImm, group1),
3991 G(ByteOp | DstMem | SrcImm | No64, group1),
3992 G(DstMem | SrcImmByte, group1),
3993 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3994 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3996 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3997 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3998 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3999 D(ModRM | SrcMem | NoAccess | DstReg),
4000 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4003 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4005 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4006 I(SrcImmFAddr | No64, em_call_far), N,
4007 II(ImplicitOps | Stack, em_pushf, pushf),
4008 II(ImplicitOps | Stack, em_popf, popf),
4009 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4011 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4012 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4013 I2bv(SrcSI | DstDI | Mov | String, em_mov),
4014 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
4016 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4017 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4018 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4019 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4021 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4023 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4025 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4026 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4027 I(ImplicitOps | NearBranch, em_ret),
4028 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4029 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4030 G(ByteOp, group11), G(0, group11),
4032 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4033 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4034 I(ImplicitOps | Stack, em_ret_far),
4035 D(ImplicitOps), DI(SrcImmByte, intn),
4036 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4038 G(Src2One | ByteOp, group2), G(Src2One, group2),
4039 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4040 I(DstAcc | SrcImmUByte | No64, em_aam),
4041 I(DstAcc | SrcImmUByte | No64, em_aad),
4042 F(DstAcc | ByteOp | No64, em_salc),
4043 I(DstAcc | SrcXLat | ByteOp, em_mov),
4045 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4047 X3(I(SrcImmByte | NearBranch, em_loop)),
4048 I(SrcImmByte | NearBranch, em_jcxz),
4049 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4050 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4052 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4053 I(SrcImmFAddr | No64, em_jmp_far),
4054 D(SrcImmByte | ImplicitOps | NearBranch),
4055 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4056 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4058 N, DI(ImplicitOps, icebp), N, N,
4059 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4060 G(ByteOp, group3), G(0, group3),
4062 D(ImplicitOps), D(ImplicitOps),
4063 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4064 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4067 static const struct opcode twobyte_table[256] = {
4069 G(0, group6), GD(0, &group7), N, N,
4070 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4071 II(ImplicitOps | Priv, em_clts, clts), N,
4072 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4073 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4075 N, N, N, N, N, N, N, N,
4076 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4077 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4079 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4080 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4081 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4083 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4086 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4087 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4088 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4091 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4092 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4093 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4094 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4095 I(ImplicitOps | EmulateOnUD, em_sysenter),
4096 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4098 N, N, N, N, N, N, N, N,
4100 X16(D(DstReg | SrcMem | ModRM)),
4102 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4107 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4112 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4114 X16(D(SrcImm | NearBranch)),
4116 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4118 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4119 II(ImplicitOps, em_cpuid, cpuid),
4120 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4121 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4122 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4124 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4125 DI(ImplicitOps, rsm),
4126 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4127 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4128 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4129 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4131 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
4132 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4133 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4134 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4135 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4136 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4140 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4141 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4142 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4144 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4145 N, ID(0, &instr_dual_0f_c3),
4146 N, N, N, GD(0, &group9),
4148 X8(I(DstReg, em_bswap)),
4150 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4152 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4153 N, N, N, N, N, N, N, N,
4155 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4158 static const struct instr_dual instr_dual_0f_38_f0 = {
4159 I(DstReg | SrcMem | Mov, em_movbe), N
4162 static const struct instr_dual instr_dual_0f_38_f1 = {
4163 I(DstMem | SrcReg | Mov, em_movbe), N
4166 static const struct gprefix three_byte_0f_38_f0 = {
4167 ID(0, &instr_dual_0f_38_f0), N, N, N
4170 static const struct gprefix three_byte_0f_38_f1 = {
4171 ID(0, &instr_dual_0f_38_f1), N, N, N
 * Insns below are selected by the prefix, which is indexed by the third
 * opcode byte.
4178 static const struct opcode opcode_map_0f_38[256] = {
4180 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4182 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4184 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4185 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4204 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4208 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4214 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4215 unsigned size, bool sign_extension)
4217 int rc = X86EMUL_CONTINUE;
4221 op->addr.mem.ea = ctxt->_eip;
4222 /* NB. Immediates are sign-extended as necessary. */
4223 switch (op->bytes) {
4225 op->val = insn_fetch(s8, ctxt);
4228 op->val = insn_fetch(s16, ctxt);
4231 op->val = insn_fetch(s32, ctxt);
4234 op->val = insn_fetch(s64, ctxt);
4237 if (!sign_extension) {
4238 switch (op->bytes) {
4246 op->val &= 0xffffffff;
4254 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4257 int rc = X86EMUL_CONTINUE;
4261 decode_register_operand(ctxt, op);
4264 rc = decode_imm(ctxt, op, 1, false);
4267 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4271 if (ctxt->d & BitOp)
4272 fetch_bit_operand(ctxt);
4273 op->orig_val = op->val;
4276 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4280 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4281 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4282 fetch_register_operand(op);
4283 op->orig_val = op->val;
4287 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4288 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4289 fetch_register_operand(op);
4290 op->orig_val = op->val;
4293 if (ctxt->d & ByteOp) {
4298 op->bytes = ctxt->op_bytes;
4299 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4300 fetch_register_operand(op);
4301 op->orig_val = op->val;
4305 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4307 register_address(ctxt, VCPU_REGS_RDI);
4308 op->addr.mem.seg = VCPU_SREG_ES;
4315 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4316 fetch_register_operand(op);
4321 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4324 rc = decode_imm(ctxt, op, 1, true);
4332 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4335 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4338 ctxt->memop.bytes = 1;
4339 if (ctxt->memop.type == OP_REG) {
4340 ctxt->memop.addr.reg = decode_register(ctxt,
4341 ctxt->modrm_rm, true);
4342 fetch_register_operand(&ctxt->memop);
4346 ctxt->memop.bytes = 2;
4349 ctxt->memop.bytes = 4;
4352 rc = decode_imm(ctxt, op, 2, false);
4355 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4359 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4361 register_address(ctxt, VCPU_REGS_RSI);
4362 op->addr.mem.seg = ctxt->seg_override;
4368 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4371 reg_read(ctxt, VCPU_REGS_RBX) +
4372 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4373 op->addr.mem.seg = ctxt->seg_override;
4378 op->addr.mem.ea = ctxt->_eip;
4379 op->bytes = ctxt->op_bytes + 2;
4380 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4383 ctxt->memop.bytes = ctxt->op_bytes + 2;
4387 op->val = VCPU_SREG_ES;
4391 op->val = VCPU_SREG_CS;
4395 op->val = VCPU_SREG_SS;
4399 op->val = VCPU_SREG_DS;
4403 op->val = VCPU_SREG_FS;
4407 op->val = VCPU_SREG_GS;
4410 /* Special instructions do their own operand decoding. */
4412 op->type = OP_NONE; /* Disable writeback. */
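/*
 * Main decode entry point: consume legacy and REX prefixes, fetch the
 * one- to three-byte opcode, resolve group/dual/prefix-selected entries
 * down to a single opcode, decode ModRM/SIB and then each of the source,
 * second-source and destination operands, applying a RIP-relative fixup
 * at the end if needed.
 */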
4420 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4422 int rc = X86EMUL_CONTINUE;
4423 int mode = ctxt->mode;
4424 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4425 bool op_prefix = false;
4426 bool has_seg_override = false;
4427 struct opcode opcode;
4429 ctxt->memop.type = OP_NONE;
4430 ctxt->memopp = NULL;
4431 ctxt->_eip = ctxt->eip;
4432 ctxt->fetch.ptr = ctxt->fetch.data;
4433 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4434 ctxt->opcode_len = 1;
4436 memcpy(ctxt->fetch.data, insn, insn_len);
4438 rc = __do_insn_fetch_bytes(ctxt, 1);
4439 if (rc != X86EMUL_CONTINUE)
4444 case X86EMUL_MODE_REAL:
4445 case X86EMUL_MODE_VM86:
4446 case X86EMUL_MODE_PROT16:
4447 def_op_bytes = def_ad_bytes = 2;
4449 case X86EMUL_MODE_PROT32:
4450 def_op_bytes = def_ad_bytes = 4;
4452 #ifdef CONFIG_X86_64
4453 case X86EMUL_MODE_PROT64:
4459 return EMULATION_FAILED;
4462 ctxt->op_bytes = def_op_bytes;
4463 ctxt->ad_bytes = def_ad_bytes;
4465 /* Legacy prefixes. */
4467 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4468 case 0x66: /* operand-size override */
4470 /* switch between 2/4 bytes */
4471 ctxt->op_bytes = def_op_bytes ^ 6;
4473 case 0x67: /* address-size override */
4474 if (mode == X86EMUL_MODE_PROT64)
4475 /* switch between 4/8 bytes */
4476 ctxt->ad_bytes = def_ad_bytes ^ 12;
4478 /* switch between 2/4 bytes */
4479 ctxt->ad_bytes = def_ad_bytes ^ 6;
4481 case 0x26: /* ES override */
4482 case 0x2e: /* CS override */
4483 case 0x36: /* SS override */
4484 case 0x3e: /* DS override */
4485 has_seg_override = true;
4486 ctxt->seg_override = (ctxt->b >> 3) & 3;
4488 case 0x64: /* FS override */
4489 case 0x65: /* GS override */
4490 has_seg_override = true;
4491 ctxt->seg_override = ctxt->b & 7;
4493 case 0x40 ... 0x4f: /* REX */
4494 if (mode != X86EMUL_MODE_PROT64)
4496 ctxt->rex_prefix = ctxt->b;
4498 case 0xf0: /* LOCK */
4499 ctxt->lock_prefix = 1;
4501 case 0xf2: /* REPNE/REPNZ */
4502 case 0xf3: /* REP/REPE/REPZ */
4503 ctxt->rep_prefix = ctxt->b;
4509 /* Any legacy prefix after a REX prefix nullifies its effect. */
4511 ctxt->rex_prefix = 0;
4517 if (ctxt->rex_prefix & 8)
4518 ctxt->op_bytes = 8; /* REX.W */
4520 /* Opcode byte(s). */
4521 opcode = opcode_table[ctxt->b];
4522 /* Two-byte opcode? */
4523 if (ctxt->b == 0x0f) {
4524 ctxt->opcode_len = 2;
4525 ctxt->b = insn_fetch(u8, ctxt);
4526 opcode = twobyte_table[ctxt->b];
4528 /* 0F_38 opcode map */
4529 if (ctxt->b == 0x38) {
4530 ctxt->opcode_len = 3;
4531 ctxt->b = insn_fetch(u8, ctxt);
4532 opcode = opcode_map_0f_38[ctxt->b];
4535 ctxt->d = opcode.flags;
4537 if (ctxt->d & ModRM)
4538 ctxt->modrm = insn_fetch(u8, ctxt);
	/* VEX-prefixed instructions are not implemented */
4541 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4542 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
4546 while (ctxt->d & GroupMask) {
4547 switch (ctxt->d & GroupMask) {
4549 goffset = (ctxt->modrm >> 3) & 7;
4550 opcode = opcode.u.group[goffset];
4553 goffset = (ctxt->modrm >> 3) & 7;
4554 if ((ctxt->modrm >> 6) == 3)
4555 opcode = opcode.u.gdual->mod3[goffset];
4557 opcode = opcode.u.gdual->mod012[goffset];
4560 goffset = ctxt->modrm & 7;
4561 opcode = opcode.u.group[goffset];
4564 if (ctxt->rep_prefix && op_prefix)
4565 return EMULATION_FAILED;
4566 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4567 switch (simd_prefix) {
4568 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4569 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4570 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4571 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4575 if (ctxt->modrm > 0xbf)
4576 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4578 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4581 if ((ctxt->modrm >> 6) == 3)
4582 opcode = opcode.u.idual->mod3;
4584 opcode = opcode.u.idual->mod012;
4587 return EMULATION_FAILED;
4590 ctxt->d &= ~(u64)GroupMask;
4591 ctxt->d |= opcode.flags;
4596 return EMULATION_FAILED;
4598 ctxt->execute = opcode.u.execute;
4600 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4601 return EMULATION_FAILED;
4603 if (unlikely(ctxt->d &
4604 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4607 * These are copied unconditionally here, and checked unconditionally
4608 * in x86_emulate_insn.
4610 ctxt->check_perm = opcode.check_perm;
4611 ctxt->intercept = opcode.intercept;
4613 if (ctxt->d & NotImpl)
4614 return EMULATION_FAILED;
4616 if (mode == X86EMUL_MODE_PROT64) {
4617 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4619 else if (ctxt->d & NearBranch)
4623 if (ctxt->d & Op3264) {
4624 if (mode == X86EMUL_MODE_PROT64)
4630 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
4634 ctxt->op_bytes = 16;
4635 else if (ctxt->d & Mmx)
4639 /* ModRM and SIB bytes. */
4640 if (ctxt->d & ModRM) {
4641 rc = decode_modrm(ctxt, &ctxt->memop);
4642 if (!has_seg_override) {
4643 has_seg_override = true;
4644 ctxt->seg_override = ctxt->modrm_seg;
4646 } else if (ctxt->d & MemAbs)
4647 rc = decode_abs(ctxt, &ctxt->memop);
4648 if (rc != X86EMUL_CONTINUE)
4651 if (!has_seg_override)
4652 ctxt->seg_override = VCPU_SREG_DS;
4654 ctxt->memop.addr.mem.seg = ctxt->seg_override;
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
4660 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4661 if (rc != X86EMUL_CONTINUE)
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
4668 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4669 if (rc != X86EMUL_CONTINUE)
4672 /* Decode and fetch the destination operand: register or memory. */
4673 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4675 if (ctxt->rip_relative)
4676 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
4677 ctxt->memopp->addr.mem.ea + ctxt->_eip);
4680 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4683 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4685 return ctxt->d & PageTable;
4688 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
	/* The second termination condition applies only to REPE
	 * and REPNE. Test whether the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ and, if so, check the corresponding
	 * termination condition:
	 * - if REPE/REPZ and ZF = 0, we are done
	 * - if REPNE/REPNZ and ZF = 1, we are done
	 */
4697 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4698 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4699 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4700 ((ctxt->eflags & EFLG_ZF) == 0))
4701 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4702 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
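/*
 * Execute fwait under an exception fixup so that a pending x87 fault is
 * surfaced now, as #MF, rather than at an arbitrary later point while
 * emulating an MMX instruction.
 */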
4708 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4712 ctxt->ops->get_fpu(ctxt);
4713 asm volatile("1: fwait \n\t"
4715 ".pushsection .fixup,\"ax\" \n\t"
4717 "movb $1, %[fault] \n\t"
4720 _ASM_EXTABLE(1b, 3b)
4721 : [fault]"+qm"(fault));
4722 ctxt->ops->put_fpu(ctxt);
4724 if (unlikely(fault))
4725 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4727 return X86EMUL_CONTINUE;
4730 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4733 if (op->type == OP_MM)
4734 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
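/*
 * Fastop calling convention (mirroring the asm constraints below):
 * dst.val travels in rax, src.val in rdx, src2.val in rcx, and a
 * scratch EFLAGS image is exchanged through rdi. Each fastop is an
 * array of same-sized stubs, one per operand width, so for non-byte
 * operations the entry pointer is advanced by __ffs(dst.bytes) *
 * FASTOP_SIZE to select the right variant. The stub returns NULL in
 * the fastop pointer to signal a divide fault (#DE).
 */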
4737 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4739 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4740 if (!(ctxt->d & ByteOp))
4741 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4742 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4743 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4745 : "c"(ctxt->src2.val));
4746 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4747 if (!fop) /* exception is returned in fop variable */
4748 return emulate_de(ctxt);
4749 return X86EMUL_CONTINUE;
4752 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
	/* Zero the decode cache: everything from ->rip_relative up to ->modrm */
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
4757 ctxt->io_read.pos = 0;
4758 ctxt->io_read.end = 0;
4759 ctxt->mem_read.end = 0;
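/*
 * Note: the memset above depends on the field layout of struct
 * x86_emulate_ctxt - everything from rip_relative up to (but not
 * including) modrm is per-instruction decode state and is cleared in
 * one sweep; the read caches are reset explicitly afterwards.
 */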
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;

	ctxt->mem_read.pos = 0;

	/* The LOCK prefix is allowed only with some instructions. */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}
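	/*
	 * Example (illustrative): "lock add dword ptr [mem], eax" decodes
	 * with the Lock flag and a memory destination, so it passes the
	 * check above; "lock mov eax, ebx" applies LOCK to a register-only
	 * instruction and takes the #UD path instead, matching hardware.
	 */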
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
		    (ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the FPU is exception safe, we can
			 * fetch operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}
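		/*
		 * The destination is not pre-fetched for Mov-like MMX
		 * instructions: e.g. "movq mm0, [mem]" overwrites mm0
		 * completely, so reading the stale register value first
		 * would be wasted work.
		 */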
		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* The instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* A privileged instruction can be executed only at CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~EFLG_RF;
				goto done;
			}
		}
	}
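	/*
	 * Example (illustrative): "rep movsb" with RCX=0 performs zero
	 * iterations; the first-termination check above just moves RIP
	 * past the instruction and clears RF without touching memory or
	 * the index registers.
	 */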
	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:
	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;
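	/*
	 * RF is set while a string instruction is still iterating so that,
	 * if emulation is interrupted and the guest resumes at the same
	 * RIP, an instruction breakpoint there does not fire again; it is
	 * cleared once the instruction finally completes.
	 */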
	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;
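	/*
	 * Dispatch note: opcodes with an execute callback (or a Fastop
	 * stub) never reach the switch below; it only covers one-byte
	 * opcodes without a dedicated handler, while two- and three-byte
	 * opcodes branch to their own tables.
	 */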
	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f:	/* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d:		/* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97:	/* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98:		/* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9:		/* jmp rel */
	case 0xeb:		/* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:		/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8:		/* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9:		/* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc:		/* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd:		/* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;
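	/*
	 * Worked example for the 0x98 case above (illustrative): with
	 * op_bytes == 2, cbw sign-extends AL into AX, turning dst.val
	 * 0x80 into 0xff80; with op_bytes == 8, cdqe sign-extends EAX
	 * into RAX.
	 */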
writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;
	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;

		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset the read cache. This usually happens
				 * before decode, but since the instruction is
				 * restarted we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}

	ctxt->eip = ctxt->_eip;
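	/*
	 * Example (illustrative): a "rep insb" backed by the pio read-ahead
	 * buffer keeps returning EMULATION_RESTART until the buffer is
	 * drained (r->end == r->pos); a string op with no buffered I/O
	 * instead goes back to the guest every 1024 iterations (the 0x3ff
	 * mask) so that pending events can be serviced.
	 */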
done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20:		/* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21:		/* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f:	/* jcc (near) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}
void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}
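/*
 * Illustrative usage (hypothetical caller, not part of this file): these
 * two helpers bracket code that touches vcpu registers behind the
 * emulator's back:
 *
 *	emulator_writeback_register_cache(ctxt);    // flush cached GPRs
 *	touch_vcpu_registers_somehow(vcpu);         // hypothetical
 *	emulator_invalidate_register_cache(ctxt);   // force re-read
 */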