/******************************************************************************
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16	    ((u64)1 << 53)  /* No 16 bit operand */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
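
/*
 * For illustration (not part of the original file): an opcode's decode
 * flags pack one Op* value per field, so a table entry built from e.g.
 * (DstReg | SrcMem) can be unpacked again with the masks above:
 *
 *	dst  = (flags & DstMask)  >> DstShift;	 yields OpReg
 *	src  = (flags & SrcMask)  >> SrcShift;	 yields OpMem
 *	src2 = (flags & Src2Mask) >> Src2Shift;	 yields OpNone
 */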

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
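
/*
 * For illustration: the X<n> macros above simply repeat their argument,
 * e.g. X4(op) expands to "op, op, op, op", so a run of identical opcode
 * table entries (say, one per register in a 0x50..0x57 style row) can be
 * written as a single X8(...) line.
 */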

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
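
/*
 * A sketch of the resulting dispatch (illustrative only; the actual
 * dispatcher is the fastop() function declared further down):
 *
 *	void (*fop)(struct fastop *) = em_add;	    the 8-bit variant
 *	fop += __ffs(op_bytes) * FASTOP_SIZE;	    16/32/64-bit variants
 *
 * since __ffs(2)/__ffs(4)/__ffs(8) == 1/2/3 index the FASTOP_SIZE-spaced
 * entry points emitted by the FASTOP* macros below.
 */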

struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op,  dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");
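
/*
 * How the exception path works, for illustration: FOP1EEX tags the
 * faulting instruction (label "10:") with an extable entry pointing at
 * kvm_fastop_exception above, which clears %esi.  Since the caller passes
 * the fastop pointer in %rsi and treats a zero value on return as
 * "exception occurred" (see the calling convention comment earlier), a
 * faulting DIV/IDIV is reported without any per-op C glue.
 */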

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}
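
/*
 * e.g. with ctxt->ad_bytes == 2, address_mask(ctxt, 0x12345678) yields
 * 0x5678: only the low 16 bits of the register participate in 16-bit
 * addressing, while 4- and 8-byte modes keep 32/64 bits respectively.
 */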

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg, mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
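
/*
 * e.g. a descriptor with g=1 and limit=0xfffff scales to 0xffffffff
 * (4GB - 1), while with g=0 the limit stays in bytes, so 0xfffff remains
 * 0xfffff.
 */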

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			return emulate_gp(ctxt, 0);

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim)
				goto bad;
			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		} else {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim)
				goto bad;
			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		}
		if (size > *max_size)
			goto bad;
		break;
	}
	if (ctxt->mode != X86EMUL_MODE_PROT64)
		la &= (u32)-1;
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;

	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			mode = X86EMUL_MODE_PROT64;
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	return assign_eip(ctxt, dst, mode);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}
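
/*
 * Note on "15UL ^ cur_size" above, for illustration: cur_size never
 * exceeds 15, so the XOR is equivalent to 15 - cur_size; e.g.
 * cur_size == 3 gives 15 ^ 3 == 12 bytes still fetchable before the
 * architectural 15-byte instruction length limit.
 */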

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	_type _x; \
	\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += sizeof(_type); \
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
	ctxt->fetch.ptr += sizeof(_type); \
	_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
})

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}
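
/*
 * e.g. without a REX prefix, byteop modrm_reg == 4 selects AH, i.e. a
 * pointer to byte 1 of RAX; with any REX prefix present it would instead
 * select SPL (byte 0 of RSP), which is why highbyte_regs requires
 * rex_prefix == 0.
 */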

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP2R(cmp, cmp_r);

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}
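
/*
 * A worked 16-bit example: modrm == 0x46 (mod=1, reg=0, rm=6) plus a
 * disp8 decodes to [BP + disp8]; rm=6 with mod != 0 selects BP, so per
 * the check above the access defaults to the SS segment.
 */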

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
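
/*
 * A worked example: a 16-bit "bt" with src.val == 100 gives mask = ~15
 * and sv = 96, so the effective address is advanced by 96 >> 3 == 12
 * bytes and the in-word bit offset becomes 100 & 15 == 4.
 */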

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}
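
/*
 * e.g. selector 0x2b: index 5 (0x2b >> 3), table indicator bit 2 set
 * means the LDT branch above is taken, and the low two bits (RPL 3)
 * play no part in choosing the table.
 */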

/* allowed only for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

/* allowed only for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     bool in_task_switch,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or segment descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
				sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (is_noncanonical_address(get_desc_base(&seg_desc) |
					    ((u64)base3 << 32)))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
}

static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
				 VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
		return rc;
	}
	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	u16 old_cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
				 VCPU_SREG_CS);

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, &new_desc);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory. */
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->dst.type = OP_REG;
		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64-bit longmode.
	 * Also, a 64-bit guest running a 32-bit compat app will #UD.
	 * While this behaviour could be fixed (by emulating) to match the
	 * AMD response, Intel CPUs can't be made to behave like AMD's.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}
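
/*
 * For reference, CPUID leaf 0 returns the vendor string in ebx/edx/ecx
 * order, e.g. "GenuineIntel" is ebx="Genu", edx="ineI", ecx="ntel",
 * which is what the X86EMUL_CPUID_VENDOR_* constants above encode.
 */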

static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	}

	return X86EMUL_CONTINUE;
}
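
/*
 * MSR_STAR layout used above, for reference: bits 47:32 hold the
 * SYSCALL CS selector (SS is loaded as CS + 8), which is why the code
 * shifts msr_data right by 32 before deriving cs_sel and ss_sel.
 */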

static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	default:
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = msr_data;

	return X86EMUL_CONTINUE;
}
2396 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2398 const struct x86_emulate_ops *ops = ctxt->ops;
2399 struct desc_struct cs, ss;
2400 u64 msr_data, rcx, rdx;
2402 u16 cs_sel = 0, ss_sel = 0;
2404 /* inject #GP if in real mode or Virtual 8086 mode */
2405 if (ctxt->mode == X86EMUL_MODE_REAL ||
2406 ctxt->mode == X86EMUL_MODE_VM86)
2407 return emulate_gp(ctxt, 0);
2409 setup_syscalls_segments(ctxt, &cs, &ss);
2411 if ((ctxt->rex_prefix & 0x8) != 0x0)
2412 usermode = X86EMUL_MODE_PROT64;
2414 usermode = X86EMUL_MODE_PROT32;
2416 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2417 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2421 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2423 case X86EMUL_MODE_PROT32:
2424 cs_sel = (u16)(msr_data + 16);
2425 if ((msr_data & 0xfffc) == 0x0)
2426 return emulate_gp(ctxt, 0);
2427 ss_sel = (u16)(msr_data + 24);
2431 case X86EMUL_MODE_PROT64:
2432 cs_sel = (u16)(msr_data + 32);
2433 if (msr_data == 0x0)
2434 return emulate_gp(ctxt, 0);
2435 ss_sel = cs_sel + 8;
2438 if (is_noncanonical_address(rcx) ||
2439 is_noncanonical_address(rdx))
2440 return emulate_gp(ctxt, 0);
2443 cs_sel |= SELECTOR_RPL_MASK;
2444 ss_sel |= SELECTOR_RPL_MASK;
2446 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2447 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2450 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2452 return X86EMUL_CONTINUE;
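/*
 * True if the current privilege level exceeds IOPL, i.e. the insn may
 * not access I/O ports directly (never in real mode, always in VM86).
 */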
2455 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2458 if (ctxt->mode == X86EMUL_MODE_REAL)
2460 if (ctxt->mode == X86EMUL_MODE_VM86)
2462 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2463 return ctxt->ops->cpl(ctxt) > iopl;
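/*
 * Consult the I/O permission bitmap in the TSS: read the bitmap offset
 * at byte 102 of the TSS, then test the bits covering [port, port + len).
 * Any set bit (or an out-of-limit bitmap) denies access.
 */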
2466 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2469 const struct x86_emulate_ops *ops = ctxt->ops;
2470 struct desc_struct tr_seg;
2473 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2474 unsigned mask = (1 << len) - 1;
2477 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2480 if (desc_limit_scaled(&tr_seg) < 103)
2482 base = get_desc_base(&tr_seg);
2483 #ifdef CONFIG_X86_64
2484 base |= ((u64)base3) << 32;
2486 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2487 if (r != X86EMUL_CONTINUE)
2489 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2491 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2492 if (r != X86EMUL_CONTINUE)
2494 if ((perm >> bit_idx) & mask)
2499 static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2505 if (emulator_bad_iopl(ctxt))
2506 if (!emulator_io_port_access_allowed(ctxt, port, len))
2509 ctxt->perm_ok = true;
2514 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2515 struct tss_segment_16 *tss)
2517 tss->ip = ctxt->_eip;
2518 tss->flag = ctxt->eflags;
2519 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2520 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2521 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2522 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2523 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2524 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2525 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2526 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2528 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2529 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2530 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2531 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2532 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2535 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2536 struct tss_segment_16 *tss)
2541 ctxt->_eip = tss->ip;
2542 ctxt->eflags = tss->flag | 2;
2543 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2544 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2545 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2546 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2547 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2548 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2549 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2550 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2553	 * SDM says that segment selectors are loaded before segment descriptors.
2556 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2557 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2558 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2559 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2560 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2565	 * Now load segment descriptors. If a fault happens at this stage,
2566	 * it is handled in the context of the new task.
2568 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2570 if (ret != X86EMUL_CONTINUE)
2572 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2574 if (ret != X86EMUL_CONTINUE)
2576 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2578 if (ret != X86EMUL_CONTINUE)
2580 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2582 if (ret != X86EMUL_CONTINUE)
2584 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2586 if (ret != X86EMUL_CONTINUE)
2589 return X86EMUL_CONTINUE;
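/*
 * 16-bit task switch: save the current state into the old TSS, read the
 * new TSS, link it back to the old task if requested, and load the new
 * state from it.
 */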
2592 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2593 u16 tss_selector, u16 old_tss_sel,
2594 ulong old_tss_base, struct desc_struct *new_desc)
2596 const struct x86_emulate_ops *ops = ctxt->ops;
2597 struct tss_segment_16 tss_seg;
2599 u32 new_tss_base = get_desc_base(new_desc);
2601 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2603 if (ret != X86EMUL_CONTINUE)
2604 /* FIXME: need to provide precise fault address */
2607 save_state_to_tss16(ctxt, &tss_seg);
2609 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2611 if (ret != X86EMUL_CONTINUE)
2612 /* FIXME: need to provide precise fault address */
2615 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2617 if (ret != X86EMUL_CONTINUE)
2618 /* FIXME: need to provide precise fault address */
2621 if (old_tss_sel != 0xffff) {
2622 tss_seg.prev_task_link = old_tss_sel;
2624 ret = ops->write_std(ctxt, new_tss_base,
2625 &tss_seg.prev_task_link,
2626 sizeof tss_seg.prev_task_link,
2628 if (ret != X86EMUL_CONTINUE)
2629 /* FIXME: need to provide precise fault address */
2633 return load_state_from_tss16(ctxt, &tss_seg);
2636 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2637 struct tss_segment_32 *tss)
2639	/* CR3 and the LDT selector are intentionally not saved */
2640 tss->eip = ctxt->_eip;
2641 tss->eflags = ctxt->eflags;
2642 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2643 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2644 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2645 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2646 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2647 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2648 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2649 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2651 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2652 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2653 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2654 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2655 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2656 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2659 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2660 struct tss_segment_32 *tss)
2665 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2666 return emulate_gp(ctxt, 0);
2667 ctxt->_eip = tss->eip;
2668 ctxt->eflags = tss->eflags | 2;
2670 /* General purpose registers */
2671 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2672 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2673 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2674 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2675 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2676 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2677 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2678 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2681	 * SDM says that segment selectors are loaded before segment
2682	 * descriptors. This is important because CPL checks will use CS.RPL.
2685 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2686 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2687 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2688 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2689 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2690 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2691 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2694 * If we're switching between Protected Mode and VM86, we need to make
2695 * sure to update the mode before loading the segment descriptors so
2696 * that the selectors are interpreted correctly.
2698 if (ctxt->eflags & X86_EFLAGS_VM) {
2699 ctxt->mode = X86EMUL_MODE_VM86;
2702 ctxt->mode = X86EMUL_MODE_PROT32;
2707	 * Now load segment descriptors. If a fault happens at this stage,
2708	 * it is handled in the context of the new task.
2710 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2712 if (ret != X86EMUL_CONTINUE)
2714 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2716 if (ret != X86EMUL_CONTINUE)
2718 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2720 if (ret != X86EMUL_CONTINUE)
2722 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2724 if (ret != X86EMUL_CONTINUE)
2726 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2728 if (ret != X86EMUL_CONTINUE)
2730 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2732 if (ret != X86EMUL_CONTINUE)
2734 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2736 if (ret != X86EMUL_CONTINUE)
2739 return X86EMUL_CONTINUE;
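/*
 * 32-bit task switch.  Unlike the 16-bit variant, only the general
 * purpose registers and segment selectors (the eip..ldt_selector range)
 * are written back to the old TSS.
 */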
2742 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2743 u16 tss_selector, u16 old_tss_sel,
2744 ulong old_tss_base, struct desc_struct *new_desc)
2746 const struct x86_emulate_ops *ops = ctxt->ops;
2747 struct tss_segment_32 tss_seg;
2749 u32 new_tss_base = get_desc_base(new_desc);
2750 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2751 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2753 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2755 if (ret != X86EMUL_CONTINUE)
2756 /* FIXME: need to provide precise fault address */
2759 save_state_to_tss32(ctxt, &tss_seg);
2761 /* Only GP registers and segment selectors are saved */
2762 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2763 ldt_sel_offset - eip_offset, &ctxt->exception);
2764 if (ret != X86EMUL_CONTINUE)
2765 /* FIXME: need to provide precise fault address */
2768 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2770 if (ret != X86EMUL_CONTINUE)
2771 /* FIXME: need to provide precise fault address */
2774 if (old_tss_sel != 0xffff) {
2775 tss_seg.prev_task_link = old_tss_sel;
2777 ret = ops->write_std(ctxt, new_tss_base,
2778 &tss_seg.prev_task_link,
2779 sizeof tss_seg.prev_task_link,
2781 if (ret != X86EMUL_CONTINUE)
2782 /* FIXME: need to provide precise fault address */
2786 return load_state_from_tss32(ctxt, &tss_seg);
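/*
 * Common task-switch logic: validate the new TSS descriptor, perform the
 * privilege check for software interrupts through task gates, clear or
 * set the busy flag and the NT/TS bits as the switch reason dictates,
 * then hand off to task_switch_16/32.
 */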
2789 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2790 u16 tss_selector, int idt_index, int reason,
2791 bool has_error_code, u32 error_code)
2793 const struct x86_emulate_ops *ops = ctxt->ops;
2794 struct desc_struct curr_tss_desc, next_tss_desc;
2796 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2797 ulong old_tss_base =
2798 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2802 /* FIXME: old_tss_base == ~0 ? */
2804 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2805 if (ret != X86EMUL_CONTINUE)
2807 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2808 if (ret != X86EMUL_CONTINUE)
2811 /* FIXME: check that next_tss_desc is tss */
2814 * Check privileges. The three cases are task switch caused by...
2816 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2817 * 2. Exception/IRQ/iret: No check is performed
2818 * 3. jmp/call to TSS/task-gate: No check is performed since the
2819 * hardware checks it before exiting.
2821 if (reason == TASK_SWITCH_GATE) {
2822 if (idt_index != -1) {
2823 /* Software interrupts */
2824 struct desc_struct task_gate_desc;
2827 ret = read_interrupt_descriptor(ctxt, idt_index,
2829 if (ret != X86EMUL_CONTINUE)
2832 dpl = task_gate_desc.dpl;
2833 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2834 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2838 desc_limit = desc_limit_scaled(&next_tss_desc);
2839 if (!next_tss_desc.p ||
2840 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2841 desc_limit < 0x2b)) {
2842 return emulate_ts(ctxt, tss_selector & 0xfffc);
2845 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2846 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2847 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2850 if (reason == TASK_SWITCH_IRET)
2851 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2853	/* Set the back link to the previous task only if the NT bit is set
2854	   in eflags; note that old_tss_sel is not used after this point */
2855 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2856 old_tss_sel = 0xffff;
2858 if (next_tss_desc.type & 8)
2859 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2860 old_tss_base, &next_tss_desc);
2862 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2863 old_tss_base, &next_tss_desc);
2864 if (ret != X86EMUL_CONTINUE)
2867 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2868 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2870 if (reason != TASK_SWITCH_IRET) {
2871 next_tss_desc.type |= (1 << 1); /* set busy flag */
2872 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2875 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2876 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2878 if (has_error_code) {
2879 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2880 ctxt->lock_prefix = 0;
2881 ctxt->src.val = (unsigned long) error_code;
2882 ret = em_push(ctxt);
2888 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2889 u16 tss_selector, int idt_index, int reason,
2890 bool has_error_code, u32 error_code)
2894 invalidate_registers(ctxt);
2895 ctxt->_eip = ctxt->eip;
2896 ctxt->dst.type = OP_NONE;
2898 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2899 has_error_code, error_code);
2901 if (rc == X86EMUL_CONTINUE) {
2902 ctxt->eip = ctxt->_eip;
2903 writeback_registers(ctxt);
2906 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
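/*
 * Advance a string operand register (SI/DI) by count elements, honouring
 * the direction flag, and recompute the operand's effective address.
 */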
2909 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2912 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2914 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2915 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
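/*
 * DAS: decimal adjust AL after subtraction.  Each nibble of AL is
 * corrected by 6 if it (or AF/CF) indicates a borrow out of a BCD digit;
 * CF and AF are recomputed accordingly.
 */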
2918 static int em_das(struct x86_emulate_ctxt *ctxt)
2921 bool af, cf, old_cf;
2923 cf = ctxt->eflags & X86_EFLAGS_CF;
2929 af = ctxt->eflags & X86_EFLAGS_AF;
2930 if ((al & 0x0f) > 9 || af) {
2932 cf = old_cf | (al >= 250);
2937 if (old_al > 0x99 || old_cf) {
2943 /* Set PF, ZF, SF */
2944 ctxt->src.type = OP_IMM;
2946 ctxt->src.bytes = 1;
2947 fastop(ctxt, em_or);
2948 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2950 ctxt->eflags |= X86_EFLAGS_CF;
2952 ctxt->eflags |= X86_EFLAGS_AF;
2953 return X86EMUL_CONTINUE;
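/*
 * AAM: ASCII adjust after multiply.  Divide AL by the immediate
 * (normally 10), leaving the quotient in AH and the remainder in AL;
 * an immediate of zero raises #DE.
 */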
2956 static int em_aam(struct x86_emulate_ctxt *ctxt)
2960 if (ctxt->src.val == 0)
2961 return emulate_de(ctxt);
2963 al = ctxt->dst.val & 0xff;
2964 ah = al / ctxt->src.val;
2965 al %= ctxt->src.val;
2967 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2969 /* Set PF, ZF, SF */
2970 ctxt->src.type = OP_IMM;
2972 ctxt->src.bytes = 1;
2973 fastop(ctxt, em_or);
2975 return X86EMUL_CONTINUE;
2978 static int em_aad(struct x86_emulate_ctxt *ctxt)
2980 u8 al = ctxt->dst.val & 0xff;
2981 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2983 al = (al + (ah * ctxt->src.val)) & 0xff;
2985 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2987 /* Set PF, ZF, SF */
2988 ctxt->src.type = OP_IMM;
2990 ctxt->src.bytes = 1;
2991 fastop(ctxt, em_or);
2993 return X86EMUL_CONTINUE;
2996 static int em_call(struct x86_emulate_ctxt *ctxt)
2999 long rel = ctxt->src.val;
3001 ctxt->src.val = (unsigned long)ctxt->_eip;
3002 rc = jmp_rel(ctxt, rel);
3003 if (rc != X86EMUL_CONTINUE)
3005 return em_push(ctxt);
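/*
 * Far call: load the new CS descriptor and target IP, then push the old
 * CS and IP.  If a push faults after CS was already switched, restore
 * the old CS:IP so the guest sees a consistent state.
 */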
3008 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3013 struct desc_struct old_desc, new_desc;
3014 const struct x86_emulate_ops *ops = ctxt->ops;
3015 int cpl = ctxt->ops->cpl(ctxt);
3017 old_eip = ctxt->_eip;
3018 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3020 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3021 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3023 if (rc != X86EMUL_CONTINUE)
3024 return X86EMUL_CONTINUE;
3026 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3027 if (rc != X86EMUL_CONTINUE)
3030 ctxt->src.val = old_cs;
3032 if (rc != X86EMUL_CONTINUE)
3035 ctxt->src.val = old_eip;
3037	/* If we failed, we tainted the memory, but at the very least we should
3039 if (rc != X86EMUL_CONTINUE)
3043 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3048 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3053 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3054 if (rc != X86EMUL_CONTINUE)
3056 rc = assign_eip_near(ctxt, eip);
3057 if (rc != X86EMUL_CONTINUE)
3059 rsp_increment(ctxt, ctxt->src.val);
3060 return X86EMUL_CONTINUE;
3063 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3065 /* Write back the register source. */
3066 ctxt->src.val = ctxt->dst.val;
3067 write_register_operand(&ctxt->src);
3069 /* Write back the memory destination with implicit LOCK prefix. */
3070 ctxt->dst.val = ctxt->src.orig_val;
3071 ctxt->lock_prefix = 1;
3072 return X86EMUL_CONTINUE;
3075 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3077 ctxt->dst.val = ctxt->src2.val;
3078 return fastop(ctxt, em_imul);
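/*
 * CWD/CDQ/CQO: sign-extend the accumulator into DX/EDX/RDX by
 * replicating its sign bit across the destination register.
 */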
3081 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3083 ctxt->dst.type = OP_REG;
3084 ctxt->dst.bytes = ctxt->src.bytes;
3085 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3086 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3088 return X86EMUL_CONTINUE;
3091 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3095 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3096 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3097 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3098 return X86EMUL_CONTINUE;
3101 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3105 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3106 return emulate_gp(ctxt, 0);
3107 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3108 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3109 return X86EMUL_CONTINUE;
3112 static int em_mov(struct x86_emulate_ctxt *ctxt)
3114 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3115 return X86EMUL_CONTINUE;
3118 #define FFL(x) bit(X86_FEATURE_##x)
3120 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3122 u32 ebx, ecx, edx, eax = 1;
3126 * Check MOVBE is set in the guest-visible CPUID leaf.
3128 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3129 if (!(ecx & FFL(MOVBE)))
3130 return emulate_ud(ctxt);
3132 switch (ctxt->op_bytes) {
3135 * From MOVBE definition: "...When the operand size is 16 bits,
3136 * the upper word of the destination register remains unchanged
3139	 * Casting either ->valptr or ->val to u16 breaks strict-aliasing
3140	 * rules, so we have to do the operation almost by hand.
3142 tmp = (u16)ctxt->src.val;
3143 ctxt->dst.val &= ~0xffffUL;
3144 ctxt->dst.val |= (unsigned long)swab16(tmp);
3147 ctxt->dst.val = swab32((u32)ctxt->src.val);
3150 ctxt->dst.val = swab64(ctxt->src.val);
3155 return X86EMUL_CONTINUE;
3158 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3160 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3161 return emulate_gp(ctxt, 0);
3163 /* Disable writeback. */
3164 ctxt->dst.type = OP_NONE;
3165 return X86EMUL_CONTINUE;
3168 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3172 if (ctxt->mode == X86EMUL_MODE_PROT64)
3173 val = ctxt->src.val & ~0ULL;
3175 val = ctxt->src.val & ~0U;
3177 /* #UD condition is already handled. */
3178 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3179 return emulate_gp(ctxt, 0);
3181 /* Disable writeback. */
3182 ctxt->dst.type = OP_NONE;
3183 return X86EMUL_CONTINUE;
3186 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3190 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3191 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3192 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3193 return emulate_gp(ctxt, 0);
3195 return X86EMUL_CONTINUE;
3198 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3202 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3203 return emulate_gp(ctxt, 0);
3205 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3206 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3207 return X86EMUL_CONTINUE;
3210 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3212 if (ctxt->modrm_reg > VCPU_SREG_GS)
3213 return emulate_ud(ctxt);
3215 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3216 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3217 ctxt->dst.bytes = 2;
3218 return X86EMUL_CONTINUE;
3221 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3223 u16 sel = ctxt->src.val;
3225 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3226 return emulate_ud(ctxt);
3228 if (ctxt->modrm_reg == VCPU_SREG_SS)
3229 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3231 /* Disable writeback. */
3232 ctxt->dst.type = OP_NONE;
3233 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3236 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3238 u16 sel = ctxt->src.val;
3240 /* Disable writeback. */
3241 ctxt->dst.type = OP_NONE;
3242 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3245 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3247 u16 sel = ctxt->src.val;
3249 /* Disable writeback. */
3250 ctxt->dst.type = OP_NONE;
3251 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3254 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3259 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3260 if (rc == X86EMUL_CONTINUE)
3261 ctxt->ops->invlpg(ctxt, linear);
3262 /* Disable writeback. */
3263 ctxt->dst.type = OP_NONE;
3264 return X86EMUL_CONTINUE;
3267 static int em_clts(struct x86_emulate_ctxt *ctxt)
3271 cr0 = ctxt->ops->get_cr(ctxt, 0);
3273 ctxt->ops->set_cr(ctxt, 0, cr0);
3274 return X86EMUL_CONTINUE;
3277 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3279 int rc = ctxt->ops->fix_hypercall(ctxt);
3281 if (rc != X86EMUL_CONTINUE)
3284 /* Let the processor re-execute the fixed hypercall */
3285 ctxt->_eip = ctxt->eip;
3286 /* Disable writeback. */
3287 ctxt->dst.type = OP_NONE;
3288 return X86EMUL_CONTINUE;
3291 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3292 void (*get)(struct x86_emulate_ctxt *ctxt,
3293 struct desc_ptr *ptr))
3295 struct desc_ptr desc_ptr;
3297 if (ctxt->mode == X86EMUL_MODE_PROT64)
3299 get(ctxt, &desc_ptr);
3300 if (ctxt->op_bytes == 2) {
3302 desc_ptr.address &= 0x00ffffff;
3304 /* Disable writeback. */
3305 ctxt->dst.type = OP_NONE;
3306 return segmented_write(ctxt, ctxt->dst.addr.mem,
3307 &desc_ptr, 2 + ctxt->op_bytes);
3310 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3312 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3315 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3317 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3320 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3322 struct desc_ptr desc_ptr;
3325 if (ctxt->mode == X86EMUL_MODE_PROT64)
3327 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3328 &desc_ptr.size, &desc_ptr.address,
3330 if (rc != X86EMUL_CONTINUE)
3332 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3333 is_noncanonical_address(desc_ptr.address))
3334 return emulate_gp(ctxt, 0);
3336 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3338 ctxt->ops->set_idt(ctxt, &desc_ptr);
3339 /* Disable writeback. */
3340 ctxt->dst.type = OP_NONE;
3341 return X86EMUL_CONTINUE;
3344 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3346 return em_lgdt_lidt(ctxt, true);
3349 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3353 rc = ctxt->ops->fix_hypercall(ctxt);
3355 /* Disable writeback. */
3356 ctxt->dst.type = OP_NONE;
3360 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3362 return em_lgdt_lidt(ctxt, false);
3365 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3367 if (ctxt->dst.type == OP_MEM)
3368 ctxt->dst.bytes = 2;
3369 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3370 return X86EMUL_CONTINUE;
3373 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3375 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3376 | (ctxt->src.val & 0x0f));
3377 ctxt->dst.type = OP_NONE;
3378 return X86EMUL_CONTINUE;
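/*
 * LOOP/LOOPE/LOOPNE: decrement *CX and branch while it is non-zero; for
 * 0xe0/0xe1 the ZF condition is derived from the low opcode bits via
 * test_cc(ctxt->b ^ 0x5).
 */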
3381 static int em_loop(struct x86_emulate_ctxt *ctxt)
3383 int rc = X86EMUL_CONTINUE;
3385 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3386 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3387 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3388 rc = jmp_rel(ctxt, ctxt->src.val);
3393 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3395 int rc = X86EMUL_CONTINUE;
3397 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3398 rc = jmp_rel(ctxt, ctxt->src.val);
3403 static int em_in(struct x86_emulate_ctxt *ctxt)
3405 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3407 return X86EMUL_IO_NEEDED;
3409 return X86EMUL_CONTINUE;
3412 static int em_out(struct x86_emulate_ctxt *ctxt)
3414 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3416 /* Disable writeback. */
3417 ctxt->dst.type = OP_NONE;
3418 return X86EMUL_CONTINUE;
3421 static int em_cli(struct x86_emulate_ctxt *ctxt)
3423 if (emulator_bad_iopl(ctxt))
3424 return emulate_gp(ctxt, 0);
3426 ctxt->eflags &= ~X86_EFLAGS_IF;
3427 return X86EMUL_CONTINUE;
3430 static int em_sti(struct x86_emulate_ctxt *ctxt)
3432 if (emulator_bad_iopl(ctxt))
3433 return emulate_gp(ctxt, 0);
3435 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3436 ctxt->eflags |= X86_EFLAGS_IF;
3437 return X86EMUL_CONTINUE;
3440 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3442 u32 eax, ebx, ecx, edx;
3444 eax = reg_read(ctxt, VCPU_REGS_RAX);
3445 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3446 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3447 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3448 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3449 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3450 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3451 return X86EMUL_CONTINUE;
3454 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3458 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3459 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3461 ctxt->eflags &= ~0xffUL;
3462 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3463 return X86EMUL_CONTINUE;
3466 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3468 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3469 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3470 return X86EMUL_CONTINUE;
3473 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3475 switch (ctxt->op_bytes) {
3476 #ifdef CONFIG_X86_64
3478 asm("bswap %0" : "+r"(ctxt->dst.val));
3482 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3485 return X86EMUL_CONTINUE;
3488 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3490 /* emulating clflush regardless of cpuid */
3491 return X86EMUL_CONTINUE;
3494 static bool valid_cr(int nr)
3506 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3508 if (!valid_cr(ctxt->modrm_reg))
3509 return emulate_ud(ctxt);
3511 return X86EMUL_CONTINUE;
3514 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3516 u64 new_val = ctxt->src.val64;
3517 int cr = ctxt->modrm_reg;
3520 static u64 cr_reserved_bits[] = {
3521 0xffffffff00000000ULL,
3522 0, 0, 0, /* CR3 checked later */
3529 return emulate_ud(ctxt);
3531 if (new_val & cr_reserved_bits[cr])
3532 return emulate_gp(ctxt, 0);
3537 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3538 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3539 return emulate_gp(ctxt, 0);
3541 cr4 = ctxt->ops->get_cr(ctxt, 4);
3542 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3544 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3545 !(cr4 & X86_CR4_PAE))
3546 return emulate_gp(ctxt, 0);
3553 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3554 if (efer & EFER_LMA)
3555 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
3558 return emulate_gp(ctxt, 0);
3563 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3565 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3566 return emulate_gp(ctxt, 0);
3572 return X86EMUL_CONTINUE;
3575 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3579 ctxt->ops->get_dr(ctxt, 7, &dr7);
3581	/* Check if DR7.GD (general detect enable, bit 13) is set */
3582 return dr7 & (1 << 13);
3585 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3587 int dr = ctxt->modrm_reg;
3591 return emulate_ud(ctxt);
3593 cr4 = ctxt->ops->get_cr(ctxt, 4);
3594 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3595 return emulate_ud(ctxt);
3597 if (check_dr7_gd(ctxt)) {
3600 ctxt->ops->get_dr(ctxt, 6, &dr6);
3602 dr6 |= DR6_BD | DR6_RTM;
3603 ctxt->ops->set_dr(ctxt, 6, dr6);
3604 return emulate_db(ctxt);
3607 return X86EMUL_CONTINUE;
3610 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3612 u64 new_val = ctxt->src.val64;
3613 int dr = ctxt->modrm_reg;
3615 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3616 return emulate_gp(ctxt, 0);
3618 return check_dr_read(ctxt);
3621 static int check_svme(struct x86_emulate_ctxt *ctxt)
3625 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3627 if (!(efer & EFER_SVME))
3628 return emulate_ud(ctxt);
3630 return X86EMUL_CONTINUE;
3633 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3635 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3637 /* Valid physical address? */
3638 if (rax & 0xffff000000000000ULL)
3639 return emulate_gp(ctxt, 0);
3641 return check_svme(ctxt);
3644 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3646 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3648 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3649 return emulate_ud(ctxt);
3651 return X86EMUL_CONTINUE;
3654 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3656 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3657 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3659 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3660 ctxt->ops->check_pmc(ctxt, rcx))
3661 return emulate_gp(ctxt, 0);
3663 return X86EMUL_CONTINUE;
3666 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3668 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3669	if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
3670 return emulate_gp(ctxt, 0);
3672 return X86EMUL_CONTINUE;
3675 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3677 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3678	if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
3679 return emulate_gp(ctxt, 0);
3681 return X86EMUL_CONTINUE;
3684 #define D(_y) { .flags = (_y) }
3685 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3686 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3687 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3688 #define N D(NotImpl)
3689 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3690 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3691 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3692 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3693 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3694 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3695 #define II(_f, _e, _i) \
3696 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3697 #define IIP(_f, _e, _i, _p) \
3698 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3699 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3700 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3702 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3703 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3704 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3705 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3706 #define I2bvIP(_f, _e, _i, _p) \
3707 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3709 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3710 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3711 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3713 static const struct opcode group7_rm0[] = {
3715 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3719 static const struct opcode group7_rm1[] = {
3720 DI(SrcNone | Priv, monitor),
3721 DI(SrcNone | Priv, mwait),
3725 static const struct opcode group7_rm3[] = {
3726 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3727 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
3728 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3729 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3730 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3731 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3732 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3733 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3736 static const struct opcode group7_rm7[] = {
3738 DIP(SrcNone, rdtscp, check_rdtsc),
3742 static const struct opcode group1[] = {
3744 F(Lock | PageTable, em_or),
3747 F(Lock | PageTable, em_and),
3753 static const struct opcode group1A[] = {
3754 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3757 static const struct opcode group2[] = {
3758 F(DstMem | ModRM, em_rol),
3759 F(DstMem | ModRM, em_ror),
3760 F(DstMem | ModRM, em_rcl),
3761 F(DstMem | ModRM, em_rcr),
3762 F(DstMem | ModRM, em_shl),
3763 F(DstMem | ModRM, em_shr),
3764 F(DstMem | ModRM, em_shl),
3765 F(DstMem | ModRM, em_sar),
3768 static const struct opcode group3[] = {
3769 F(DstMem | SrcImm | NoWrite, em_test),
3770 F(DstMem | SrcImm | NoWrite, em_test),
3771 F(DstMem | SrcNone | Lock, em_not),
3772 F(DstMem | SrcNone | Lock, em_neg),
3773 F(DstXacc | Src2Mem, em_mul_ex),
3774 F(DstXacc | Src2Mem, em_imul_ex),
3775 F(DstXacc | Src2Mem, em_div_ex),
3776 F(DstXacc | Src2Mem, em_idiv_ex),
3779 static const struct opcode group4[] = {
3780 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3781 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3785 static const struct opcode group5[] = {
3786 F(DstMem | SrcNone | Lock, em_inc),
3787 F(DstMem | SrcNone | Lock, em_dec),
3788 I(SrcMem | NearBranch, em_call_near_abs),
3789 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3790 I(SrcMem | NearBranch, em_jmp_abs),
3791 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
3792 I(SrcMem | Stack, em_push), D(Undefined),
3795 static const struct opcode group6[] = {
3798 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3799 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3803 static const struct group_dual group7 = { {
3804 II(Mov | DstMem, em_sgdt, sgdt),
3805 II(Mov | DstMem, em_sidt, sidt),
3806 II(SrcMem | Priv, em_lgdt, lgdt),
3807 II(SrcMem | Priv, em_lidt, lidt),
3808 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3809 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3810 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3814 N, EXT(0, group7_rm3),
3815 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3816 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3820 static const struct opcode group8[] = {
3822 F(DstMem | SrcImmByte | NoWrite, em_bt),
3823 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3824 F(DstMem | SrcImmByte | Lock, em_btr),
3825 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3828 static const struct group_dual group9 = { {
3829 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3831 N, N, N, N, N, N, N, N,
3834 static const struct opcode group11[] = {
3835 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3839 static const struct gprefix pfx_0f_ae_7 = {
3840 I(SrcMem | ByteOp, em_clflush), N, N, N,
3843 static const struct group_dual group15 = { {
3844 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3846 N, N, N, N, N, N, N, N,
3849 static const struct gprefix pfx_0f_6f_0f_7f = {
3850 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3853 static const struct gprefix pfx_0f_2b = {
3854 I(0, em_mov), I(0, em_mov), N, N,
3857 static const struct gprefix pfx_0f_28_0f_29 = {
3858 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3861 static const struct gprefix pfx_0f_e7 = {
3862 N, I(Sse, em_mov), N, N,
3865 static const struct escape escape_d9 = { {
3866 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3869 N, N, N, N, N, N, N, N,
3871 N, N, N, N, N, N, N, N,
3873 N, N, N, N, N, N, N, N,
3875 N, N, N, N, N, N, N, N,
3877 N, N, N, N, N, N, N, N,
3879 N, N, N, N, N, N, N, N,
3881 N, N, N, N, N, N, N, N,
3883 N, N, N, N, N, N, N, N,
3886 static const struct escape escape_db = { {
3887 N, N, N, N, N, N, N, N,
3890 N, N, N, N, N, N, N, N,
3892 N, N, N, N, N, N, N, N,
3894 N, N, N, N, N, N, N, N,
3896 N, N, N, N, N, N, N, N,
3898 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3900 N, N, N, N, N, N, N, N,
3902 N, N, N, N, N, N, N, N,
3904 N, N, N, N, N, N, N, N,
3907 static const struct escape escape_dd = { {
3908 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3911 N, N, N, N, N, N, N, N,
3913 N, N, N, N, N, N, N, N,
3915 N, N, N, N, N, N, N, N,
3917 N, N, N, N, N, N, N, N,
3919 N, N, N, N, N, N, N, N,
3921 N, N, N, N, N, N, N, N,
3923 N, N, N, N, N, N, N, N,
3925 N, N, N, N, N, N, N, N,
3928 static const struct opcode opcode_table[256] = {
3930 F6ALU(Lock, em_add),
3931 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3932 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3934 F6ALU(Lock | PageTable, em_or),
3935 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3938 F6ALU(Lock, em_adc),
3939 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3940 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3942 F6ALU(Lock, em_sbb),
3943 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3944 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3946 F6ALU(Lock | PageTable, em_and), N, N,
3948 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3950 F6ALU(Lock, em_xor), N, N,
3952 F6ALU(NoWrite, em_cmp), N, N,
3954 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3956 X8(I(SrcReg | Stack, em_push)),
3958 X8(I(DstReg | Stack, em_pop)),
3960 I(ImplicitOps | Stack | No64, em_pusha),
3961 I(ImplicitOps | Stack | No64, em_popa),
3962 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3965 I(SrcImm | Mov | Stack, em_push),
3966 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3967 I(SrcImmByte | Mov | Stack, em_push),
3968 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3969 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3970 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3972 X16(D(SrcImmByte | NearBranch)),
3974 G(ByteOp | DstMem | SrcImm, group1),
3975 G(DstMem | SrcImm, group1),
3976 G(ByteOp | DstMem | SrcImm | No64, group1),
3977 G(DstMem | SrcImmByte, group1),
3978 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3979 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3981 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3982 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3983 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3984 D(ModRM | SrcMem | NoAccess | DstReg),
3985 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3988 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3990 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3991 I(SrcImmFAddr | No64, em_call_far), N,
3992 II(ImplicitOps | Stack, em_pushf, pushf),
3993 II(ImplicitOps | Stack, em_popf, popf),
3994 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
3996 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3997 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
3998 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3999 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
4001 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4002 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4003 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4004 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4006 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4008 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4010 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4011 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4012 I(ImplicitOps | NearBranch, em_ret),
4013 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4014 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4015 G(ByteOp, group11), G(0, group11),
4017 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4018 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4019 I(ImplicitOps | Stack, em_ret_far),
4020 D(ImplicitOps), DI(SrcImmByte, intn),
4021 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4023 G(Src2One | ByteOp, group2), G(Src2One, group2),
4024 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4025 I(DstAcc | SrcImmUByte | No64, em_aam),
4026 I(DstAcc | SrcImmUByte | No64, em_aad),
4027 F(DstAcc | ByteOp | No64, em_salc),
4028 I(DstAcc | SrcXLat | ByteOp, em_mov),
4030 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4032 X3(I(SrcImmByte | NearBranch, em_loop)),
4033 I(SrcImmByte | NearBranch, em_jcxz),
4034 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4035 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4037 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4038 I(SrcImmFAddr | No64, em_jmp_far),
4039 D(SrcImmByte | ImplicitOps | NearBranch),
4040 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4041 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4043 N, DI(ImplicitOps, icebp), N, N,
4044 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4045 G(ByteOp, group3), G(0, group3),
4047 D(ImplicitOps), D(ImplicitOps),
4048 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4049 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4052 static const struct opcode twobyte_table[256] = {
4054 G(0, group6), GD(0, &group7), N, N,
4055 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4056 II(ImplicitOps | Priv, em_clts, clts), N,
4057 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4058 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4060 N, N, N, N, N, N, N, N,
4061 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4062 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4064 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4065 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4066 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4068 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4071 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4072 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4073 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4076 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4077 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4078 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4079 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4080 I(ImplicitOps | EmulateOnUD, em_sysenter),
4081 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4083 N, N, N, N, N, N, N, N,
4085 X16(D(DstReg | SrcMem | ModRM)),
4087 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4092 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4097 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4099 X16(D(SrcImm | NearBranch)),
4101 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4103 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4104 II(ImplicitOps, em_cpuid, cpuid),
4105 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4106 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4107 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4109 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4110 DI(ImplicitOps, rsm),
4111 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4112 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4113 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4114 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4116 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
4117 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4118 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4119 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4120 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4121 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4125 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4126 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4127 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4129 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4130 N, I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov),
4131 N, N, N, GD(0, &group9),
4133 X8(I(DstReg, em_bswap)),
4135 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4137 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4138 N, N, N, N, N, N, N, N,
4140 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4143 static const struct gprefix three_byte_0f_38_f0 = {
4144 I(DstReg | SrcMem | Mov, em_movbe), N, N, N
4147 static const struct gprefix three_byte_0f_38_f1 = {
4148 I(DstMem | SrcReg | Mov, em_movbe), N, N, N
4152 * Insns below are selected by the prefix which is indexed by the third opcode byte.
4155 static const struct opcode opcode_map_0f_38[256] = {
4157 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4159 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4161 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
4162 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
4181 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4185 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4191 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4192 unsigned size, bool sign_extension)
4194 int rc = X86EMUL_CONTINUE;
4198 op->addr.mem.ea = ctxt->_eip;
4199 /* NB. Immediates are sign-extended as necessary. */
4200 switch (op->bytes) {
4202 op->val = insn_fetch(s8, ctxt);
4205 op->val = insn_fetch(s16, ctxt);
4208 op->val = insn_fetch(s32, ctxt);
4211 op->val = insn_fetch(s64, ctxt);
4214 if (!sign_extension) {
4215 switch (op->bytes) {
4223 op->val &= 0xffffffff;
4231 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4234 int rc = X86EMUL_CONTINUE;
4238 decode_register_operand(ctxt, op);
4241 rc = decode_imm(ctxt, op, 1, false);
4244 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4248 if (ctxt->d & BitOp)
4249 fetch_bit_operand(ctxt);
4250 op->orig_val = op->val;
4253 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4257 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4258 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4259 fetch_register_operand(op);
4260 op->orig_val = op->val;
4264 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4265 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4266 fetch_register_operand(op);
4267 op->orig_val = op->val;
4270 if (ctxt->d & ByteOp) {
4275 op->bytes = ctxt->op_bytes;
4276 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4277 fetch_register_operand(op);
4278 op->orig_val = op->val;
4282 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4284 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
4285 op->addr.mem.seg = VCPU_SREG_ES;
4292 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4293 fetch_register_operand(op);
4297 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4300 rc = decode_imm(ctxt, op, 1, true);
4307 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4310 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4313 ctxt->memop.bytes = 1;
4314 if (ctxt->memop.type == OP_REG) {
4315 ctxt->memop.addr.reg = decode_register(ctxt,
4316 ctxt->modrm_rm, true);
4317 fetch_register_operand(&ctxt->memop);
4321 ctxt->memop.bytes = 2;
4324 ctxt->memop.bytes = 4;
4327 rc = decode_imm(ctxt, op, 2, false);
4330 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4334 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4336 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
4337 op->addr.mem.seg = ctxt->seg_override;
4343 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4345 register_address(ctxt,
4346 reg_read(ctxt, VCPU_REGS_RBX) +
4347 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4348 op->addr.mem.seg = ctxt->seg_override;
4353 op->addr.mem.ea = ctxt->_eip;
4354 op->bytes = ctxt->op_bytes + 2;
4355 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4358 ctxt->memop.bytes = ctxt->op_bytes + 2;
4361 op->val = VCPU_SREG_ES;
4364 op->val = VCPU_SREG_CS;
4367 op->val = VCPU_SREG_SS;
4370 op->val = VCPU_SREG_DS;
4373 op->val = VCPU_SREG_FS;
4376 op->val = VCPU_SREG_GS;
4379 /* Special instructions do their own operand decoding. */
4381 op->type = OP_NONE; /* Disable writeback. */
4389 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4391 int rc = X86EMUL_CONTINUE;
4392 int mode = ctxt->mode;
4393 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4394 bool op_prefix = false;
4395 bool has_seg_override = false;
4396 struct opcode opcode;
4398 ctxt->memop.type = OP_NONE;
4399 ctxt->memopp = NULL;
4400 ctxt->_eip = ctxt->eip;
4401 ctxt->fetch.ptr = ctxt->fetch.data;
4402 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4403 ctxt->opcode_len = 1;
4405 memcpy(ctxt->fetch.data, insn, insn_len);
4407 rc = __do_insn_fetch_bytes(ctxt, 1);
4408 if (rc != X86EMUL_CONTINUE)
4413 case X86EMUL_MODE_REAL:
4414 case X86EMUL_MODE_VM86:
4415 case X86EMUL_MODE_PROT16:
4416 def_op_bytes = def_ad_bytes = 2;
4418 case X86EMUL_MODE_PROT32:
4419 def_op_bytes = def_ad_bytes = 4;
4421 #ifdef CONFIG_X86_64
4422 case X86EMUL_MODE_PROT64:
4428 return EMULATION_FAILED;
4431 ctxt->op_bytes = def_op_bytes;
4432 ctxt->ad_bytes = def_ad_bytes;
4434 /* Legacy prefixes. */
4436 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4437 case 0x66: /* operand-size override */
4439 /* switch between 2/4 bytes */
4440 ctxt->op_bytes = def_op_bytes ^ 6;
4442 case 0x67: /* address-size override */
4443 if (mode == X86EMUL_MODE_PROT64)
4444 /* switch between 4/8 bytes */
4445 ctxt->ad_bytes = def_ad_bytes ^ 12;
4447 /* switch between 2/4 bytes */
4448 ctxt->ad_bytes = def_ad_bytes ^ 6;
4450 case 0x26: /* ES override */
4451 case 0x2e: /* CS override */
4452 case 0x36: /* SS override */
4453 case 0x3e: /* DS override */
4454 has_seg_override = true;
4455 ctxt->seg_override = (ctxt->b >> 3) & 3;
4457 case 0x64: /* FS override */
4458 case 0x65: /* GS override */
4459 has_seg_override = true;
4460 ctxt->seg_override = ctxt->b & 7;
4462 case 0x40 ... 0x4f: /* REX */
4463 if (mode != X86EMUL_MODE_PROT64)
4465 ctxt->rex_prefix = ctxt->b;
4467 case 0xf0: /* LOCK */
4468 ctxt->lock_prefix = 1;
4470 case 0xf2: /* REPNE/REPNZ */
4471 case 0xf3: /* REP/REPE/REPZ */
4472 ctxt->rep_prefix = ctxt->b;
4478 /* Any legacy prefix after a REX prefix nullifies its effect. */
4480 ctxt->rex_prefix = 0;
4486 if (ctxt->rex_prefix & 8)
4487 ctxt->op_bytes = 8; /* REX.W */
4489 /* Opcode byte(s). */
4490 opcode = opcode_table[ctxt->b];
4491 /* Two-byte opcode? */
4492 if (ctxt->b == 0x0f) {
4493 ctxt->opcode_len = 2;
4494 ctxt->b = insn_fetch(u8, ctxt);
4495 opcode = twobyte_table[ctxt->b];
4497 /* 0F_38 opcode map */
4498 if (ctxt->b == 0x38) {
4499 ctxt->opcode_len = 3;
4500 ctxt->b = insn_fetch(u8, ctxt);
4501 opcode = opcode_map_0f_38[ctxt->b];
4504 ctxt->d = opcode.flags;
4506 if (ctxt->d & ModRM)
4507 ctxt->modrm = insn_fetch(u8, ctxt);
4509 /* vex-prefix instructions are not implemented */
4510 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4511 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
4515 while (ctxt->d & GroupMask) {
4516 switch (ctxt->d & GroupMask) {
4518 goffset = (ctxt->modrm >> 3) & 7;
4519 opcode = opcode.u.group[goffset];
4522 goffset = (ctxt->modrm >> 3) & 7;
4523 if ((ctxt->modrm >> 6) == 3)
4524 opcode = opcode.u.gdual->mod3[goffset];
4526 opcode = opcode.u.gdual->mod012[goffset];
4529 goffset = ctxt->modrm & 7;
4530 opcode = opcode.u.group[goffset];
4533 if (ctxt->rep_prefix && op_prefix)
4534 return EMULATION_FAILED;
4535 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4536 switch (simd_prefix) {
4537 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4538 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4539 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4540 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4544 if (ctxt->modrm > 0xbf)
4545 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4547 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4550 return EMULATION_FAILED;
4553 ctxt->d &= ~(u64)GroupMask;
4554 ctxt->d |= opcode.flags;
4559 return EMULATION_FAILED;
4561 ctxt->execute = opcode.u.execute;
4563 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4564 return EMULATION_FAILED;
4566 if (unlikely(ctxt->d &
4567 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4570 * These are copied unconditionally here, and checked unconditionally
4571 * in x86_emulate_insn.
4573 ctxt->check_perm = opcode.check_perm;
4574 ctxt->intercept = opcode.intercept;
4576 if (ctxt->d & NotImpl)
4577 return EMULATION_FAILED;
4579 if (mode == X86EMUL_MODE_PROT64) {
4580 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4582 else if (ctxt->d & NearBranch)
4586 if (ctxt->d & Op3264) {
4587 if (mode == X86EMUL_MODE_PROT64)
4593 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
4597 ctxt->op_bytes = 16;
4598 else if (ctxt->d & Mmx)
4602 /* ModRM and SIB bytes. */
4603 if (ctxt->d & ModRM) {
4604 rc = decode_modrm(ctxt, &ctxt->memop);
4605 if (!has_seg_override) {
4606 has_seg_override = true;
4607 ctxt->seg_override = ctxt->modrm_seg;
4609 } else if (ctxt->d & MemAbs)
4610 rc = decode_abs(ctxt, &ctxt->memop);
4611 if (rc != X86EMUL_CONTINUE)
4614 if (!has_seg_override)
4615 ctxt->seg_override = VCPU_SREG_DS;
4617 ctxt->memop.addr.mem.seg = ctxt->seg_override;
4620	 * Decode and fetch the source operand: register, memory or immediate.
4623 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4624 if (rc != X86EMUL_CONTINUE)
4628	 * Decode and fetch the second source operand: register, memory or immediate.
4631 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4632 if (rc != X86EMUL_CONTINUE)
4635 /* Decode and fetch the destination operand: register or memory. */
4636 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4638 if (ctxt->rip_relative)
4639 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
4640 ctxt->memopp->addr.mem.ea + ctxt->_eip);
4643 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4646 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4648 return ctxt->d & PageTable;
4651 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4653 /* The second termination condition only applies for REPE
4654 * and REPNE. Test if the repeat string operation prefix is
4655	 * REPE/REPZ or REPNE/REPNZ and, if so, test the
4656 * corresponding termination condition according to:
4657 * - if REPE/REPZ and ZF = 0 then done
4658 * - if REPNE/REPNZ and ZF = 1 then done
4660 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4661 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4662 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4663 ((ctxt->eflags & EFLG_ZF) == 0))
4664 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4665 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4671 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4675 ctxt->ops->get_fpu(ctxt);
4676 asm volatile("1: fwait \n\t"
4678 ".pushsection .fixup,\"ax\" \n\t"
4680 "movb $1, %[fault] \n\t"
4683 _ASM_EXTABLE(1b, 3b)
4684 : [fault]"+qm"(fault));
4685 ctxt->ops->put_fpu(ctxt);
4687 if (unlikely(fault))
4688 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4690 return X86EMUL_CONTINUE;
4693 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4696 if (op->type == OP_MM)
4697 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
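/*
 * Dispatch a flag-manipulating instruction to its fastop stub: the
 * guest's arithmetic flags are loaded via popf, the stub selected by
 * operand size (__ffs(bytes) * FASTOP_SIZE) is called, and the resulting
 * flags are merged back into ctxt->eflags.  A NULL fop on return
 * signals #DE.
 */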
4700 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4702 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4703 if (!(ctxt->d & ByteOp))
4704 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4705 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4706 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4708 : "c"(ctxt->src2.val));
4709 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4710 if (!fop) /* exception is returned in fop variable */
4711 return emulate_de(ctxt);
4712 return X86EMUL_CONTINUE;
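/*
 * Reset the per-instruction decode state: zero everything between
 * rip_relative and modrm in the context, and rewind the I/O and memory
 * read caches.
 */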
4715 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4717 memset(&ctxt->rip_relative, 0,
4718 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
4720 ctxt->io_read.pos = 0;
4721 ctxt->io_read.end = 0;
4722 ctxt->mem_read.end = 0;
4725 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4727 const struct x86_emulate_ops *ops = ctxt->ops;
4728 int rc = X86EMUL_CONTINUE;
4729 int saved_dst_type = ctxt->dst.type;
4731 ctxt->mem_read.pos = 0;
4733 /* LOCK prefix is allowed only with some instructions */
4734 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4735 rc = emulate_ud(ctxt);
4739 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4740 rc = emulate_ud(ctxt);

	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}
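
		/*
		 * CR0.EM forces #UD for both SSE and MMX instructions, and
		 * SSE additionally requires CR4.OSFXSR.  CR0.TS instead
		 * yields #NM, so the guest can restore FPU state lazily.
		 */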
		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
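			/*
			 * i.e. RCX == 0, masked to the current address size:
			 * e.g. "rep movsb" with RCX == 0 retires immediately
			 * and moves nothing.
			 */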
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~EFLG_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				   &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;	/* nop */
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
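		/*
		 * Sign-extend the lower half of the accumulator in place:
		 * cbw (op_bytes == 2): AL -> AX, cwde (4): AX -> EAX,
		 * cdqe (8): EAX -> RAX.
		 */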
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
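		/*
		 * One emulation pass may have covered several iterations
		 * (the operand's 'count' says how many), e.g. when the pio
		 * read-ahead buffer batches "rep ins", so RCX is decremented
		 * by that amount rather than by one.
		 */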
		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
				-count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the pio read-ahead buffer
			 * is empty or, if it is not used, after every 1024
			 * iterations.
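			 *
			 * That is: with no read-ahead (r->end == 0) we go
			 * back to the guest once RCX hits a multiple of
			 * 1024; with read-ahead in use we go back only once
			 * the buffer is fully consumed (r->end == r->pos).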
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset the read cache.  This usually happens
				 * before decode, but since the instruction is
				 * restarted we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
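	/*
	 * cmov: in 64-bit mode a 32-bit cmov zero-extends the destination
	 * register even when the condition is false, so writeback is only
	 * suppressed when that zero-extension is not required.
	 */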
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}