/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

/*
 * Operand types
 */
#define OpNone 0ull /* No operand */
#define OpImplicit 1ull /* No generic decode */
#define OpReg 2ull /* Register */
#define OpMem 3ull /* Memory */
#define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
#define OpDI 5ull /* ES:DI/EDI/RDI */
#define OpMem64 6ull /* Memory, 64-bit */
#define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
#define OpDX 8ull /* DX register */
#define OpCL 9ull /* CL register (for shifts) */
#define OpImmByte 10ull /* 8-bit sign extended immediate */
#define OpOne 11ull /* Implied 1 */
#define OpImm 12ull /* Sign extended up to 32-bit immediate */
#define OpMem16 13ull /* Memory operand (16-bit). */
#define OpMem32 14ull /* Memory operand (32-bit). */
#define OpImmU 15ull /* Immediate operand, zero extended */
#define OpSI 16ull /* SI/ESI/RSI */
#define OpImmFAddr 17ull /* Immediate far address */
#define OpMemFAddr 18ull /* Far address in memory */
#define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
#define OpES 20ull /* ES */
#define OpCS 21ull /* CS */
#define OpSS 22ull /* SS */
#define OpDS 23ull /* DS */
#define OpFS 24ull /* FS */
#define OpGS 25ull /* GS */
#define OpMem8 26ull /* 8-bit zero extended memory operand */
#define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
#define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits 5 /* Width of operand field */
#define OpMask ((1ull << OpBits) - 1)

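/*
 * Editorial sketch (illustrative, not used by the emulator): each operand
 * slot is a 5-bit field packed into the per-opcode flags word using the
 * shifts defined below (DstShift, SrcShift, Src2Shift), so a decoded
 * operand type can be recovered with a shift and OpMask:
 *
 *	u64 flags = DstReg | SrcMem | ModRM;
 *	unsigned dst = (flags >> DstShift) & OpMask;	(== OpReg)
 *	unsigned src = (flags >> SrcShift) & OpMask;	(== OpMem)
 */
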
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp (1<<0) /* 8-bit operands. */
/* Destination operand type. */
#define DstShift 1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg (OpReg << DstShift)
#define DstMem (OpMem << DstShift)
#define DstAcc (OpAcc << DstShift)
#define DstDI (OpDI << DstShift)
#define DstMem64 (OpMem64 << DstShift)
#define DstMem16 (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX (OpDX << DstShift)
#define DstAccLo (OpAccLo << DstShift)
#define DstMask (OpMask << DstShift)
/* Source operand type. */
#define SrcShift 6
#define SrcNone (OpNone << SrcShift)
#define SrcReg (OpReg << SrcShift)
#define SrcMem (OpMem << SrcShift)
#define SrcMem16 (OpMem16 << SrcShift)
#define SrcMem32 (OpMem32 << SrcShift)
#define SrcImm (OpImm << SrcShift)
#define SrcImmByte (OpImmByte << SrcShift)
#define SrcOne (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU (OpImmU << SrcShift)
#define SrcSI (OpSI << SrcShift)
#define SrcXLat (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc (OpAcc << SrcShift)
#define SrcImmU16 (OpImmU16 << SrcShift)
#define SrcImm64 (OpImm64 << SrcShift)
#define SrcDX (OpDX << SrcShift)
#define SrcMem8 (OpMem8 << SrcShift)
#define SrcAccHi (OpAccHi << SrcShift)
#define SrcMask (OpMask << SrcShift)
#define BitOp (1<<11)
#define MemAbs (1<<12) /* Memory operand is absolute displacement */
#define String (1<<13) /* String instruction (rep capable) */
#define Stack (1<<14) /* Stack instruction (push/pop) */
#define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
#define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape (5<<15) /* Escape to coprocessor instruction */
#define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
#define Sse (1<<18) /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM (1<<19)
/* Destination is only written; never read. */
#define Mov (1<<20)
#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined (1<<25) /* No Such Instruction */
#define Lock (1<<26) /* lock prefix is allowed for the instruction */
#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64 (1<<28) /* instruction is invalid in 64-bit mode */
#define PageTable (1 << 29) /* instruction used to write page table */
#define NotImpl (1 << 30) /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift (31)
#define Src2None (OpNone << Src2Shift)
#define Src2Mem (OpMem << Src2Shift)
#define Src2CL (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One (OpOne << Src2Shift)
#define Src2Imm (OpImm << Src2Shift)
#define Src2ES (OpES << Src2Shift)
#define Src2CS (OpCS << Src2Shift)
#define Src2SS (OpSS << Src2Shift)
#define Src2DS (OpDS << Src2Shift)
#define Src2FS (OpFS << Src2Shift)
#define Src2GS (OpGS << Src2Shift)
#define Src2Mask (OpMask << Src2Shift)
#define Mmx ((u64)1 << 40) /* MMX Vector instruction */
#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
#define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
#define NoWrite ((u64)1 << 45) /* No writeback */
#define SrcWrite ((u64)1 << 46) /* Write back src operand */
#define NoMod ((u64)1 << 47) /* Mod field is ignored */
#define Intercept ((u64)1 << 48) /* Has valid intercept field */
#define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
#define NoBigReal ((u64)1 << 50) /* No big real mode */
#define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
#define NearBranch ((u64)1 << 52) /* Near branches */
#define No16 ((u64)1 << 53) /* No 16 bit operand */

#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

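/*
 * Minimal sketch of the size dispatch described above (illustrative only;
 * the real version is fastop(), forward-declared below): because every
 * stub is FASTOP_SIZE bytes, the handler for a given operand size is a
 * fixed offset from the em_##op base, one stub per power-of-two size.
 */
static inline void *fastop_entry_sketch(void *base, unsigned int bytes)
{
	/* bytes is 1, 2, 4 or 8, so __ffs(bytes) is 0..3 */
	return base + __ffs(bytes) * FASTOP_SIZE;
}
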
struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);	/* make sure the cached value is valid first */
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

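/*
 * Worked example of the mask above (editorial sketch, not called anywhere):
 * after an emulated arithmetic op, only the bits in EFLAGS_MASK are taken
 * from the computed rflags; everything else keeps the guest's saved state.
 */
static inline unsigned long eflags_merge_sketch(unsigned long saved,
						unsigned long computed)
{
	return (saved & ~EFLAGS_MASK) | (computed & EFLAGS_MASK);
}
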
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op, dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op, dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op, dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op, dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

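/*
 * Worked example (editorial note): with a 16-bit address size ad_bytes is
 * 2, so ad_mask() yields (1UL << 16) - 1 == 0xffff; with ad_bytes == 4 it
 * yields 0xffffffff. ad_mask() is only reached when ad_bytes is smaller
 * than sizeof(unsigned long) (see address_mask() below), so the shift
 * never reaches the word size.
 */
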
static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg_rmw(ctxt, reg), mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

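/*
 * Worked example (editorial note): a descriptor with g=1 and limit field
 * 0xfffff scales to (0xfffff << 12) | 0xfff == 0xffffffff, i.e. 4 GiB - 1;
 * with g=0 the limit is byte-granular and used as-is.
 */
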
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
		     || !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		if (size > *max_size)
			goto bad;
		la &= (u32)-1;
		break;
	}
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;

	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			mode = X86EMUL_MODE_PROT64;
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	return assign_eip(ctxt, dst, mode);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible. We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself. Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	/* cur_size < 15, so 15 ^ cur_size is the same as 15 - cur_size */
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn. So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	_type _x; \
	\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += sizeof(_type); \
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
	ctxt->fetch.ptr += sizeof(_type); \
	_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
})

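/*
 * Usage sketch (editorial note; mirrors decode_modrm() and decode_abs()
 * below): each expansion advances ->_eip and the fetch pointer by the
 * decoded width, e.g.
 *
 *	sib = insn_fetch(u8, ctxt);		fetch one SIB byte
 *	modrm_ea += insn_fetch(s32, ctxt);	fetch a 32-bit displacement
 *
 * The caller must provide a local "rc" and a "done:" label, which the
 * macros use to bail out when the fetch would pass the 15-byte limit.
 */
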
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP2R(cmp, cmp_r);

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
					       ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}

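/*
 * Worked example (editorial note): for "bt %ecx, mem" with a 4-byte
 * destination and bit offset 100 in %ecx, mask is ~31 and sv is 96, so
 * the effective address advances by 96 >> 3 == 12 bytes and the in-word
 * bit offset becomes 100 & 31 == 4.
 */
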
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

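/*
 * Worked example (editorial note): bit 2 of a selector is the table
 * indicator (TI). Selector 0x000f has index 1, TI=1 and RPL 3, so the
 * descriptor is fetched from the LDT; with TI=0 the GDT is used.
 */
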
/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     bool in_task_switch,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or the selector's
		 * RPL != CPL, or the descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
					  sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (is_noncanonical_address(get_desc_base(&seg_desc) |
					    ((u64)base3 << 32)))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);

	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
}

static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt, op->addr.mem,
						 &op->orig_val, &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt, op->addr.mem,
					       &op->val, op->bytes);
	case OP_MEM_STR:
		return segmented_write(ctxt, op->addr.mem, op->data,
				       op->bytes * op->count);
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
				 VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
		return rc;
	}
	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}

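/*
 * Worked example (editorial note): if the 8-byte destination equals
 * EDX:EAX, ZF is set and ECX:EBX is stored through the normal writeback
 * path; otherwise ZF is cleared and the old memory value is loaded into
 * EDX:EAX while the destination is left unchanged.
 */
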
static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	u16 old_cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
				 VCPU_SREG_CS);

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, &new_desc);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory. */
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->dst.type = OP_REG;
		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64bit
	 * longmode. Also a 64bit guest with a
	 * 32bit compat-app running will #UD !! While this
	 * behaviour can be fixed (by emulating) into AMD
	 * response - CPUs of AMD can't behave like Intel.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}

2276 const struct x86_emulate_ops *ops = ctxt->ops;
2277 struct desc_struct cs, ss;
2282 /* syscall is not available in real mode */
2283 if (ctxt->mode == X86EMUL_MODE_REAL ||
2284 ctxt->mode == X86EMUL_MODE_VM86)
2285 return emulate_ud(ctxt);
2287 if (!(em_syscall_is_enabled(ctxt)))
2288 return emulate_ud(ctxt);
2290 ops->get_msr(ctxt, MSR_EFER, &efer);
2291 setup_syscalls_segments(ctxt, &cs, &ss);
2293 if (!(efer & EFER_SCE))
2294 return emulate_ud(ctxt);
2296 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2298 cs_sel = (u16)(msr_data & 0xfffc);
2299 ss_sel = (u16)(msr_data + 8);
2301 if (efer & EFER_LMA) {
2305 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2306 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2308 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2309 if (efer & EFER_LMA) {
2310 #ifdef CONFIG_X86_64
2311 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2314 ctxt->mode == X86EMUL_MODE_PROT64 ?
2315 MSR_LSTAR : MSR_CSTAR, &msr_data);
2316 ctxt->_eip = msr_data;
2318 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2319 ctxt->eflags &= ~msr_data;
2320 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
2324 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2325 ctxt->_eip = (u32)msr_data;
2327 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2330 return X86EMUL_CONTINUE;
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	default:
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = msr_data;

	return X86EMUL_CONTINUE;
}

2396 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2398 const struct x86_emulate_ops *ops = ctxt->ops;
2399 struct desc_struct cs, ss;
2400 u64 msr_data, rcx, rdx;
2402 u16 cs_sel = 0, ss_sel = 0;
2404 /* inject #GP if in real mode or Virtual 8086 mode */
2405 if (ctxt->mode == X86EMUL_MODE_REAL ||
2406 ctxt->mode == X86EMUL_MODE_VM86)
2407 return emulate_gp(ctxt, 0);
2409 setup_syscalls_segments(ctxt, &cs, &ss);
2411 if ((ctxt->rex_prefix & 0x8) != 0x0)
2412 usermode = X86EMUL_MODE_PROT64;
2414 usermode = X86EMUL_MODE_PROT32;
2416 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2417 rdx = reg_read(ctxt, VCPU_REGS_RDX);
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (is_noncanonical_address(rcx) ||
		    is_noncanonical_address(rdx))
			return emulate_gp(ctxt, 0);
		break;
	}
2443 cs_sel |= SELECTOR_RPL_MASK;
2444 ss_sel |= SELECTOR_RPL_MASK;
2446 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2447 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2450 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2452 return X86EMUL_CONTINUE;
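/*
 * In protected mode, IN/OUT/CLI/STI are allowed outright only when
 * CPL <= IOPL; VM86 always fails this check and falls back to the TSS
 * I/O permission bitmap.
 */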
2455 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2458 if (ctxt->mode == X86EMUL_MODE_REAL)
2460 if (ctxt->mode == X86EMUL_MODE_VM86)
2462 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2463 return ctxt->ops->cpl(ctxt) > iopl;
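/*
 * Consult the I/O permission bitmap in the TSS: the access is allowed
 * only if every bit covering [port, port + len) is clear.
 */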
2466 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2469 const struct x86_emulate_ops *ops = ctxt->ops;
2470 struct desc_struct tr_seg;
2473 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2474 unsigned mask = (1 << len) - 1;
2477 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2480 if (desc_limit_scaled(&tr_seg) < 103)
2482 base = get_desc_base(&tr_seg);
2483 #ifdef CONFIG_X86_64
2484 base |= ((u64)base3) << 32;
2486 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2487 if (r != X86EMUL_CONTINUE)
2489 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2491 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2492 if (r != X86EMUL_CONTINUE)
2494 if ((perm >> bit_idx) & mask)
static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2505 if (emulator_bad_iopl(ctxt))
2506 if (!emulator_io_port_access_allowed(ctxt, port, len))
2509 ctxt->perm_ok = true;
2514 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2515 struct tss_segment_16 *tss)
2517 tss->ip = ctxt->_eip;
2518 tss->flag = ctxt->eflags;
2519 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2520 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2521 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2522 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2523 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2524 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2525 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2526 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2528 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2529 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2530 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2531 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2532 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2535 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2536 struct tss_segment_16 *tss)
2541 ctxt->_eip = tss->ip;
2542 ctxt->eflags = tss->flag | 2;
2543 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2544 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2545 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2546 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2547 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2548 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2549 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2550 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.
	 */
2556 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2557 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2558 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2559 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2560 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2565 * Now load segment descriptors. If fault happens at this stage
	 * it is handled in the context of the new task.
2568 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2570 if (ret != X86EMUL_CONTINUE)
2572 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2574 if (ret != X86EMUL_CONTINUE)
2576 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2578 if (ret != X86EMUL_CONTINUE)
2580 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2582 if (ret != X86EMUL_CONTINUE)
2584 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2586 if (ret != X86EMUL_CONTINUE)
2589 return X86EMUL_CONTINUE;
2592 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2593 u16 tss_selector, u16 old_tss_sel,
2594 ulong old_tss_base, struct desc_struct *new_desc)
2596 const struct x86_emulate_ops *ops = ctxt->ops;
2597 struct tss_segment_16 tss_seg;
2599 u32 new_tss_base = get_desc_base(new_desc);
2601 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2603 if (ret != X86EMUL_CONTINUE)
2606 save_state_to_tss16(ctxt, &tss_seg);
2608 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2610 if (ret != X86EMUL_CONTINUE)
2613 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2615 if (ret != X86EMUL_CONTINUE)
2618 if (old_tss_sel != 0xffff) {
2619 tss_seg.prev_task_link = old_tss_sel;
2621 ret = ops->write_std(ctxt, new_tss_base,
2622 &tss_seg.prev_task_link,
2623 sizeof tss_seg.prev_task_link,
2625 if (ret != X86EMUL_CONTINUE)
2629 return load_state_from_tss16(ctxt, &tss_seg);
2632 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2633 struct tss_segment_32 *tss)
	/* CR3 and the LDT selector are intentionally not saved */
2636 tss->eip = ctxt->_eip;
2637 tss->eflags = ctxt->eflags;
2638 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2639 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2640 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2641 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2642 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2643 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2644 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2645 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2647 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2648 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2649 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2650 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2651 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2652 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2655 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2656 struct tss_segment_32 *tss)
2661 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2662 return emulate_gp(ctxt, 0);
2663 ctxt->_eip = tss->eip;
2664 ctxt->eflags = tss->eflags | 2;
2666 /* General purpose registers */
2667 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2668 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2669 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2670 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2671 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2672 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2673 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2674 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
2681 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2682 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2683 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2684 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2685 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2686 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2687 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2690 * If we're switching between Protected Mode and VM86, we need to make
2691 * sure to update the mode before loading the segment descriptors so
2692 * that the selectors are interpreted correctly.
2694 if (ctxt->eflags & X86_EFLAGS_VM) {
2695 ctxt->mode = X86EMUL_MODE_VM86;
2698 ctxt->mode = X86EMUL_MODE_PROT32;
	 * Now load segment descriptors. If fault happens at this stage
	 * it is handled in the context of the new task.
2706 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2708 if (ret != X86EMUL_CONTINUE)
2710 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2712 if (ret != X86EMUL_CONTINUE)
2714 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2716 if (ret != X86EMUL_CONTINUE)
2718 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2720 if (ret != X86EMUL_CONTINUE)
2722 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2724 if (ret != X86EMUL_CONTINUE)
2726 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2728 if (ret != X86EMUL_CONTINUE)
2730 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2732 if (ret != X86EMUL_CONTINUE)
2735 return X86EMUL_CONTINUE;
2738 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2739 u16 tss_selector, u16 old_tss_sel,
2740 ulong old_tss_base, struct desc_struct *new_desc)
2742 const struct x86_emulate_ops *ops = ctxt->ops;
2743 struct tss_segment_32 tss_seg;
2745 u32 new_tss_base = get_desc_base(new_desc);
2746 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2747 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2749 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2751 if (ret != X86EMUL_CONTINUE)
2754 save_state_to_tss32(ctxt, &tss_seg);
2756 /* Only GP registers and segment selectors are saved */
2757 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2758 ldt_sel_offset - eip_offset, &ctxt->exception);
2759 if (ret != X86EMUL_CONTINUE)
2762 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2764 if (ret != X86EMUL_CONTINUE)
2767 if (old_tss_sel != 0xffff) {
2768 tss_seg.prev_task_link = old_tss_sel;
2770 ret = ops->write_std(ctxt, new_tss_base,
2771 &tss_seg.prev_task_link,
2772 sizeof tss_seg.prev_task_link,
2774 if (ret != X86EMUL_CONTINUE)
2778 return load_state_from_tss32(ctxt, &tss_seg);
2781 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2782 u16 tss_selector, int idt_index, int reason,
2783 bool has_error_code, u32 error_code)
2785 const struct x86_emulate_ops *ops = ctxt->ops;
2786 struct desc_struct curr_tss_desc, next_tss_desc;
2788 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2789 ulong old_tss_base =
2790 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2794 /* FIXME: old_tss_base == ~0 ? */
2796 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2797 if (ret != X86EMUL_CONTINUE)
2799 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2800 if (ret != X86EMUL_CONTINUE)
2803 /* FIXME: check that next_tss_desc is tss */
2806 * Check privileges. The three cases are task switch caused by...
2808 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2809 * 2. Exception/IRQ/iret: No check is performed
2810 * 3. jmp/call to TSS/task-gate: No check is performed since the
2811 * hardware checks it before exiting.
2813 if (reason == TASK_SWITCH_GATE) {
2814 if (idt_index != -1) {
2815 /* Software interrupts */
2816 struct desc_struct task_gate_desc;
2819 ret = read_interrupt_descriptor(ctxt, idt_index,
2821 if (ret != X86EMUL_CONTINUE)
2824 dpl = task_gate_desc.dpl;
2825 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2826 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2830 desc_limit = desc_limit_scaled(&next_tss_desc);
2831 if (!next_tss_desc.p ||
2832 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2833 desc_limit < 0x2b)) {
2834 return emulate_ts(ctxt, tss_selector & 0xfffc);
2837 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2838 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2839 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2842 if (reason == TASK_SWITCH_IRET)
2843 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
	/* set back link to prev task only if NT bit is set in eflags;
	   note that old_tss_sel is not used after this point */
2847 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2848 old_tss_sel = 0xffff;
2850 if (next_tss_desc.type & 8)
2851 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2852 old_tss_base, &next_tss_desc);
2854 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2855 old_tss_base, &next_tss_desc);
2856 if (ret != X86EMUL_CONTINUE)
2859 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2860 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2862 if (reason != TASK_SWITCH_IRET) {
2863 next_tss_desc.type |= (1 << 1); /* set busy flag */
2864 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2867 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2868 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2870 if (has_error_code) {
2871 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2872 ctxt->lock_prefix = 0;
2873 ctxt->src.val = (unsigned long) error_code;
2874 ret = em_push(ctxt);
2880 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2881 u16 tss_selector, int idt_index, int reason,
2882 bool has_error_code, u32 error_code)
2886 invalidate_registers(ctxt);
2887 ctxt->_eip = ctxt->eip;
2888 ctxt->dst.type = OP_NONE;
2890 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2891 has_error_code, error_code);
2893 if (rc == X86EMUL_CONTINUE) {
2894 ctxt->eip = ctxt->_eip;
2895 writeback_registers(ctxt);
2898 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
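/*
 * Step a string-op pointer register by one element: EFLAGS.DF selects
 * the direction (DF=1 walks down, DF=0 walks up), scaled by the
 * iteration count for rep-prefixed forms.
 */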
2901 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2904 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2906 register_address_increment(ctxt, reg, df * op->bytes);
2907 op->addr.mem.ea = register_address(ctxt, reg);
2910 static int em_das(struct x86_emulate_ctxt *ctxt)
2913 bool af, cf, old_cf;
2915 cf = ctxt->eflags & X86_EFLAGS_CF;
2921 af = ctxt->eflags & X86_EFLAGS_AF;
2922 if ((al & 0x0f) > 9 || af) {
2924 cf = old_cf | (al >= 250);
2929 if (old_al > 0x99 || old_cf) {
2935 /* Set PF, ZF, SF */
2936 ctxt->src.type = OP_IMM;
2938 ctxt->src.bytes = 1;
2939 fastop(ctxt, em_or);
2940 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2942 ctxt->eflags |= X86_EFLAGS_CF;
2944 ctxt->eflags |= X86_EFLAGS_AF;
2945 return X86EMUL_CONTINUE;
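/*
 * AAM: divide AL by the immediate base (10 by default), leaving the
 * quotient in AH and the remainder in AL; a zero divisor raises #DE.
 */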
2948 static int em_aam(struct x86_emulate_ctxt *ctxt)
2952 if (ctxt->src.val == 0)
2953 return emulate_de(ctxt);
2955 al = ctxt->dst.val & 0xff;
2956 ah = al / ctxt->src.val;
2957 al %= ctxt->src.val;
2959 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2961 /* Set PF, ZF, SF */
2962 ctxt->src.type = OP_IMM;
2964 ctxt->src.bytes = 1;
2965 fastop(ctxt, em_or);
2967 return X86EMUL_CONTINUE;
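/*
 * AAD: fold AH back into AL as AL = AL + AH * base, clearing AH; the
 * inverse of AAM.
 */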
2970 static int em_aad(struct x86_emulate_ctxt *ctxt)
2972 u8 al = ctxt->dst.val & 0xff;
2973 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2975 al = (al + (ah * ctxt->src.val)) & 0xff;
2977 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2979 /* Set PF, ZF, SF */
2980 ctxt->src.type = OP_IMM;
2982 ctxt->src.bytes = 1;
2983 fastop(ctxt, em_or);
2985 return X86EMUL_CONTINUE;
2988 static int em_call(struct x86_emulate_ctxt *ctxt)
2991 long rel = ctxt->src.val;
2993 ctxt->src.val = (unsigned long)ctxt->_eip;
2994 rc = jmp_rel(ctxt, rel);
2995 if (rc != X86EMUL_CONTINUE)
2997 return em_push(ctxt);
3000 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3005 struct desc_struct old_desc, new_desc;
3006 const struct x86_emulate_ops *ops = ctxt->ops;
3007 int cpl = ctxt->ops->cpl(ctxt);
3009 old_eip = ctxt->_eip;
3010 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3012 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3013 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3015 if (rc != X86EMUL_CONTINUE)
		return rc;
3018 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3019 if (rc != X86EMUL_CONTINUE)
3022 ctxt->src.val = old_cs;
3024 if (rc != X86EMUL_CONTINUE)
3027 ctxt->src.val = old_eip;
	/* If we failed, we tainted the memory, but at the very least we
	   should restore cs */
3031 if (rc != X86EMUL_CONTINUE)
3035 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3040 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3045 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3046 if (rc != X86EMUL_CONTINUE)
3048 rc = assign_eip_near(ctxt, eip);
3049 if (rc != X86EMUL_CONTINUE)
3051 rsp_increment(ctxt, ctxt->src.val);
3052 return X86EMUL_CONTINUE;
3055 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3057 /* Write back the register source. */
3058 ctxt->src.val = ctxt->dst.val;
3059 write_register_operand(&ctxt->src);
3061 /* Write back the memory destination with implicit LOCK prefix. */
3062 ctxt->dst.val = ctxt->src.orig_val;
3063 ctxt->lock_prefix = 1;
3064 return X86EMUL_CONTINUE;
3067 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3069 ctxt->dst.val = ctxt->src2.val;
3070 return fastop(ctxt, em_imul);
3073 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3075 ctxt->dst.type = OP_REG;
3076 ctxt->dst.bytes = ctxt->src.bytes;
3077 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
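	/*
	 * Replicate the accumulator's sign bit through DX/EDX/RDX:
	 * (val >> (bits - 1)) is 0 or 1, so the expression below becomes
	 * all-zeroes or all-ones.
	 */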
3078 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3080 return X86EMUL_CONTINUE;
3083 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3087 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3088 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3089 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3090 return X86EMUL_CONTINUE;
3093 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3097 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3098 return emulate_gp(ctxt, 0);
3099 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3100 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3101 return X86EMUL_CONTINUE;
3104 static int em_mov(struct x86_emulate_ctxt *ctxt)
3106 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3107 return X86EMUL_CONTINUE;
3110 #define FFL(x) bit(X86_FEATURE_##x)
3112 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3114 u32 ebx, ecx, edx, eax = 1;
3118 * Check MOVBE is set in the guest-visible CPUID leaf.
3120 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3121 if (!(ecx & FFL(MOVBE)))
3122 return emulate_ud(ctxt);
3124 switch (ctxt->op_bytes) {
3127 * From MOVBE definition: "...When the operand size is 16 bits,
3128 * the upper word of the destination register remains unchanged
3131 * Both casting ->valptr and ->val to u16 breaks strict aliasing
	 * rules so we have to do the operation almost by hand.
3134 tmp = (u16)ctxt->src.val;
3135 ctxt->dst.val &= ~0xffffUL;
3136 ctxt->dst.val |= (unsigned long)swab16(tmp);
3139 ctxt->dst.val = swab32((u32)ctxt->src.val);
3142 ctxt->dst.val = swab64(ctxt->src.val);
3147 return X86EMUL_CONTINUE;
3150 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3152 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3153 return emulate_gp(ctxt, 0);
3155 /* Disable writeback. */
3156 ctxt->dst.type = OP_NONE;
3157 return X86EMUL_CONTINUE;
3160 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3164 if (ctxt->mode == X86EMUL_MODE_PROT64)
3165 val = ctxt->src.val & ~0ULL;
3167 val = ctxt->src.val & ~0U;
3169 /* #UD condition is already handled. */
3170 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3171 return emulate_gp(ctxt, 0);
3173 /* Disable writeback. */
3174 ctxt->dst.type = OP_NONE;
3175 return X86EMUL_CONTINUE;
3178 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3182 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3183 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3184 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3185 return emulate_gp(ctxt, 0);
3187 return X86EMUL_CONTINUE;
3190 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3194 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3195 return emulate_gp(ctxt, 0);
3197 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3198 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3199 return X86EMUL_CONTINUE;
3202 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3204 if (ctxt->modrm_reg > VCPU_SREG_GS)
3205 return emulate_ud(ctxt);
3207 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3208 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3209 ctxt->dst.bytes = 2;
3210 return X86EMUL_CONTINUE;
3213 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3215 u16 sel = ctxt->src.val;
3217 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3218 return emulate_ud(ctxt);
3220 if (ctxt->modrm_reg == VCPU_SREG_SS)
3221 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3223 /* Disable writeback. */
3224 ctxt->dst.type = OP_NONE;
3225 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3228 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3230 u16 sel = ctxt->src.val;
3232 /* Disable writeback. */
3233 ctxt->dst.type = OP_NONE;
3234 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3237 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3239 u16 sel = ctxt->src.val;
3241 /* Disable writeback. */
3242 ctxt->dst.type = OP_NONE;
3243 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3246 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3251 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3252 if (rc == X86EMUL_CONTINUE)
3253 ctxt->ops->invlpg(ctxt, linear);
3254 /* Disable writeback. */
3255 ctxt->dst.type = OP_NONE;
3256 return X86EMUL_CONTINUE;
3259 static int em_clts(struct x86_emulate_ctxt *ctxt)
3263 cr0 = ctxt->ops->get_cr(ctxt, 0);
3265 ctxt->ops->set_cr(ctxt, 0, cr0);
3266 return X86EMUL_CONTINUE;
3269 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3271 int rc = ctxt->ops->fix_hypercall(ctxt);
3273 if (rc != X86EMUL_CONTINUE)
3276 /* Let the processor re-execute the fixed hypercall */
3277 ctxt->_eip = ctxt->eip;
3278 /* Disable writeback. */
3279 ctxt->dst.type = OP_NONE;
3280 return X86EMUL_CONTINUE;
3283 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3284 void (*get)(struct x86_emulate_ctxt *ctxt,
3285 struct desc_ptr *ptr))
3287 struct desc_ptr desc_ptr;
3289 if (ctxt->mode == X86EMUL_MODE_PROT64)
3291 get(ctxt, &desc_ptr);
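	/* A 16-bit operand size stores only the low 24 bits of the base. */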
3292 if (ctxt->op_bytes == 2) {
3294 desc_ptr.address &= 0x00ffffff;
3296 /* Disable writeback. */
3297 ctxt->dst.type = OP_NONE;
3298 return segmented_write(ctxt, ctxt->dst.addr.mem,
3299 &desc_ptr, 2 + ctxt->op_bytes);
3302 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3304 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3307 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3309 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3312 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3314 struct desc_ptr desc_ptr;
3317 if (ctxt->mode == X86EMUL_MODE_PROT64)
3319 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3320 &desc_ptr.size, &desc_ptr.address,
3322 if (rc != X86EMUL_CONTINUE)
3324 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3325 is_noncanonical_address(desc_ptr.address))
3326 return emulate_gp(ctxt, 0);
3328 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3330 ctxt->ops->set_idt(ctxt, &desc_ptr);
3331 /* Disable writeback. */
3332 ctxt->dst.type = OP_NONE;
3333 return X86EMUL_CONTINUE;
3336 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3338 return em_lgdt_lidt(ctxt, true);
3341 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3345 rc = ctxt->ops->fix_hypercall(ctxt);
3347 /* Disable writeback. */
3348 ctxt->dst.type = OP_NONE;
3352 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3354 return em_lgdt_lidt(ctxt, false);
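/* SMSW stores only the low 16 bits of CR0 when the destination is memory. */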
3357 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3359 if (ctxt->dst.type == OP_MEM)
3360 ctxt->dst.bytes = 2;
3361 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3362 return X86EMUL_CONTINUE;
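/* LMSW updates only CR0[3:0] (PE/MP/EM/TS) and cannot clear PE. */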
3365 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3367 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3368 | (ctxt->src.val & 0x0f));
3369 ctxt->dst.type = OP_NONE;
3370 return X86EMUL_CONTINUE;
3373 static int em_loop(struct x86_emulate_ctxt *ctxt)
3375 int rc = X86EMUL_CONTINUE;
3377 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
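	/*
	 * 0xe2 is plain LOOP; 0xe0/0xe1 (LOOPNE/LOOPE) additionally test
	 * ZF, which b ^ 0x5 maps onto the NE/E condition codes.
	 */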
3378 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3379 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3380 rc = jmp_rel(ctxt, ctxt->src.val);
3385 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3387 int rc = X86EMUL_CONTINUE;
3389 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3390 rc = jmp_rel(ctxt, ctxt->src.val);
3395 static int em_in(struct x86_emulate_ctxt *ctxt)
3397 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3399 return X86EMUL_IO_NEEDED;
3401 return X86EMUL_CONTINUE;
3404 static int em_out(struct x86_emulate_ctxt *ctxt)
3406 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3408 /* Disable writeback. */
3409 ctxt->dst.type = OP_NONE;
3410 return X86EMUL_CONTINUE;
3413 static int em_cli(struct x86_emulate_ctxt *ctxt)
3415 if (emulator_bad_iopl(ctxt))
3416 return emulate_gp(ctxt, 0);
3418 ctxt->eflags &= ~X86_EFLAGS_IF;
3419 return X86EMUL_CONTINUE;
3422 static int em_sti(struct x86_emulate_ctxt *ctxt)
3424 if (emulator_bad_iopl(ctxt))
3425 return emulate_gp(ctxt, 0);
3427 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3428 ctxt->eflags |= X86_EFLAGS_IF;
3429 return X86EMUL_CONTINUE;
3432 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3434 u32 eax, ebx, ecx, edx;
3436 eax = reg_read(ctxt, VCPU_REGS_RAX);
3437 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3438 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3439 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3440 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3441 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3442 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3443 return X86EMUL_CONTINUE;
3446 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3450 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3451 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3453 ctxt->eflags &= ~0xffUL;
3454 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3455 return X86EMUL_CONTINUE;
3458 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3460 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3461 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3462 return X86EMUL_CONTINUE;
3465 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3467 switch (ctxt->op_bytes) {
3468 #ifdef CONFIG_X86_64
3470 asm("bswap %0" : "+r"(ctxt->dst.val));
3474 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3477 return X86EMUL_CONTINUE;
3480 static int em_clflush(struct x86_emulate_ctxt *ctxt)
	/* emulate clflush unconditionally, regardless of CPUID */
3483 return X86EMUL_CONTINUE;
3486 static bool valid_cr(int nr)
3498 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3500 if (!valid_cr(ctxt->modrm_reg))
3501 return emulate_ud(ctxt);
3503 return X86EMUL_CONTINUE;
3506 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3508 u64 new_val = ctxt->src.val64;
3509 int cr = ctxt->modrm_reg;
3512 static u64 cr_reserved_bits[] = {
3513 0xffffffff00000000ULL,
3514 0, 0, 0, /* CR3 checked later */
3521 return emulate_ud(ctxt);
3523 if (new_val & cr_reserved_bits[cr])
3524 return emulate_gp(ctxt, 0);
3529 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3530 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3531 return emulate_gp(ctxt, 0);
3533 cr4 = ctxt->ops->get_cr(ctxt, 4);
3534 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3536 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3537 !(cr4 & X86_CR4_PAE))
3538 return emulate_gp(ctxt, 0);
3545 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3546 if (efer & EFER_LMA)
3547 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
3550 return emulate_gp(ctxt, 0);
3555 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3557 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3558 return emulate_gp(ctxt, 0);
3564 return X86EMUL_CONTINUE;
3567 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3571 ctxt->ops->get_dr(ctxt, 7, &dr7);
3573 /* Check if DR7.Global_Enable is set */
3574 return dr7 & (1 << 13);
3577 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3579 int dr = ctxt->modrm_reg;
3583 return emulate_ud(ctxt);
3585 cr4 = ctxt->ops->get_cr(ctxt, 4);
3586 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3587 return emulate_ud(ctxt);
3589 if (check_dr7_gd(ctxt)) {
3592 ctxt->ops->get_dr(ctxt, 6, &dr6);
3594 dr6 |= DR6_BD | DR6_RTM;
3595 ctxt->ops->set_dr(ctxt, 6, dr6);
3596 return emulate_db(ctxt);
3599 return X86EMUL_CONTINUE;
3602 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3604 u64 new_val = ctxt->src.val64;
3605 int dr = ctxt->modrm_reg;
3607 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3608 return emulate_gp(ctxt, 0);
3610 return check_dr_read(ctxt);
3613 static int check_svme(struct x86_emulate_ctxt *ctxt)
3617 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3619 if (!(efer & EFER_SVME))
3620 return emulate_ud(ctxt);
3622 return X86EMUL_CONTINUE;
3625 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3627 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3629 /* Valid physical address? */
3630 if (rax & 0xffff000000000000ULL)
3631 return emulate_gp(ctxt, 0);
3633 return check_svme(ctxt);
3636 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3638 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3640 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3641 return emulate_ud(ctxt);
3643 return X86EMUL_CONTINUE;
3646 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3648 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3649 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3651 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3652 ctxt->ops->check_pmc(ctxt, rcx))
3653 return emulate_gp(ctxt, 0);
3655 return X86EMUL_CONTINUE;
3658 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3660 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
3662 return emulate_gp(ctxt, 0);
3664 return X86EMUL_CONTINUE;
3667 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3669 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
3671 return emulate_gp(ctxt, 0);
3673 return X86EMUL_CONTINUE;
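/*
 * Shorthand constructors for the decode tables below: D() is bare
 * flags, I()/F() attach an ->execute handler or a fastop, G()/GD()/E()
 * redirect through ModRM-indexed group tables, and the *I*/*P variants
 * add intercept and permission-check hooks. N marks "not implemented".
 */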
3676 #define D(_y) { .flags = (_y) }
3677 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3678 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3679 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3680 #define N D(NotImpl)
3681 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3682 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3683 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3684 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
3685 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3686 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3687 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3688 #define II(_f, _e, _i) \
3689 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3690 #define IIP(_f, _e, _i, _p) \
3691 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3692 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3693 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3695 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3696 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3697 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3698 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3699 #define I2bvIP(_f, _e, _i, _p) \
3700 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3702 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3703 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3704 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3706 static const struct opcode group7_rm0[] = {
3708 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3712 static const struct opcode group7_rm1[] = {
3713 DI(SrcNone | Priv, monitor),
3714 DI(SrcNone | Priv, mwait),
3718 static const struct opcode group7_rm3[] = {
3719 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3720 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
3721 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3722 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3723 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3724 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3725 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3726 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3729 static const struct opcode group7_rm7[] = {
3731 DIP(SrcNone, rdtscp, check_rdtsc),
3735 static const struct opcode group1[] = {
3737 F(Lock | PageTable, em_or),
3740 F(Lock | PageTable, em_and),
3746 static const struct opcode group1A[] = {
3747 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3750 static const struct opcode group2[] = {
3751 F(DstMem | ModRM, em_rol),
3752 F(DstMem | ModRM, em_ror),
3753 F(DstMem | ModRM, em_rcl),
3754 F(DstMem | ModRM, em_rcr),
3755 F(DstMem | ModRM, em_shl),
3756 F(DstMem | ModRM, em_shr),
3757 F(DstMem | ModRM, em_shl),
3758 F(DstMem | ModRM, em_sar),
3761 static const struct opcode group3[] = {
3762 F(DstMem | SrcImm | NoWrite, em_test),
3763 F(DstMem | SrcImm | NoWrite, em_test),
3764 F(DstMem | SrcNone | Lock, em_not),
3765 F(DstMem | SrcNone | Lock, em_neg),
3766 F(DstXacc | Src2Mem, em_mul_ex),
3767 F(DstXacc | Src2Mem, em_imul_ex),
3768 F(DstXacc | Src2Mem, em_div_ex),
3769 F(DstXacc | Src2Mem, em_idiv_ex),
3772 static const struct opcode group4[] = {
3773 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3774 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3778 static const struct opcode group5[] = {
3779 F(DstMem | SrcNone | Lock, em_inc),
3780 F(DstMem | SrcNone | Lock, em_dec),
3781 I(SrcMem | NearBranch, em_call_near_abs),
3782 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3783 I(SrcMem | NearBranch, em_jmp_abs),
3784 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
3785 I(SrcMem | Stack, em_push), D(Undefined),
3788 static const struct opcode group6[] = {
3791 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3792 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3796 static const struct group_dual group7 = { {
3797 II(Mov | DstMem, em_sgdt, sgdt),
3798 II(Mov | DstMem, em_sidt, sidt),
3799 II(SrcMem | Priv, em_lgdt, lgdt),
3800 II(SrcMem | Priv, em_lidt, lidt),
3801 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3802 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3803 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3807 N, EXT(0, group7_rm3),
3808 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3809 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3813 static const struct opcode group8[] = {
3815 F(DstMem | SrcImmByte | NoWrite, em_bt),
3816 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3817 F(DstMem | SrcImmByte | Lock, em_btr),
3818 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3821 static const struct group_dual group9 = { {
3822 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3824 N, N, N, N, N, N, N, N,
3827 static const struct opcode group11[] = {
3828 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3832 static const struct gprefix pfx_0f_ae_7 = {
3833 I(SrcMem | ByteOp, em_clflush), N, N, N,
3836 static const struct group_dual group15 = { {
3837 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3839 N, N, N, N, N, N, N, N,
3842 static const struct gprefix pfx_0f_6f_0f_7f = {
3843 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3846 static const struct instr_dual instr_dual_0f_2b = {
3850 static const struct gprefix pfx_0f_2b = {
3851 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
3854 static const struct gprefix pfx_0f_28_0f_29 = {
3855 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3858 static const struct gprefix pfx_0f_e7 = {
3859 N, I(Sse, em_mov), N, N,
3862 static const struct escape escape_d9 = { {
3863 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
3866 N, N, N, N, N, N, N, N,
3868 N, N, N, N, N, N, N, N,
3870 N, N, N, N, N, N, N, N,
3872 N, N, N, N, N, N, N, N,
3874 N, N, N, N, N, N, N, N,
3876 N, N, N, N, N, N, N, N,
3878 N, N, N, N, N, N, N, N,
3880 N, N, N, N, N, N, N, N,
3883 static const struct escape escape_db = { {
3884 N, N, N, N, N, N, N, N,
3887 N, N, N, N, N, N, N, N,
3889 N, N, N, N, N, N, N, N,
3891 N, N, N, N, N, N, N, N,
3893 N, N, N, N, N, N, N, N,
3895 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3897 N, N, N, N, N, N, N, N,
3899 N, N, N, N, N, N, N, N,
3901 N, N, N, N, N, N, N, N,
3904 static const struct escape escape_dd = { {
3905 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
3908 N, N, N, N, N, N, N, N,
3910 N, N, N, N, N, N, N, N,
3912 N, N, N, N, N, N, N, N,
3914 N, N, N, N, N, N, N, N,
3916 N, N, N, N, N, N, N, N,
3918 N, N, N, N, N, N, N, N,
3920 N, N, N, N, N, N, N, N,
3922 N, N, N, N, N, N, N, N,
3925 static const struct instr_dual instr_dual_0f_c3 = {
3926 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
3929 static const struct opcode opcode_table[256] = {
3931 F6ALU(Lock, em_add),
3932 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3933 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3935 F6ALU(Lock | PageTable, em_or),
3936 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3939 F6ALU(Lock, em_adc),
3940 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3941 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3943 F6ALU(Lock, em_sbb),
3944 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3945 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3947 F6ALU(Lock | PageTable, em_and), N, N,
3949 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3951 F6ALU(Lock, em_xor), N, N,
3953 F6ALU(NoWrite, em_cmp), N, N,
3955 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3957 X8(I(SrcReg | Stack, em_push)),
3959 X8(I(DstReg | Stack, em_pop)),
3961 I(ImplicitOps | Stack | No64, em_pusha),
3962 I(ImplicitOps | Stack | No64, em_popa),
3963 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3966 I(SrcImm | Mov | Stack, em_push),
3967 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3968 I(SrcImmByte | Mov | Stack, em_push),
3969 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3970 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3971 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3973 X16(D(SrcImmByte | NearBranch)),
3975 G(ByteOp | DstMem | SrcImm, group1),
3976 G(DstMem | SrcImm, group1),
3977 G(ByteOp | DstMem | SrcImm | No64, group1),
3978 G(DstMem | SrcImmByte, group1),
3979 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3980 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3982 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3983 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3984 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3985 D(ModRM | SrcMem | NoAccess | DstReg),
3986 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3989 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3991 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3992 I(SrcImmFAddr | No64, em_call_far), N,
3993 II(ImplicitOps | Stack, em_pushf, pushf),
3994 II(ImplicitOps | Stack, em_popf, popf),
3995 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
3997 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3998 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
3999 I2bv(SrcSI | DstDI | Mov | String, em_mov),
4000 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
4002 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4003 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4004 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4005 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4007 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4009 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4011 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4012 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4013 I(ImplicitOps | NearBranch, em_ret),
4014 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4015 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4016 G(ByteOp, group11), G(0, group11),
4018 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4019 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4020 I(ImplicitOps | Stack, em_ret_far),
4021 D(ImplicitOps), DI(SrcImmByte, intn),
4022 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4024 G(Src2One | ByteOp, group2), G(Src2One, group2),
4025 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4026 I(DstAcc | SrcImmUByte | No64, em_aam),
4027 I(DstAcc | SrcImmUByte | No64, em_aad),
4028 F(DstAcc | ByteOp | No64, em_salc),
4029 I(DstAcc | SrcXLat | ByteOp, em_mov),
4031 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4033 X3(I(SrcImmByte | NearBranch, em_loop)),
4034 I(SrcImmByte | NearBranch, em_jcxz),
4035 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4036 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4038 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4039 I(SrcImmFAddr | No64, em_jmp_far),
4040 D(SrcImmByte | ImplicitOps | NearBranch),
4041 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4042 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4044 N, DI(ImplicitOps, icebp), N, N,
4045 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4046 G(ByteOp, group3), G(0, group3),
4048 D(ImplicitOps), D(ImplicitOps),
4049 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4050 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
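/* Two-byte (0x0F-prefixed) opcode map, indexed by the second opcode byte. */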
4053 static const struct opcode twobyte_table[256] = {
4055 G(0, group6), GD(0, &group7), N, N,
4056 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4057 II(ImplicitOps | Priv, em_clts, clts), N,
4058 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4059 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4061 N, N, N, N, N, N, N, N,
4062 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4063 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4065 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4066 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4067 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4069 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4072 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4073 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4074 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4077 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4078 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4079 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4080 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4081 I(ImplicitOps | EmulateOnUD, em_sysenter),
4082 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4084 N, N, N, N, N, N, N, N,
4086 X16(D(DstReg | SrcMem | ModRM)),
4088 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4093 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4098 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4100 X16(D(SrcImm | NearBranch)),
4102 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4104 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4105 II(ImplicitOps, em_cpuid, cpuid),
4106 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4107 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4108 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4110 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4111 DI(ImplicitOps, rsm),
4112 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4113 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4114 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4115 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4117 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
4118 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4119 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4120 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4121 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4122 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4126 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4127 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4128 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4130 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4131 N, ID(0, &instr_dual_0f_c3),
4132 N, N, N, GD(0, &group9),
4134 X8(I(DstReg, em_bswap)),
4136 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4138 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4139 N, N, N, N, N, N, N, N,
4141 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4144 static const struct instr_dual instr_dual_0f_38_f0 = {
4145 I(DstReg | SrcMem | Mov, em_movbe), N
4148 static const struct instr_dual instr_dual_0f_38_f1 = {
4149 I(DstMem | SrcReg | Mov, em_movbe), N
4152 static const struct gprefix three_byte_0f_38_f0 = {
4153 ID(0, &instr_dual_0f_38_f0), N, N, N
4156 static const struct gprefix three_byte_0f_38_f1 = {
4157 ID(0, &instr_dual_0f_38_f1), N, N, N
/*
 * Insns below are selected by the prefix which is indexed by the third
 * opcode byte.
 */
4164 static const struct opcode opcode_map_0f_38[256] = {
4166 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4168 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4170 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4171 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4190 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4194 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4200 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4201 unsigned size, bool sign_extension)
4203 int rc = X86EMUL_CONTINUE;
4207 op->addr.mem.ea = ctxt->_eip;
4208 /* NB. Immediates are sign-extended as necessary. */
4209 switch (op->bytes) {
4211 op->val = insn_fetch(s8, ctxt);
4214 op->val = insn_fetch(s16, ctxt);
4217 op->val = insn_fetch(s32, ctxt);
4220 op->val = insn_fetch(s64, ctxt);
4223 if (!sign_extension) {
4224 switch (op->bytes) {
4232 op->val &= 0xffffffff;
4240 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4243 int rc = X86EMUL_CONTINUE;
4247 decode_register_operand(ctxt, op);
4250 rc = decode_imm(ctxt, op, 1, false);
4253 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4257 if (ctxt->d & BitOp)
4258 fetch_bit_operand(ctxt);
4259 op->orig_val = op->val;
4262 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4266 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4267 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4268 fetch_register_operand(op);
4269 op->orig_val = op->val;
4273 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4274 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4275 fetch_register_operand(op);
4276 op->orig_val = op->val;
4279 if (ctxt->d & ByteOp) {
4284 op->bytes = ctxt->op_bytes;
4285 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4286 fetch_register_operand(op);
4287 op->orig_val = op->val;
4291 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4293 register_address(ctxt, VCPU_REGS_RDI);
4294 op->addr.mem.seg = VCPU_SREG_ES;
4301 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4302 fetch_register_operand(op);
4307 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4310 rc = decode_imm(ctxt, op, 1, true);
4318 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4321 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4324 ctxt->memop.bytes = 1;
4325 if (ctxt->memop.type == OP_REG) {
4326 ctxt->memop.addr.reg = decode_register(ctxt,
4327 ctxt->modrm_rm, true);
4328 fetch_register_operand(&ctxt->memop);
4332 ctxt->memop.bytes = 2;
4335 ctxt->memop.bytes = 4;
4338 rc = decode_imm(ctxt, op, 2, false);
4341 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4345 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4347 register_address(ctxt, VCPU_REGS_RSI);
4348 op->addr.mem.seg = ctxt->seg_override;
4354 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4357 reg_read(ctxt, VCPU_REGS_RBX) +
4358 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4359 op->addr.mem.seg = ctxt->seg_override;
4364 op->addr.mem.ea = ctxt->_eip;
4365 op->bytes = ctxt->op_bytes + 2;
4366 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4369 ctxt->memop.bytes = ctxt->op_bytes + 2;
4373 op->val = VCPU_SREG_ES;
4377 op->val = VCPU_SREG_CS;
4381 op->val = VCPU_SREG_SS;
4385 op->val = VCPU_SREG_DS;
4389 op->val = VCPU_SREG_FS;
4393 op->val = VCPU_SREG_GS;
4396 /* Special instructions do their own operand decoding. */
4398 op->type = OP_NONE; /* Disable writeback. */
4406 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4408 int rc = X86EMUL_CONTINUE;
4409 int mode = ctxt->mode;
4410 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4411 bool op_prefix = false;
4412 bool has_seg_override = false;
4413 struct opcode opcode;
4415 ctxt->memop.type = OP_NONE;
4416 ctxt->memopp = NULL;
4417 ctxt->_eip = ctxt->eip;
4418 ctxt->fetch.ptr = ctxt->fetch.data;
4419 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4420 ctxt->opcode_len = 1;
4422 memcpy(ctxt->fetch.data, insn, insn_len);
4424 rc = __do_insn_fetch_bytes(ctxt, 1);
4425 if (rc != X86EMUL_CONTINUE)
4430 case X86EMUL_MODE_REAL:
4431 case X86EMUL_MODE_VM86:
4432 case X86EMUL_MODE_PROT16:
4433 def_op_bytes = def_ad_bytes = 2;
4435 case X86EMUL_MODE_PROT32:
4436 def_op_bytes = def_ad_bytes = 4;
4438 #ifdef CONFIG_X86_64
4439 case X86EMUL_MODE_PROT64:
4445 return EMULATION_FAILED;
4448 ctxt->op_bytes = def_op_bytes;
4449 ctxt->ad_bytes = def_ad_bytes;
4451 /* Legacy prefixes. */
4453 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4454 case 0x66: /* operand-size override */
4456 /* switch between 2/4 bytes */
4457 ctxt->op_bytes = def_op_bytes ^ 6;
4459 case 0x67: /* address-size override */
4460 if (mode == X86EMUL_MODE_PROT64)
4461 /* switch between 4/8 bytes */
4462 ctxt->ad_bytes = def_ad_bytes ^ 12;
4464 /* switch between 2/4 bytes */
4465 ctxt->ad_bytes = def_ad_bytes ^ 6;
4467 case 0x26: /* ES override */
4468 case 0x2e: /* CS override */
4469 case 0x36: /* SS override */
4470 case 0x3e: /* DS override */
4471 has_seg_override = true;
4472 ctxt->seg_override = (ctxt->b >> 3) & 3;
4474 case 0x64: /* FS override */
4475 case 0x65: /* GS override */
4476 has_seg_override = true;
4477 ctxt->seg_override = ctxt->b & 7;
4479 case 0x40 ... 0x4f: /* REX */
4480 if (mode != X86EMUL_MODE_PROT64)
4482 ctxt->rex_prefix = ctxt->b;
4484 case 0xf0: /* LOCK */
4485 ctxt->lock_prefix = 1;
4487 case 0xf2: /* REPNE/REPNZ */
4488 case 0xf3: /* REP/REPE/REPZ */
4489 ctxt->rep_prefix = ctxt->b;
4495 /* Any legacy prefix after a REX prefix nullifies its effect. */
4497 ctxt->rex_prefix = 0;
4503 if (ctxt->rex_prefix & 8)
4504 ctxt->op_bytes = 8; /* REX.W */
4506 /* Opcode byte(s). */
4507 opcode = opcode_table[ctxt->b];
4508 /* Two-byte opcode? */
4509 if (ctxt->b == 0x0f) {
4510 ctxt->opcode_len = 2;
4511 ctxt->b = insn_fetch(u8, ctxt);
4512 opcode = twobyte_table[ctxt->b];
4514 /* 0F_38 opcode map */
4515 if (ctxt->b == 0x38) {
4516 ctxt->opcode_len = 3;
4517 ctxt->b = insn_fetch(u8, ctxt);
4518 opcode = opcode_map_0f_38[ctxt->b];
4521 ctxt->d = opcode.flags;
4523 if (ctxt->d & ModRM)
4524 ctxt->modrm = insn_fetch(u8, ctxt);
4526 /* vex-prefix instructions are not implemented */
4527 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4528 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
4532 while (ctxt->d & GroupMask) {
4533 switch (ctxt->d & GroupMask) {
4535 goffset = (ctxt->modrm >> 3) & 7;
4536 opcode = opcode.u.group[goffset];
4539 goffset = (ctxt->modrm >> 3) & 7;
4540 if ((ctxt->modrm >> 6) == 3)
4541 opcode = opcode.u.gdual->mod3[goffset];
4543 opcode = opcode.u.gdual->mod012[goffset];
4546 goffset = ctxt->modrm & 7;
4547 opcode = opcode.u.group[goffset];
4550 if (ctxt->rep_prefix && op_prefix)
4551 return EMULATION_FAILED;
4552 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4553 switch (simd_prefix) {
4554 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4555 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4556 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4557 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4561 if (ctxt->modrm > 0xbf)
4562 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4564 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4567 if ((ctxt->modrm >> 6) == 3)
4568 opcode = opcode.u.idual->mod3;
4570 opcode = opcode.u.idual->mod012;
4573 return EMULATION_FAILED;
4576 ctxt->d &= ~(u64)GroupMask;
4577 ctxt->d |= opcode.flags;
4582 return EMULATION_FAILED;
4584 ctxt->execute = opcode.u.execute;
4586 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4587 return EMULATION_FAILED;
4589 if (unlikely(ctxt->d &
4590 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4593 * These are copied unconditionally here, and checked unconditionally
4594 * in x86_emulate_insn.
4596 ctxt->check_perm = opcode.check_perm;
4597 ctxt->intercept = opcode.intercept;
4599 if (ctxt->d & NotImpl)
4600 return EMULATION_FAILED;
4602 if (mode == X86EMUL_MODE_PROT64) {
4603 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4605 else if (ctxt->d & NearBranch)
4609 if (ctxt->d & Op3264) {
4610 if (mode == X86EMUL_MODE_PROT64)
4616 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
4620 ctxt->op_bytes = 16;
4621 else if (ctxt->d & Mmx)
4625 /* ModRM and SIB bytes. */
4626 if (ctxt->d & ModRM) {
4627 rc = decode_modrm(ctxt, &ctxt->memop);
4628 if (!has_seg_override) {
4629 has_seg_override = true;
4630 ctxt->seg_override = ctxt->modrm_seg;
4632 } else if (ctxt->d & MemAbs)
4633 rc = decode_abs(ctxt, &ctxt->memop);
4634 if (rc != X86EMUL_CONTINUE)
4637 if (!has_seg_override)
4638 ctxt->seg_override = VCPU_SREG_DS;
4640 ctxt->memop.addr.mem.seg = ctxt->seg_override;
4643 * Decode and fetch the source operand: register, memory
4646 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4647 if (rc != X86EMUL_CONTINUE)
4651 * Decode and fetch the second source operand: register, memory
4654 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4655 if (rc != X86EMUL_CONTINUE)
4658 /* Decode and fetch the destination operand: register or memory. */
4659 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4661 if (ctxt->rip_relative)
4662 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
4663 ctxt->memopp->addr.mem.ea + ctxt->_eip);
4666 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4669 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4671 return ctxt->d & PageTable;
4674 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4676 /* The second termination condition only applies for REPE
4677 * and REPNE. Test if the repeat string operation prefix is
4678 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
4679 * corresponding termination condition according to:
4680 * - if REPE/REPZ and ZF = 0 then done
4681 * - if REPNE/REPNZ and ZF = 1 then done
4683 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4684 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4685 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4686 ((ctxt->eflags & EFLG_ZF) == 0))
4687 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4688 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4694 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4698 ctxt->ops->get_fpu(ctxt);
4699 asm volatile("1: fwait \n\t"
4701 ".pushsection .fixup,\"ax\" \n\t"
4703 "movb $1, %[fault] \n\t"
4706 _ASM_EXTABLE(1b, 3b)
4707 : [fault]"+qm"(fault));
4708 ctxt->ops->put_fpu(ctxt);
4710 if (unlikely(fault))
4711 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4713 return X86EMUL_CONTINUE;
4716 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4719 if (op->type == OP_MM)
4720 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
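/*
 * Dispatch a flag-mutating ALU stub: fastop routines are laid out at
 * FASTOP_SIZE strides per operand size, take operands in AX/DX/CX, and
 * exchange EFLAGS with the emulator via the pushf/popf around the call.
 */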
4723 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4725 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4726 if (!(ctxt->d & ByteOp))
4727 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4728 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4729 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4731 : "c"(ctxt->src2.val));
4732 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4733 if (!fop) /* exception is returned in fop variable */
4734 return emulate_de(ctxt);
4735 return X86EMUL_CONTINUE;
4738 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4740 memset(&ctxt->rip_relative, 0,
4741 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
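	/*
	 * This relies on the field layout of struct x86_emulate_ctxt:
	 * everything from ->rip_relative up to (but not including) ->modrm
	 * is per-instruction decode state and is cleared here.
	 */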
4743 ctxt->io_read.pos = 0;
4744 ctxt->io_read.end = 0;
4745 ctxt->mem_read.end = 0;
4748 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4750 const struct x86_emulate_ops *ops = ctxt->ops;
4751 int rc = X86EMUL_CONTINUE;
4752 int saved_dst_type = ctxt->dst.type;
4754 ctxt->mem_read.pos = 0;
4756 /* LOCK prefix is allowed only with some instructions */
4757 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4758 rc = emulate_ud(ctxt);
4762 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4763 rc = emulate_ud(ctxt);
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instructions can be executed only at CPL 0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction-specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
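			/* e.g. "rep movsb" with RCX == 0 performs no iterations. */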
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~EFLG_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				   &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
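	/*
	 * e.g. with AL = 0x80, cbw (op_bytes == 2) yields AX = 0xff80;
	 * cwde sign-extends AX into EAX; cdqe sign-extends EAX into RAX.
	 */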
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:		/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);
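
		/*
		 * Each iteration is a full pass through the emulator:
		 * either emulation is restarted in-kernel
		 * (EMULATION_RESTART below), or eip is left pointing
		 * at the insn so the guest re-executes it.
		 */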
		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset the read cache. This usually happens
				 * before decode, but since the instruction is
				 * restarted we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
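		/*
		 * In 64-bit mode, a 32-bit cmov zero-extends the
		 * destination register even when the condition is
		 * false, so writeback is kept in that case.
		 */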
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jcc rel (near), e.g. jnz */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
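	/*
	 * e.g. "movzx %al, %ecx" widens an 8-bit source with zero
	 * extension; movsx below propagates the sign bit instead.
	 */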
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:
	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}