/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
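
/*
 * Example (illustrative): an entry whose Dst field holds OpReg and whose
 * Src field holds OpMem describes a "reg <- mem" form.  The DstShift and
 * SrcShift macros below place these 5-bit values at fixed positions in a
 * single 64-bit flag word.
 */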
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
#define String      (1<<13)	/* String instruction (rep capable) */
#define Stack       (1<<14)	/* Stack instruction (push/pop) */
#define GroupMask   (7<<15)	/* Opcode uses one of the group mechanisms */
#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)	/* Escape to coprocessor instruction */
#define Sse         (1<<18)	/* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
#define Prot        (1<<21)	/* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22)	/* Emulate if unsupported by the host */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)	/* instruction is not valid in 64-bit mode */
#define PageTable   (1 << 29)	/* instruction used to write page table */
#define NotImpl     (1 << 30)	/* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
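
/*
 * Illustrative sketch (hypothetical helpers, not used by the decoder):
 * how the three operand-type fields come back out of a combined flag
 * word.  The decode path does the equivalent inline.
 */
static inline unsigned example_dst_optype(u64 d)
{
	return (d & DstMask) >> DstShift;	/* one of the Op* values */
}

static inline unsigned example_src_optype(u64 d)
{
	return (d & SrcMask) >> SrcShift;
}

static inline unsigned example_src2_optype(u64 d)
{
	return (d & Src2Mask) >> Src2Shift;
}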
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
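
/*
 * For instance, X8(x) expands to eight comma-separated copies of x; the
 * opcode tables (elided here) use these macros to fill runs of identical
 * entries.
 */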
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
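
/*
 * Illustrative sketch only - the emulator's real dispatch is fastop(),
 * declared further down in this file.  Because the FASTOP* macros emit
 * the 1/2/4/8-byte variants FASTOP_SIZE bytes apart, the variant for the
 * current operand size can be located by arithmetic instead of a jump
 * table:
 */
static inline void *fastop_variant_sketch(void *base, unsigned op_bytes)
{
	/* ilog2: 1->0, 2->1, 4->2, 8->3, matching the NR_FASTOP slots */
	return (char *)base + ilog2(op_bytes) * FASTOP_SIZE;
}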
struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

/* EFLAGS bit definitions. */
#define EFLG_ID		(1<<21)
#define EFLG_VIP	(1<<20)
#define EFLG_VIF	(1<<19)
#define EFLG_AC		(1<<18)
#define EFLG_VM		(1<<17)
#define EFLG_RF		(1<<16)
#define EFLG_IOPL	(3<<12)
#define EFLG_NT		(1<<14)
#define EFLG_OF		(1<<11)
#define EFLG_DF		(1<<10)
#define EFLG_IF		(1<<9)
#define EFLG_TF		(1<<8)
#define EFLG_SF		(1<<7)
#define EFLG_ZF		(1<<6)
#define EFLG_AF		(1<<4)
#define EFLG_PF		(1<<2)
#define EFLG_CF		(1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
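
/*
 * Illustrative, unused sketch of the register-cache discipline above:
 * read through reg_read(), write through reg_write(), read-modify-write
 * through reg_rmw(); writeback_registers() later flushes only the GPRs
 * whose dirty bit was set.
 */
static inline void reg_cache_example(struct x86_emulate_ctxt *ctxt)
{
	*reg_rmw(ctxt, VCPU_REGS_RAX) += 1;	/* marks RAX valid + dirty */
}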
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op, dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op, dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op, dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op, dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END
/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;
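
/*
 * Note on the exception path: fastop variants emitted with FOP1EEX (for
 * example the DIV/IDIV families declared below) carry an exception-table
 * entry pointing at kvm_fastop_exception; a faulting instruction resumes
 * there, and the xor zeroes %esi - the "ex" slot of the fastop calling
 * convention - to report the fault to the caller.
 */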
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg, mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
				 bool cs_l)
{
	switch (ctxt->op_bytes) {
	case 2:
		ctxt->_eip = (u16)dst;
		break;
	case 4:
		ctxt->_eip = (u32)dst;
		break;
#ifdef CONFIG_X86_64
	case 8:
		if ((cs_l && is_noncanonical_address(dst)) ||
		    (!cs_l && (dst >> 32) != 0))
			return emulate_gp(ctxt, 0);
		ctxt->_eip = dst;
		break;
#endif
	default:
		WARN(1, "unsupported eip assignment size\n");
	}
	return X86EMUL_CONTINUE;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			return emulate_gp(ctxt, 0);

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
		     || !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim)
				goto bad;
			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		} else {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim)
				goto bad;
			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		}
		if (size > *max_size)
			goto bad;
		cpl = ctxt->ops->cpl(ctxt);
		if (!(desc.type & 8)) {
			/* data segment or readable code segment */
			if (cpl > desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
			if (cpl != desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */
			if (cpl < desc.dpl)
				goto bad;
		}
		break;
	}
	if (ctxt->mode != X86EMUL_MODE_PROT64)
		la &= (u32)-1;
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false, linear);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages, and one has been
	 * loaded at the beginning of x86_decode_insn.  So, if not enough
	 * bytes are still available, we must have hit the 15-byte limit.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	_type _x; \
 \
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += sizeof(_type); \
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
	ctxt->fetch.ptr += sizeof(_type); \
	_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
})
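
/*
 * Typical use of the fetch macros (illustrative): the decode path pulls
 * bytes from the stream with forms such as
 *
 *	sib = insn_fetch(u8, ctxt);
 *	modrm_ea += insn_fetch(s32, ctxt);
 *
 * Each advances ctxt->_eip and ctxt->fetch.ptr; on a failed prefetch rc
 * is set and control jumps to the caller's "done" label, so the macros
 * may only be used where such a label exists.
 */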
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}
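
/*
 * Example (illustrative): with no REX prefix, a byte-sized modrm_reg of
 * 4 selects AH, i.e. byte 1 of the RAX slot - hence the "& 3" plus
 * one-byte offset above.  With any REX prefix present, 4 selects SPL and
 * the plain reg_rmw() path is taken instead.
 */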
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}
FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP2R(cmp, cmp_r);
static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}
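
/*
 * Worked example (illustrative): in 16-bit addressing, ModRM 0x46 with
 * displacement 0x10 has mod=1, rm=6, so the code above computes
 * modrm_ea = bp + 0x10 and, because rm is 6 with mod != 0, switches the
 * default segment to SS - matching "[bp+0x10]" semantics.
 */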
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
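
/*
 * Worked example (illustrative): "bt %ax, mem" with ax == 100 and a
 * 2-byte operand gives mask = ~15; the effective address advances by
 * (100 & ~15) >> 3 == 12 bytes and the remaining in-word bit offset is
 * 100 & 15 == 4.
 */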
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}
static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;
		u32 base3;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}
/* allowed only for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

/* allowed only for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     bool in_task_switch,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or segment descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
				sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (is_noncanonical_address(get_desc_base(&seg_desc) |
					    ((u64)base3 << 32)))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
}

static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt, op->addr.mem,
						 &op->orig_val, &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt, op->addr.mem,
					       &op->val, op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt, op->addr.mem, op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
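
/*
 * Example (illustrative): a POPF at CPL 3 with IOPL 0 in protected mode
 * takes the protected-mode case above with neither condition true, so
 * the popped IF and IOPL bits are silently discarded rather than
 * faulting.
 */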
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}
static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}
static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
				 VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	u16 old_cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
				 VCPU_SREG_CS);

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory. */
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->dst.type = OP_REG;
		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}
static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
}
static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64bit
	 * longmode.  Also, a 64bit guest running a 32bit compat-app
	 * will #UD!  While this behaviour can be fixed (by emulating)
	 * into an AMD response, CPUs of AMD can't behave like Intel.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	}

	return X86EMUL_CONTINUE;
}
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	default:
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = msr_data;

	return X86EMUL_CONTINUE;
}
2398 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2400 const struct x86_emulate_ops *ops = ctxt->ops;
2401 struct desc_struct cs, ss;
2402 u64 msr_data, rcx, rdx;
2404 u16 cs_sel = 0, ss_sel = 0;
2406 /* inject #GP if in real mode or Virtual 8086 mode */
2407 if (ctxt->mode == X86EMUL_MODE_REAL ||
2408 ctxt->mode == X86EMUL_MODE_VM86)
2409 return emulate_gp(ctxt, 0);
2411 setup_syscalls_segments(ctxt, &cs, &ss);
2413 if ((ctxt->rex_prefix & 0x8) != 0x0)
2414 usermode = X86EMUL_MODE_PROT64;
2416 usermode = X86EMUL_MODE_PROT32;
2418 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2419 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2423 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2425 case X86EMUL_MODE_PROT32:
2426 cs_sel = (u16)(msr_data + 16);
2427 if ((msr_data & 0xfffc) == 0x0)
2428 return emulate_gp(ctxt, 0);
2429 ss_sel = (u16)(msr_data + 24);
2433 case X86EMUL_MODE_PROT64:
2434 cs_sel = (u16)(msr_data + 32);
2435 if (msr_data == 0x0)
2436 return emulate_gp(ctxt, 0);
2437 ss_sel = cs_sel + 8;
2440 if (is_noncanonical_address(rcx) ||
2441 is_noncanonical_address(rdx))
2442 return emulate_gp(ctxt, 0);
2445 cs_sel |= SELECTOR_RPL_MASK;
2446 ss_sel |= SELECTOR_RPL_MASK;
2448 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2449 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2452 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2454 return X86EMUL_CONTINUE;
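/*
 * IOPL check for I/O-sensitive instructions: real mode never faults on
 * IOPL, VM86 mode always falls back to the TSS permission bitmap, and
 * protected mode denies direct access when CPL > EFLAGS.IOPL.
 */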
2457 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2460 if (ctxt->mode == X86EMUL_MODE_REAL)
2462 if (ctxt->mode == X86EMUL_MODE_VM86)
2464 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2465 return ctxt->ops->cpl(ctxt) > iopl;
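/*
 * Consult the I/O permission bitmap hanging off the TSS: the bitmap
 * offset is read from TSS offset 102, the two bytes covering the port
 * are fetched, and a mask of 'len' bits at the port's bit index is
 * tested; any set bit denies the access.
 */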
2468 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2471 const struct x86_emulate_ops *ops = ctxt->ops;
2472 struct desc_struct tr_seg;
2475 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2476 unsigned mask = (1 << len) - 1;
2479 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2482 if (desc_limit_scaled(&tr_seg) < 103)
2484 base = get_desc_base(&tr_seg);
2485 #ifdef CONFIG_X86_64
2486 base |= ((u64)base3) << 32;
2488 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2489 if (r != X86EMUL_CONTINUE)
2491 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2493 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2494 if (r != X86EMUL_CONTINUE)
2496 if ((perm >> bit_idx) & mask)
2501 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2507 if (emulator_bad_iopl(ctxt))
2508 if (!emulator_io_port_access_allowed(ctxt, port, len))
2511 ctxt->perm_ok = true;
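/*
 * 16-bit TSS save/restore: a 286-style TSS carries only IP, FLAGS, the
 * general-purpose registers and the segment selectors, so that is all
 * these helpers transfer.
 */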
2516 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2517 struct tss_segment_16 *tss)
2519 tss->ip = ctxt->_eip;
2520 tss->flag = ctxt->eflags;
2521 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2522 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2523 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2524 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2525 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2526 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2527 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2528 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2530 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2531 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2532 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2533 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2534 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2537 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2538 struct tss_segment_16 *tss)
2543 ctxt->_eip = tss->ip;
2544 ctxt->eflags = tss->flag | 2;
2545 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2546 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2547 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2548 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2549 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2550 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2551 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2552 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2555 * SDM says that segment selectors are loaded before segment descriptors.
2558 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2559 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2560 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2561 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2562 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2567 * Now load segment descriptors. If a fault happens at this stage,
2568 * it is handled in the context of the new task.
2570 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2572 if (ret != X86EMUL_CONTINUE)
2574 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2576 if (ret != X86EMUL_CONTINUE)
2578 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2580 if (ret != X86EMUL_CONTINUE)
2582 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2584 if (ret != X86EMUL_CONTINUE)
2586 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2588 if (ret != X86EMUL_CONTINUE)
2591 return X86EMUL_CONTINUE;
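/*
 * 16-bit task switch: snapshot the outgoing state into the old TSS,
 * read the incoming TSS, optionally chain old_tss_sel into its
 * prev_task_link field, then load the new state.
 */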
2594 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2595 u16 tss_selector, u16 old_tss_sel,
2596 ulong old_tss_base, struct desc_struct *new_desc)
2598 const struct x86_emulate_ops *ops = ctxt->ops;
2599 struct tss_segment_16 tss_seg;
2601 u32 new_tss_base = get_desc_base(new_desc);
2603 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2605 if (ret != X86EMUL_CONTINUE)
2606 /* FIXME: need to provide precise fault address */
2609 save_state_to_tss16(ctxt, &tss_seg);
2611 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2613 if (ret != X86EMUL_CONTINUE)
2614 /* FIXME: need to provide precise fault address */
2617 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2619 if (ret != X86EMUL_CONTINUE)
2620 /* FIXME: need to provide precise fault address */
2623 if (old_tss_sel != 0xffff) {
2624 tss_seg.prev_task_link = old_tss_sel;
2626 ret = ops->write_std(ctxt, new_tss_base,
2627 &tss_seg.prev_task_link,
2628 sizeof tss_seg.prev_task_link,
2630 if (ret != X86EMUL_CONTINUE)
2631 /* FIXME: need to provide precise fault address */
2635 return load_state_from_tss16(ctxt, &tss_seg);
2638 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2639 struct tss_segment_32 *tss)
2641 /* CR3 and the LDT selector are intentionally not saved */
2642 tss->eip = ctxt->_eip;
2643 tss->eflags = ctxt->eflags;
2644 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2645 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2646 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2647 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2648 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2649 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2650 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2651 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2653 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2654 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2655 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2656 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2657 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2658 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2661 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2662 struct tss_segment_32 *tss)
2667 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2668 return emulate_gp(ctxt, 0);
2669 ctxt->_eip = tss->eip;
2670 ctxt->eflags = tss->eflags | 2;
2672 /* General purpose registers */
2673 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2674 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2675 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2676 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2677 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2678 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2679 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2680 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2683 * SDM says that segment selectors are loaded before segment
2684 * descriptors. This is important because CPL checks will use CS.RPL.
2687 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2688 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2689 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2690 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2691 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2692 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2693 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2696 * If we're switching between Protected Mode and VM86, we need to make
2697 * sure to update the mode before loading the segment descriptors so
2698 * that the selectors are interpreted correctly.
2700 if (ctxt->eflags & X86_EFLAGS_VM) {
2701 ctxt->mode = X86EMUL_MODE_VM86;
2704 ctxt->mode = X86EMUL_MODE_PROT32;
2709 * Now load segment descriptors. If a fault happens at this stage,
2710 * it is handled in the context of the new task.
2712 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2714 if (ret != X86EMUL_CONTINUE)
2716 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2718 if (ret != X86EMUL_CONTINUE)
2720 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2722 if (ret != X86EMUL_CONTINUE)
2724 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2726 if (ret != X86EMUL_CONTINUE)
2728 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2730 if (ret != X86EMUL_CONTINUE)
2732 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2734 if (ret != X86EMUL_CONTINUE)
2736 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2738 if (ret != X86EMUL_CONTINUE)
2741 return X86EMUL_CONTINUE;
2744 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2745 u16 tss_selector, u16 old_tss_sel,
2746 ulong old_tss_base, struct desc_struct *new_desc)
2748 const struct x86_emulate_ops *ops = ctxt->ops;
2749 struct tss_segment_32 tss_seg;
2751 u32 new_tss_base = get_desc_base(new_desc);
2752 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2753 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2755 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2757 if (ret != X86EMUL_CONTINUE)
2758 /* FIXME: need to provide precise fault address */
2761 save_state_to_tss32(ctxt, &tss_seg);
2763 /* Only GP registers and segment selectors are saved */
2764 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2765 ldt_sel_offset - eip_offset, &ctxt->exception);
2766 if (ret != X86EMUL_CONTINUE)
2767 /* FIXME: need to provide precise fault address */
2770 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2772 if (ret != X86EMUL_CONTINUE)
2773 /* FIXME: need to provide precise fault address */
2776 if (old_tss_sel != 0xffff) {
2777 tss_seg.prev_task_link = old_tss_sel;
2779 ret = ops->write_std(ctxt, new_tss_base,
2780 &tss_seg.prev_task_link,
2781 sizeof tss_seg.prev_task_link,
2783 if (ret != X86EMUL_CONTINUE)
2784 /* FIXME: need to provide precise fault address */
2788 return load_state_from_tss32(ctxt, &tss_seg);
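/*
 * Core of the hardware task-switch emulation. In order: fetch the new
 * and current TSS descriptors, apply the DPL check for software-int
 * task gates, validate the new TSS (present, limit >= 0x67 for a 32-bit
 * TSS or 0x2b for a 16-bit one, else #TS), clear the busy bit on
 * IRET/JMP, then hand off to task_switch_16/32 based on descriptor type
 * bit 3. On the way out: set NT and the back link for CALL/gate
 * switches, mark the new TSS busy unless this is an IRET, set CR0.TS,
 * load TR, and push the error code if the event supplied one.
 */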
2791 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2792 u16 tss_selector, int idt_index, int reason,
2793 bool has_error_code, u32 error_code)
2795 const struct x86_emulate_ops *ops = ctxt->ops;
2796 struct desc_struct curr_tss_desc, next_tss_desc;
2798 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2799 ulong old_tss_base =
2800 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2804 /* FIXME: old_tss_base == ~0 ? */
2806 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2807 if (ret != X86EMUL_CONTINUE)
2809 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2810 if (ret != X86EMUL_CONTINUE)
2813 /* FIXME: check that next_tss_desc is tss */
2816 * Check privileges. The three cases are task switch caused by...
2818 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2819 * 2. Exception/IRQ/iret: No check is performed
2820 * 3. jmp/call to TSS/task-gate: No check is performed since the
2821 * hardware checks it before exiting.
2823 if (reason == TASK_SWITCH_GATE) {
2824 if (idt_index != -1) {
2825 /* Software interrupts */
2826 struct desc_struct task_gate_desc;
2829 ret = read_interrupt_descriptor(ctxt, idt_index,
2831 if (ret != X86EMUL_CONTINUE)
2834 dpl = task_gate_desc.dpl;
2835 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2836 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2840 desc_limit = desc_limit_scaled(&next_tss_desc);
2841 if (!next_tss_desc.p ||
2842 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2843 desc_limit < 0x2b)) {
2844 return emulate_ts(ctxt, tss_selector & 0xfffc);
2847 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2848 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2849 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2852 if (reason == TASK_SWITCH_IRET)
2853 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2855 /* Set the back link to the previous task only if the NT bit is set
2856 in eflags; note that old_tss_sel is not used after this point */
2857 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2858 old_tss_sel = 0xffff;
2860 if (next_tss_desc.type & 8)
2861 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2862 old_tss_base, &next_tss_desc);
2864 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2865 old_tss_base, &next_tss_desc);
2866 if (ret != X86EMUL_CONTINUE)
2869 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2870 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2872 if (reason != TASK_SWITCH_IRET) {
2873 next_tss_desc.type |= (1 << 1); /* set busy flag */
2874 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2877 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2878 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2880 if (has_error_code) {
2881 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2882 ctxt->lock_prefix = 0;
2883 ctxt->src.val = (unsigned long) error_code;
2884 ret = em_push(ctxt);
2890 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2891 u16 tss_selector, int idt_index, int reason,
2892 bool has_error_code, u32 error_code)
2896 invalidate_registers(ctxt);
2897 ctxt->_eip = ctxt->eip;
2898 ctxt->dst.type = OP_NONE;
2900 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2901 has_error_code, error_code);
2903 if (rc == X86EMUL_CONTINUE) {
2904 ctxt->eip = ctxt->_eip;
2905 writeback_registers(ctxt);
2908 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2911 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2914 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2916 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2917 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
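/*
 * DAS decimal-adjusts AL after a packed-BCD subtraction: subtract 6
 * from AL when the low nibble exceeds 9 (or AF is set), and subtract
 * 0x60 when the original AL exceeded 0x99 (or CF was set). For example,
 * 0x47 - 0x38 leaves AL = 0x0f with AF set; DAS then yields AL = 0x09,
 * the correct BCD result of 47 - 38.
 */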
2920 static int em_das(struct x86_emulate_ctxt *ctxt)
2923 bool af, cf, old_cf;
2925 cf = ctxt->eflags & X86_EFLAGS_CF;
2931 af = ctxt->eflags & X86_EFLAGS_AF;
2932 if ((al & 0x0f) > 9 || af) {
2934 cf = old_cf | (al >= 250);
2939 if (old_al > 0x99 || old_cf) {
2945 /* Set PF, ZF, SF */
2946 ctxt->src.type = OP_IMM;
2948 ctxt->src.bytes = 1;
2949 fastop(ctxt, em_or);
2950 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2952 ctxt->eflags |= X86_EFLAGS_CF;
2954 ctxt->eflags |= X86_EFLAGS_AF;
2955 return X86EMUL_CONTINUE;
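/*
 * AAM divides AL by the immediate (10 unless overridden): AH gets the
 * quotient, AL the remainder, e.g. AL = 63 yields AH = 6, AL = 3; a
 * zero divisor raises #DE. AAD is the inverse, folding AH back into AL
 * via AL = AL + AH * imm and clearing AH.
 */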
2958 static int em_aam(struct x86_emulate_ctxt *ctxt)
2962 if (ctxt->src.val == 0)
2963 return emulate_de(ctxt);
2965 al = ctxt->dst.val & 0xff;
2966 ah = al / ctxt->src.val;
2967 al %= ctxt->src.val;
2969 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2971 /* Set PF, ZF, SF */
2972 ctxt->src.type = OP_IMM;
2974 ctxt->src.bytes = 1;
2975 fastop(ctxt, em_or);
2977 return X86EMUL_CONTINUE;
2980 static int em_aad(struct x86_emulate_ctxt *ctxt)
2982 u8 al = ctxt->dst.val & 0xff;
2983 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2985 al = (al + (ah * ctxt->src.val)) & 0xff;
2987 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2989 /* Set PF, ZF, SF */
2990 ctxt->src.type = OP_IMM;
2992 ctxt->src.bytes = 1;
2993 fastop(ctxt, em_or);
2995 return X86EMUL_CONTINUE;
2998 static int em_call(struct x86_emulate_ctxt *ctxt)
3001 long rel = ctxt->src.val;
3003 ctxt->src.val = (unsigned long)ctxt->_eip;
3004 rc = jmp_rel(ctxt, rel);
3005 if (rc != X86EMUL_CONTINUE)
3007 return em_push(ctxt);
3010 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3015 struct desc_struct old_desc, new_desc;
3016 const struct x86_emulate_ops *ops = ctxt->ops;
3017 int cpl = ctxt->ops->cpl(ctxt);
3019 old_eip = ctxt->_eip;
3020 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3022 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3023 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3025 if (rc != X86EMUL_CONTINUE)
3026 return X86EMUL_CONTINUE;
3028 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
3029 if (rc != X86EMUL_CONTINUE)
3032 ctxt->src.val = old_cs;
3034 if (rc != X86EMUL_CONTINUE)
3037 ctxt->src.val = old_eip;
3039 /* If we failed, we tainted the memory, but at the very least we should restore cs. */
3041 if (rc != X86EMUL_CONTINUE)
3045 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3050 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3055 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3056 if (rc != X86EMUL_CONTINUE)
3058 rc = assign_eip_near(ctxt, eip);
3059 if (rc != X86EMUL_CONTINUE)
3061 rsp_increment(ctxt, ctxt->src.val);
3062 return X86EMUL_CONTINUE;
3065 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3067 /* Write back the register source. */
3068 ctxt->src.val = ctxt->dst.val;
3069 write_register_operand(&ctxt->src);
3071 /* Write back the memory destination with implicit LOCK prefix. */
3072 ctxt->dst.val = ctxt->src.orig_val;
3073 ctxt->lock_prefix = 1;
3074 return X86EMUL_CONTINUE;
3077 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3079 ctxt->dst.val = ctxt->src2.val;
3080 return fastop(ctxt, em_imul);
3083 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3085 ctxt->dst.type = OP_REG;
3086 ctxt->dst.bytes = ctxt->src.bytes;
3087 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
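/*
 * Replicate the accumulator's sign across the destination: the shift
 * isolates the sign bit, so the expression below is ~0 for a negative
 * source and 0 otherwise (CWD/CDQ/CQO).
 */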
3088 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3090 return X86EMUL_CONTINUE;
3093 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3097 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3098 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3099 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3100 return X86EMUL_CONTINUE;
3103 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3107 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3108 return emulate_gp(ctxt, 0);
3109 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3110 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3111 return X86EMUL_CONTINUE;
3114 static int em_mov(struct x86_emulate_ctxt *ctxt)
3116 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3117 return X86EMUL_CONTINUE;
3120 #define FFL(x) bit(X86_FEATURE_##x)
3122 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3124 u32 ebx, ecx, edx, eax = 1;
3128 * Check MOVBE is set in the guest-visible CPUID leaf.
3130 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3131 if (!(ecx & FFL(MOVBE)))
3132 return emulate_ud(ctxt);
3134 switch (ctxt->op_bytes) {
3137 * From MOVBE definition: "...When the operand size is 16 bits,
3138 * the upper word of the destination register remains unchanged
3141 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3142 * rules, so we have to do the operation almost by hand.
3144 tmp = (u16)ctxt->src.val;
3145 ctxt->dst.val &= ~0xffffUL;
3146 ctxt->dst.val |= (unsigned long)swab16(tmp);
3149 ctxt->dst.val = swab32((u32)ctxt->src.val);
3152 ctxt->dst.val = swab64(ctxt->src.val);
3157 return X86EMUL_CONTINUE;
3160 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3162 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3163 return emulate_gp(ctxt, 0);
3165 /* Disable writeback. */
3166 ctxt->dst.type = OP_NONE;
3167 return X86EMUL_CONTINUE;
3170 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3174 if (ctxt->mode == X86EMUL_MODE_PROT64)
3175 val = ctxt->src.val & ~0ULL;
3177 val = ctxt->src.val & ~0U;
3179 /* #UD condition is already handled. */
3180 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3181 return emulate_gp(ctxt, 0);
3183 /* Disable writeback. */
3184 ctxt->dst.type = OP_NONE;
3185 return X86EMUL_CONTINUE;
3188 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3192 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3193 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3194 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3195 return emulate_gp(ctxt, 0);
3197 return X86EMUL_CONTINUE;
3200 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3204 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3205 return emulate_gp(ctxt, 0);
3207 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3208 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3209 return X86EMUL_CONTINUE;
3212 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3214 if (ctxt->modrm_reg > VCPU_SREG_GS)
3215 return emulate_ud(ctxt);
3217 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3218 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3219 ctxt->dst.bytes = 2;
3220 return X86EMUL_CONTINUE;
3223 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3225 u16 sel = ctxt->src.val;
3227 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3228 return emulate_ud(ctxt);
3230 if (ctxt->modrm_reg == VCPU_SREG_SS)
3231 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3233 /* Disable writeback. */
3234 ctxt->dst.type = OP_NONE;
3235 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3238 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3240 u16 sel = ctxt->src.val;
3242 /* Disable writeback. */
3243 ctxt->dst.type = OP_NONE;
3244 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3247 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3249 u16 sel = ctxt->src.val;
3251 /* Disable writeback. */
3252 ctxt->dst.type = OP_NONE;
3253 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3256 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3261 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3262 if (rc == X86EMUL_CONTINUE)
3263 ctxt->ops->invlpg(ctxt, linear);
3264 /* Disable writeback. */
3265 ctxt->dst.type = OP_NONE;
3266 return X86EMUL_CONTINUE;
3269 static int em_clts(struct x86_emulate_ctxt *ctxt)
3273 cr0 = ctxt->ops->get_cr(ctxt, 0);
3275 ctxt->ops->set_cr(ctxt, 0, cr0);
3276 return X86EMUL_CONTINUE;
3279 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3281 int rc = ctxt->ops->fix_hypercall(ctxt);
3283 if (rc != X86EMUL_CONTINUE)
3286 /* Let the processor re-execute the fixed hypercall */
3287 ctxt->_eip = ctxt->eip;
3288 /* Disable writeback. */
3289 ctxt->dst.type = OP_NONE;
3290 return X86EMUL_CONTINUE;
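/*
 * SGDT/SIDT store helper: writes the 2-byte limit followed by the table
 * base (2 + op_bytes bytes in total); with a 16-bit operand size the
 * stored base is truncated to 24 bits.
 */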
3293 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3294 void (*get)(struct x86_emulate_ctxt *ctxt,
3295 struct desc_ptr *ptr))
3297 struct desc_ptr desc_ptr;
3299 if (ctxt->mode == X86EMUL_MODE_PROT64)
3301 get(ctxt, &desc_ptr);
3302 if (ctxt->op_bytes == 2) {
3304 desc_ptr.address &= 0x00ffffff;
3306 /* Disable writeback. */
3307 ctxt->dst.type = OP_NONE;
3308 return segmented_write(ctxt, ctxt->dst.addr.mem,
3309 &desc_ptr, 2 + ctxt->op_bytes);
3312 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3314 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3317 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3319 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3322 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3324 struct desc_ptr desc_ptr;
3327 if (ctxt->mode == X86EMUL_MODE_PROT64)
3329 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3330 &desc_ptr.size, &desc_ptr.address,
3332 if (rc != X86EMUL_CONTINUE)
3334 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3335 is_noncanonical_address(desc_ptr.address))
3336 return emulate_gp(ctxt, 0);
3338 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3340 ctxt->ops->set_idt(ctxt, &desc_ptr);
3341 /* Disable writeback. */
3342 ctxt->dst.type = OP_NONE;
3343 return X86EMUL_CONTINUE;
3346 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3348 return em_lgdt_lidt(ctxt, true);
3351 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3355 rc = ctxt->ops->fix_hypercall(ctxt);
3357 /* Disable writeback. */
3358 ctxt->dst.type = OP_NONE;
3362 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3364 return em_lgdt_lidt(ctxt, false);
3367 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3369 if (ctxt->dst.type == OP_MEM)
3370 ctxt->dst.bytes = 2;
3371 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3372 return X86EMUL_CONTINUE;
3375 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3377 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3378 | (ctxt->src.val & 0x0f));
3379 ctxt->dst.type = OP_NONE;
3380 return X86EMUL_CONTINUE;
3383 static int em_loop(struct x86_emulate_ctxt *ctxt)
3385 int rc = X86EMUL_CONTINUE;
3387 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3388 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3389 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3390 rc = jmp_rel(ctxt, ctxt->src.val);
3395 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3397 int rc = X86EMUL_CONTINUE;
3399 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3400 rc = jmp_rel(ctxt, ctxt->src.val);
3405 static int em_in(struct x86_emulate_ctxt *ctxt)
3407 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3409 return X86EMUL_IO_NEEDED;
3411 return X86EMUL_CONTINUE;
3414 static int em_out(struct x86_emulate_ctxt *ctxt)
3416 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3418 /* Disable writeback. */
3419 ctxt->dst.type = OP_NONE;
3420 return X86EMUL_CONTINUE;
3423 static int em_cli(struct x86_emulate_ctxt *ctxt)
3425 if (emulator_bad_iopl(ctxt))
3426 return emulate_gp(ctxt, 0);
3428 ctxt->eflags &= ~X86_EFLAGS_IF;
3429 return X86EMUL_CONTINUE;
3432 static int em_sti(struct x86_emulate_ctxt *ctxt)
3434 if (emulator_bad_iopl(ctxt))
3435 return emulate_gp(ctxt, 0);
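/*
 * STI keeps interrupts blocked until the following instruction has
 * executed; model that window with the STI interrupt shadow.
 */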
3437 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3438 ctxt->eflags |= X86_EFLAGS_IF;
3439 return X86EMUL_CONTINUE;
3442 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3444 u32 eax, ebx, ecx, edx;
3446 eax = reg_read(ctxt, VCPU_REGS_RAX);
3447 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3448 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3449 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3450 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3451 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3452 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3453 return X86EMUL_CONTINUE;
3456 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3460 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3461 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3463 ctxt->eflags &= ~0xffUL;
3464 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3465 return X86EMUL_CONTINUE;
3468 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3470 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3471 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3472 return X86EMUL_CONTINUE;
3475 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3477 switch (ctxt->op_bytes) {
3478 #ifdef CONFIG_X86_64
3480 asm("bswap %0" : "+r"(ctxt->dst.val));
3484 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3487 return X86EMUL_CONTINUE;
3490 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3492 /* emulating clflush regardless of cpuid */
3493 return X86EMUL_CONTINUE;
3496 static bool valid_cr(int nr)
3508 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3510 if (!valid_cr(ctxt->modrm_reg))
3511 return emulate_ud(ctxt);
3513 return X86EMUL_CONTINUE;
3516 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3518 u64 new_val = ctxt->src.val64;
3519 int cr = ctxt->modrm_reg;
3522 static u64 cr_reserved_bits[] = {
3523 0xffffffff00000000ULL,
3524 0, 0, 0, /* CR3 checked later */
3531 return emulate_ud(ctxt);
3533 if (new_val & cr_reserved_bits[cr])
3534 return emulate_gp(ctxt, 0);
3539 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3540 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3541 return emulate_gp(ctxt, 0);
3543 cr4 = ctxt->ops->get_cr(ctxt, 4);
3544 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3546 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3547 !(cr4 & X86_CR4_PAE))
3548 return emulate_gp(ctxt, 0);
3555 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3556 if (efer & EFER_LMA)
3557 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
3560 return emulate_gp(ctxt, 0);
3565 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3567 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3568 return emulate_gp(ctxt, 0);
3574 return X86EMUL_CONTINUE;
3577 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3581 ctxt->ops->get_dr(ctxt, 7, &dr7);
3583 /* Check if DR7.GD (general detect) is set */
3584 return dr7 & (1 << 13);
3587 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3589 int dr = ctxt->modrm_reg;
3593 return emulate_ud(ctxt);
3595 cr4 = ctxt->ops->get_cr(ctxt, 4);
3596 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3597 return emulate_ud(ctxt);
3599 if (check_dr7_gd(ctxt)) {
3602 ctxt->ops->get_dr(ctxt, 6, &dr6);
3604 dr6 |= DR6_BD | DR6_RTM;
3605 ctxt->ops->set_dr(ctxt, 6, dr6);
3606 return emulate_db(ctxt);
3609 return X86EMUL_CONTINUE;
3612 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3614 u64 new_val = ctxt->src.val64;
3615 int dr = ctxt->modrm_reg;
3617 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3618 return emulate_gp(ctxt, 0);
3620 return check_dr_read(ctxt);
3623 static int check_svme(struct x86_emulate_ctxt *ctxt)
3627 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3629 if (!(efer & EFER_SVME))
3630 return emulate_ud(ctxt);
3632 return X86EMUL_CONTINUE;
3635 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3637 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3639 /* Valid physical address? */
3640 if (rax & 0xffff000000000000ULL)
3641 return emulate_gp(ctxt, 0);
3643 return check_svme(ctxt);
3646 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3648 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3650 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3651 return emulate_ud(ctxt);
3653 return X86EMUL_CONTINUE;
3656 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3658 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3659 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3661 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3662 ctxt->ops->check_pmc(ctxt, rcx))
3663 return emulate_gp(ctxt, 0);
3665 return X86EMUL_CONTINUE;
3668 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3670 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3671 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3672 return emulate_gp(ctxt, 0);
3674 return X86EMUL_CONTINUE;
3677 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3679 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3680 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3681 return emulate_gp(ctxt, 0);
3683 return X86EMUL_CONTINUE;
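/*
 * Builders for the decode tables below: D() declares bare flags, I()/F()
 * attach an execute or fastop callback, DI/II/IIP add an intercept (and
 * optionally a permission check), G/GD/EXT/E/GP chain to a sub-table
 * selected by the ModRM byte or a mandatory prefix, and the *2bv/F6ALU
 * wrappers stamp out the byte/word variants of an encoding.
 */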
3686 #define D(_y) { .flags = (_y) }
3687 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3688 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3689 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3690 #define N D(NotImpl)
3691 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3692 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3693 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3694 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3695 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3696 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3697 #define II(_f, _e, _i) \
3698 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3699 #define IIP(_f, _e, _i, _p) \
3700 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3701 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3702 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3704 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3705 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3706 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3707 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3708 #define I2bvIP(_f, _e, _i, _p) \
3709 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3711 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3712 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3713 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3715 static const struct opcode group7_rm0[] = {
3717 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3721 static const struct opcode group7_rm1[] = {
3722 DI(SrcNone | Priv, monitor),
3723 DI(SrcNone | Priv, mwait),
3727 static const struct opcode group7_rm3[] = {
3728 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3729 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
3730 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3731 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3732 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3733 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3734 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3735 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3738 static const struct opcode group7_rm7[] = {
3740 DIP(SrcNone, rdtscp, check_rdtsc),
3744 static const struct opcode group1[] = {
3746 F(Lock | PageTable, em_or),
3749 F(Lock | PageTable, em_and),
3755 static const struct opcode group1A[] = {
3756 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3759 static const struct opcode group2[] = {
3760 F(DstMem | ModRM, em_rol),
3761 F(DstMem | ModRM, em_ror),
3762 F(DstMem | ModRM, em_rcl),
3763 F(DstMem | ModRM, em_rcr),
3764 F(DstMem | ModRM, em_shl),
3765 F(DstMem | ModRM, em_shr),
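/* /6 (SAL) is an alias of /4; both perform a left shift */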
3766 F(DstMem | ModRM, em_shl),
3767 F(DstMem | ModRM, em_sar),
3770 static const struct opcode group3[] = {
3771 F(DstMem | SrcImm | NoWrite, em_test),
3772 F(DstMem | SrcImm | NoWrite, em_test),
3773 F(DstMem | SrcNone | Lock, em_not),
3774 F(DstMem | SrcNone | Lock, em_neg),
3775 F(DstXacc | Src2Mem, em_mul_ex),
3776 F(DstXacc | Src2Mem, em_imul_ex),
3777 F(DstXacc | Src2Mem, em_div_ex),
3778 F(DstXacc | Src2Mem, em_idiv_ex),
3781 static const struct opcode group4[] = {
3782 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3783 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3787 static const struct opcode group5[] = {
3788 F(DstMem | SrcNone | Lock, em_inc),
3789 F(DstMem | SrcNone | Lock, em_dec),
3790 I(SrcMem | NearBranch, em_call_near_abs),
3791 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3792 I(SrcMem | NearBranch, em_jmp_abs),
3793 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
3794 I(SrcMem | Stack, em_push), D(Undefined),
3797 static const struct opcode group6[] = {
3800 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3801 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3805 static const struct group_dual group7 = { {
3806 II(Mov | DstMem, em_sgdt, sgdt),
3807 II(Mov | DstMem, em_sidt, sidt),
3808 II(SrcMem | Priv, em_lgdt, lgdt),
3809 II(SrcMem | Priv, em_lidt, lidt),
3810 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3811 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3812 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3816 N, EXT(0, group7_rm3),
3817 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3818 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3822 static const struct opcode group8[] = {
3824 F(DstMem | SrcImmByte | NoWrite, em_bt),
3825 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3826 F(DstMem | SrcImmByte | Lock, em_btr),
3827 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3830 static const struct group_dual group9 = { {
3831 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3833 N, N, N, N, N, N, N, N,
3836 static const struct opcode group11[] = {
3837 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3841 static const struct gprefix pfx_0f_ae_7 = {
3842 I(SrcMem | ByteOp, em_clflush), N, N, N,
3845 static const struct group_dual group15 = { {
3846 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3848 N, N, N, N, N, N, N, N,
3851 static const struct gprefix pfx_0f_6f_0f_7f = {
3852 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3855 static const struct gprefix pfx_0f_2b = {
3856 I(0, em_mov), I(0, em_mov), N, N,
3859 static const struct gprefix pfx_0f_28_0f_29 = {
3860 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3863 static const struct gprefix pfx_0f_e7 = {
3864 N, I(Sse, em_mov), N, N,
3867 static const struct escape escape_d9 = { {
3868 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3871 N, N, N, N, N, N, N, N,
3873 N, N, N, N, N, N, N, N,
3875 N, N, N, N, N, N, N, N,
3877 N, N, N, N, N, N, N, N,
3879 N, N, N, N, N, N, N, N,
3881 N, N, N, N, N, N, N, N,
3883 N, N, N, N, N, N, N, N,
3885 N, N, N, N, N, N, N, N,
3888 static const struct escape escape_db = { {
3889 N, N, N, N, N, N, N, N,
3892 N, N, N, N, N, N, N, N,
3894 N, N, N, N, N, N, N, N,
3896 N, N, N, N, N, N, N, N,
3898 N, N, N, N, N, N, N, N,
3900 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3902 N, N, N, N, N, N, N, N,
3904 N, N, N, N, N, N, N, N,
3906 N, N, N, N, N, N, N, N,
3909 static const struct escape escape_dd = { {
3910 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3913 N, N, N, N, N, N, N, N,
3915 N, N, N, N, N, N, N, N,
3917 N, N, N, N, N, N, N, N,
3919 N, N, N, N, N, N, N, N,
3921 N, N, N, N, N, N, N, N,
3923 N, N, N, N, N, N, N, N,
3925 N, N, N, N, N, N, N, N,
3927 N, N, N, N, N, N, N, N,
3930 static const struct opcode opcode_table[256] = {
3932 F6ALU(Lock, em_add),
3933 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3934 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3936 F6ALU(Lock | PageTable, em_or),
3937 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3940 F6ALU(Lock, em_adc),
3941 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3942 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3944 F6ALU(Lock, em_sbb),
3945 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3946 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3948 F6ALU(Lock | PageTable, em_and), N, N,
3950 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3952 F6ALU(Lock, em_xor), N, N,
3954 F6ALU(NoWrite, em_cmp), N, N,
3956 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3958 X8(I(SrcReg | Stack, em_push)),
3960 X8(I(DstReg | Stack, em_pop)),
3962 I(ImplicitOps | Stack | No64, em_pusha),
3963 I(ImplicitOps | Stack | No64, em_popa),
3964 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3967 I(SrcImm | Mov | Stack, em_push),
3968 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3969 I(SrcImmByte | Mov | Stack, em_push),
3970 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3971 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3972 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3974 X16(D(SrcImmByte | NearBranch)),
3976 G(ByteOp | DstMem | SrcImm, group1),
3977 G(DstMem | SrcImm, group1),
3978 G(ByteOp | DstMem | SrcImm | No64, group1),
3979 G(DstMem | SrcImmByte, group1),
3980 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3981 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3983 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3984 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3985 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3986 D(ModRM | SrcMem | NoAccess | DstReg),
3987 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3990 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3992 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3993 I(SrcImmFAddr | No64, em_call_far), N,
3994 II(ImplicitOps | Stack, em_pushf, pushf),
3995 II(ImplicitOps | Stack, em_popf, popf),
3996 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
3998 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3999 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4000 I2bv(SrcSI | DstDI | Mov | String, em_mov),
4001 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
4003 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4004 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4005 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4006 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4008 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4010 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4012 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4013 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4014 I(ImplicitOps | NearBranch, em_ret),
4015 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4016 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4017 G(ByteOp, group11), G(0, group11),
4019 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4020 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4021 I(ImplicitOps | Stack, em_ret_far),
4022 D(ImplicitOps), DI(SrcImmByte, intn),
4023 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4025 G(Src2One | ByteOp, group2), G(Src2One, group2),
4026 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4027 I(DstAcc | SrcImmUByte | No64, em_aam),
4028 I(DstAcc | SrcImmUByte | No64, em_aad),
4029 F(DstAcc | ByteOp | No64, em_salc),
4030 I(DstAcc | SrcXLat | ByteOp, em_mov),
4032 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4034 X3(I(SrcImmByte | NearBranch, em_loop)),
4035 I(SrcImmByte | NearBranch, em_jcxz),
4036 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4037 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4039 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4040 I(SrcImmFAddr | No64, em_jmp_far),
4041 D(SrcImmByte | ImplicitOps | NearBranch),
4042 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4043 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4045 N, DI(ImplicitOps, icebp), N, N,
4046 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4047 G(ByteOp, group3), G(0, group3),
4049 D(ImplicitOps), D(ImplicitOps),
4050 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4051 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4054 static const struct opcode twobyte_table[256] = {
4056 G(0, group6), GD(0, &group7), N, N,
4057 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4058 II(ImplicitOps | Priv, em_clts, clts), N,
4059 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4060 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4062 N, N, N, N, N, N, N, N,
4063 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4064 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4066 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4067 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4068 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4070 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4073 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4074 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4075 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4078 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4079 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4080 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4081 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4082 I(ImplicitOps | EmulateOnUD, em_sysenter),
4083 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4085 N, N, N, N, N, N, N, N,
4087 X16(D(DstReg | SrcMem | ModRM)),
4089 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4094 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4099 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4101 X16(D(SrcImm | NearBranch)),
4103 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4105 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4106 II(ImplicitOps, em_cpuid, cpuid),
4107 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4108 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4109 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4111 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4112 DI(ImplicitOps, rsm),
4113 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4114 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4115 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4116 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4118 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
4119 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4120 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4121 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4122 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4123 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4127 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4128 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4129 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4131 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4132 N, I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov),
4133 N, N, N, GD(0, &group9),
4135 X8(I(DstReg, em_bswap)),
4137 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4139 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4140 N, N, N, N, N, N, N, N,
4142 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4145 static const struct gprefix three_byte_0f_38_f0 = {
4146 I(DstReg | SrcMem | Mov, em_movbe), N, N, N
4149 static const struct gprefix three_byte_0f_38_f1 = {
4150 I(DstMem | SrcReg | Mov, em_movbe), N, N, N
4154 * The instructions below are selected by the prefix, which is indexed by the third opcode byte.
4157 static const struct opcode opcode_map_0f_38[256] = {
4159 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4161 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4163 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
4164 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
4183 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4187 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4193 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4194 unsigned size, bool sign_extension)
4196 int rc = X86EMUL_CONTINUE;
4200 op->addr.mem.ea = ctxt->_eip;
4201 /* NB. Immediates are sign-extended as necessary. */
4202 switch (op->bytes) {
4204 op->val = insn_fetch(s8, ctxt);
4207 op->val = insn_fetch(s16, ctxt);
4210 op->val = insn_fetch(s32, ctxt);
4213 op->val = insn_fetch(s64, ctxt);
4216 if (!sign_extension) {
4217 switch (op->bytes) {
4225 op->val &= 0xffffffff;
4233 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4236 int rc = X86EMUL_CONTINUE;
4240 decode_register_operand(ctxt, op);
4243 rc = decode_imm(ctxt, op, 1, false);
4246 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4250 if (ctxt->d & BitOp)
4251 fetch_bit_operand(ctxt);
4252 op->orig_val = op->val;
4255 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4259 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4260 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4261 fetch_register_operand(op);
4262 op->orig_val = op->val;
4266 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4267 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4268 fetch_register_operand(op);
4269 op->orig_val = op->val;
4272 if (ctxt->d & ByteOp) {
4277 op->bytes = ctxt->op_bytes;
4278 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4279 fetch_register_operand(op);
4280 op->orig_val = op->val;
4284 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4286 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
4287 op->addr.mem.seg = VCPU_SREG_ES;
4294 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4295 fetch_register_operand(op);
4299 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4302 rc = decode_imm(ctxt, op, 1, true);
4309 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4312 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4315 ctxt->memop.bytes = 1;
4316 if (ctxt->memop.type == OP_REG) {
4317 ctxt->memop.addr.reg = decode_register(ctxt,
4318 ctxt->modrm_rm, true);
4319 fetch_register_operand(&ctxt->memop);
4323 ctxt->memop.bytes = 2;
4326 ctxt->memop.bytes = 4;
4329 rc = decode_imm(ctxt, op, 2, false);
4332 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4336 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4338 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
4339 op->addr.mem.seg = ctxt->seg_override;
4345 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4347 register_address(ctxt,
4348 reg_read(ctxt, VCPU_REGS_RBX) +
4349 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4350 op->addr.mem.seg = ctxt->seg_override;
4355 op->addr.mem.ea = ctxt->_eip;
4356 op->bytes = ctxt->op_bytes + 2;
4357 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4360 ctxt->memop.bytes = ctxt->op_bytes + 2;
4363 op->val = VCPU_SREG_ES;
4366 op->val = VCPU_SREG_CS;
4369 op->val = VCPU_SREG_SS;
4372 op->val = VCPU_SREG_DS;
4375 op->val = VCPU_SREG_FS;
4378 op->val = VCPU_SREG_GS;
4381 /* Special instructions do their own operand decoding. */
4383 op->type = OP_NONE; /* Disable writeback. */
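/*
 * Main decode entry point. The flow is: consume legacy prefixes (and an
 * optional REX in 64-bit mode), fetch one to three opcode bytes, resolve
 * group/dual/prefix/escape indirection through the tables above, then
 * decode ModRM/SIB and the up-to-three operands described by the flags.
 */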
4391 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4393 int rc = X86EMUL_CONTINUE;
4394 int mode = ctxt->mode;
4395 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4396 bool op_prefix = false;
4397 bool has_seg_override = false;
4398 struct opcode opcode;
4400 ctxt->memop.type = OP_NONE;
4401 ctxt->memopp = NULL;
4402 ctxt->_eip = ctxt->eip;
4403 ctxt->fetch.ptr = ctxt->fetch.data;
4404 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4405 ctxt->opcode_len = 1;
4407 memcpy(ctxt->fetch.data, insn, insn_len);
4409 rc = __do_insn_fetch_bytes(ctxt, 1);
4410 if (rc != X86EMUL_CONTINUE)
4415 case X86EMUL_MODE_REAL:
4416 case X86EMUL_MODE_VM86:
4417 case X86EMUL_MODE_PROT16:
4418 def_op_bytes = def_ad_bytes = 2;
4420 case X86EMUL_MODE_PROT32:
4421 def_op_bytes = def_ad_bytes = 4;
4423 #ifdef CONFIG_X86_64
4424 case X86EMUL_MODE_PROT64:
4430 return EMULATION_FAILED;
4433 ctxt->op_bytes = def_op_bytes;
4434 ctxt->ad_bytes = def_ad_bytes;
4436 /* Legacy prefixes. */
4438 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4439 case 0x66: /* operand-size override */
4441 /* switch between 2/4 bytes */
4442 ctxt->op_bytes = def_op_bytes ^ 6;
4444 case 0x67: /* address-size override */
4445 if (mode == X86EMUL_MODE_PROT64)
4446 /* switch between 4/8 bytes */
4447 ctxt->ad_bytes = def_ad_bytes ^ 12;
4449 /* switch between 2/4 bytes */
4450 ctxt->ad_bytes = def_ad_bytes ^ 6;
4452 case 0x26: /* ES override */
4453 case 0x2e: /* CS override */
4454 case 0x36: /* SS override */
4455 case 0x3e: /* DS override */
4456 has_seg_override = true;
4457 ctxt->seg_override = (ctxt->b >> 3) & 3;
4459 case 0x64: /* FS override */
4460 case 0x65: /* GS override */
4461 has_seg_override = true;
4462 ctxt->seg_override = ctxt->b & 7;
4464 case 0x40 ... 0x4f: /* REX */
4465 if (mode != X86EMUL_MODE_PROT64)
4467 ctxt->rex_prefix = ctxt->b;
4469 case 0xf0: /* LOCK */
4470 ctxt->lock_prefix = 1;
4472 case 0xf2: /* REPNE/REPNZ */
4473 case 0xf3: /* REP/REPE/REPZ */
4474 ctxt->rep_prefix = ctxt->b;
4480 /* Any legacy prefix after a REX prefix nullifies its effect. */
4482 ctxt->rex_prefix = 0;
4488 if (ctxt->rex_prefix & 8)
4489 ctxt->op_bytes = 8; /* REX.W */
4491 /* Opcode byte(s). */
4492 opcode = opcode_table[ctxt->b];
4493 /* Two-byte opcode? */
4494 if (ctxt->b == 0x0f) {
4495 ctxt->opcode_len = 2;
4496 ctxt->b = insn_fetch(u8, ctxt);
4497 opcode = twobyte_table[ctxt->b];
4499 /* 0F_38 opcode map */
4500 if (ctxt->b == 0x38) {
4501 ctxt->opcode_len = 3;
4502 ctxt->b = insn_fetch(u8, ctxt);
4503 opcode = opcode_map_0f_38[ctxt->b];
4506 ctxt->d = opcode.flags;
4508 if (ctxt->d & ModRM)
4509 ctxt->modrm = insn_fetch(u8, ctxt);
4511 /* vex-prefix instructions are not implemented */
4512 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4513 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
4517 while (ctxt->d & GroupMask) {
4518 switch (ctxt->d & GroupMask) {
4520 goffset = (ctxt->modrm >> 3) & 7;
4521 opcode = opcode.u.group[goffset];
4524 goffset = (ctxt->modrm >> 3) & 7;
4525 if ((ctxt->modrm >> 6) == 3)
4526 opcode = opcode.u.gdual->mod3[goffset];
4528 opcode = opcode.u.gdual->mod012[goffset];
4531 goffset = ctxt->modrm & 7;
4532 opcode = opcode.u.group[goffset];
4535 if (ctxt->rep_prefix && op_prefix)
4536 return EMULATION_FAILED;
4537 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4538 switch (simd_prefix) {
4539 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4540 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4541 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4542 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4546 if (ctxt->modrm > 0xbf)
4547 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4549 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4552 return EMULATION_FAILED;
4555 ctxt->d &= ~(u64)GroupMask;
4556 ctxt->d |= opcode.flags;
4561 return EMULATION_FAILED;
4563 ctxt->execute = opcode.u.execute;
4565 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4566 return EMULATION_FAILED;
4568 if (unlikely(ctxt->d &
4569 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4572 * These are copied unconditionally here, and checked unconditionally
4573 * in x86_emulate_insn.
4575 ctxt->check_perm = opcode.check_perm;
4576 ctxt->intercept = opcode.intercept;
4578 if (ctxt->d & NotImpl)
4579 return EMULATION_FAILED;
4581 if (mode == X86EMUL_MODE_PROT64) {
4582 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4584 else if (ctxt->d & NearBranch)
4588 if (ctxt->d & Op3264) {
4589 if (mode == X86EMUL_MODE_PROT64)
4595 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
4599 ctxt->op_bytes = 16;
4600 else if (ctxt->d & Mmx)
4604 /* ModRM and SIB bytes. */
4605 if (ctxt->d & ModRM) {
4606 rc = decode_modrm(ctxt, &ctxt->memop);
4607 if (!has_seg_override) {
4608 has_seg_override = true;
4609 ctxt->seg_override = ctxt->modrm_seg;
4611 } else if (ctxt->d & MemAbs)
4612 rc = decode_abs(ctxt, &ctxt->memop);
4613 if (rc != X86EMUL_CONTINUE)
4616 if (!has_seg_override)
4617 ctxt->seg_override = VCPU_SREG_DS;
4619 ctxt->memop.addr.mem.seg = ctxt->seg_override;
4622 * Decode and fetch the source operand: register, memory
4625 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4626 if (rc != X86EMUL_CONTINUE)
4630 * Decode and fetch the second source operand: register, memory
4633 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4634 if (rc != X86EMUL_CONTINUE)
4637 /* Decode and fetch the destination operand: register or memory. */
4638 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4640 if (ctxt->rip_relative)
4641 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
4642 ctxt->memopp->addr.mem.ea + ctxt->_eip);
4645 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4648 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4650 return ctxt->d & PageTable;
4653 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4655 /* The second termination condition applies only to REPE
4656 * and REPNE. Test whether the repeat string operation prefix is
4657 * REPE/REPZ or REPNE/REPNZ and, if so, check the
4658 * corresponding termination condition:
4659 * - if REPE/REPZ and ZF = 0 then done
4660 * - if REPNE/REPNZ and ZF = 1 then done
4662 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4663 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4664 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4665 ((ctxt->eflags & EFLG_ZF) == 0))
4666 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4667 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4673 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4677 ctxt->ops->get_fpu(ctxt);
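/*
 * Execute fwait under an exception-table fixup: a pending x87 fault
 * lands in the fixup stub and merely sets 'fault', which is then
 * reflected into the guest as #MF instead of being taken on the host.
 */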
4678 asm volatile("1: fwait \n\t"
4680 ".pushsection .fixup,\"ax\" \n\t"
4682 "movb $1, %[fault] \n\t"
4685 _ASM_EXTABLE(1b, 3b)
4686 : [fault]"+qm"(fault));
4687 ctxt->ops->put_fpu(ctxt);
4689 if (unlikely(fault))
4690 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4692 return X86EMUL_CONTINUE;
4695 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4698 if (op->type == OP_MM)
4699 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
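/*
 * Dispatch into a fastop stub: 'fop' addresses a table of size-specific
 * entry points spaced FASTOP_SIZE apart, indexed by log2 of the operand
 * size. The asm below marshals dst/src/src2 through the fixed registers
 * named in its constraints and round-trips the arithmetic flags with
 * pushf/popf.
 */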
4702 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4704 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4705 if (!(ctxt->d & ByteOp))
4706 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4707 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4708 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4710 : "c"(ctxt->src2.val));
4711 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4712 if (!fop) /* exception is returned in fop variable */
4713 return emulate_de(ctxt);
4714 return X86EMUL_CONTINUE;
4717 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4719 memset(&ctxt->rip_relative, 0,
4720 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
4722 ctxt->io_read.pos = 0;
4723 ctxt->io_read.end = 0;
4724 ctxt->mem_read.end = 0;
4727 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4729 const struct x86_emulate_ops *ops = ctxt->ops;
4730 int rc = X86EMUL_CONTINUE;
4731 int saved_dst_type = ctxt->dst.type;
4733 ctxt->mem_read.pos = 0;
4735 /* LOCK prefix is allowed only with some instructions */
4736 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4737 rc = emulate_ud(ctxt);
4741 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4742 rc = emulate_ud(ctxt);
4746 if (unlikely(ctxt->d &
4747 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4748 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4749 (ctxt->d & Undefined)) {
4750 rc = emulate_ud(ctxt);
                if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
                    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
                        rc = emulate_ud(ctxt);
                        goto done;
                }

                if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
                        rc = emulate_nm(ctxt);
                        goto done;
                }
                if (ctxt->d & Mmx) {
                        rc = flush_pending_x87_faults(ctxt);
                        if (rc != X86EMUL_CONTINUE)
                                goto done;
                        /*
                         * Now that we know the fpu is exception safe, we can fetch
                         * operands from it.
                         */
                        fetch_possible_mmx_operand(ctxt, &ctxt->src);
                        fetch_possible_mmx_operand(ctxt, &ctxt->src2);
                        if (!(ctxt->d & Mov))
                                fetch_possible_mmx_operand(ctxt, &ctxt->dst);
                }
                if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
                        rc = emulator_check_intercept(ctxt, ctxt->intercept,
                                                      X86_ICPT_PRE_EXCEPT);
                        if (rc != X86EMUL_CONTINUE)
                                goto done;
                }
                /* Privileged instructions can be executed only at CPL=0 */
                if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
                        if (ctxt->d & PrivUD)
                                rc = emulate_ud(ctxt);
                        else
                                rc = emulate_gp(ctxt, 0);
                        goto done;
                }

                /* Instruction can only be executed in protected mode */
                if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
                        rc = emulate_ud(ctxt);
                        goto done;
                }
                /* Do instruction specific permission checks */
                if (ctxt->d & CheckPerm) {
                        rc = ctxt->check_perm(ctxt);
                        if (rc != X86EMUL_CONTINUE)
                                goto done;
                }

                if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
                        rc = emulator_check_intercept(ctxt, ctxt->intercept,
                                                      X86_ICPT_POST_EXCEPT);
                        if (rc != X86EMUL_CONTINUE)
                                goto done;
                }
                if (ctxt->rep_prefix && (ctxt->d & String)) {
                        /* All REP prefixes have the same first termination condition */
                        if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
                                ctxt->eip = ctxt->_eip;
                                ctxt->eflags &= ~EFLG_RF;
                                goto done;
                        }
                }
        }
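        /* Fetch the memory operands now that the pre-access checks passed. */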
        if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
                rc = segmented_read(ctxt, ctxt->src.addr.mem,
                                    ctxt->src.valptr, ctxt->src.bytes);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
                ctxt->src.orig_val64 = ctxt->src.val64;
        }
        if (ctxt->src2.type == OP_MEM) {
                rc = segmented_read(ctxt, ctxt->src2.addr.mem,
                                    &ctxt->src2.val, ctxt->src2.bytes);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }

        if ((ctxt->d & DstMask) == ImplicitOps)
                goto special_insn;
        if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
                /* optimisation - avoid slow emulated read if Mov */
                rc = segmented_read(ctxt, ctxt->dst.addr.mem,
                                    &ctxt->dst.val, ctxt->dst.bytes);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }
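        /*
         * Keep the pre-execution destination value around: locked
         * instructions write back with a compare-and-exchange against
         * dst.orig_val so that a concurrent guest write is detected.
         */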
        ctxt->dst.orig_val = ctxt->dst.val;
special_insn:

        if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
                rc = emulator_check_intercept(ctxt, ctxt->intercept,
                                              X86_ICPT_POST_MEMACCESS);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }
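        /*
         * RF is set while a REP-prefixed string instruction is in flight so
         * that an instruction breakpoint does not re-trigger when the
         * interrupted iteration resumes.
         */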
        if (ctxt->rep_prefix && (ctxt->d & String))
                ctxt->eflags |= EFLG_RF;
        else
                ctxt->eflags &= ~EFLG_RF;
        if (ctxt->execute) {
                if (ctxt->d & Fastop) {
                        void (*fop)(struct fastop *) = (void *)ctxt->execute;

                        rc = fastop(ctxt, fop);
                        if (rc != X86EMUL_CONTINUE)
                                goto done;
                        goto writeback;
                }
                rc = ctxt->execute(ctxt);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
                goto writeback;
        }

        if (ctxt->opcode_len == 2)
                goto twobyte_insn;
        else if (ctxt->opcode_len == 3)
                goto threebyte_insn;
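        /*
         * One-byte opcodes without an ->execute handler fall through to
         * this legacy switch.
         */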
        switch (ctxt->b) {
        case 0x63:              /* movsxd */
                if (ctxt->mode != X86EMUL_MODE_PROT64)
                        goto cannot_emulate;
                ctxt->dst.val = (s32) ctxt->src.val;
                break;
        case 0x70 ... 0x7f: /* jcc (short) */
                if (test_cc(ctxt->b, ctxt->eflags))
                        rc = jmp_rel(ctxt, ctxt->src.val);
                break;
        case 0x8d: /* lea r16/r32, m */
                ctxt->dst.val = ctxt->src.addr.mem.ea;
                break;
        case 0x90 ... 0x97: /* nop / xchg reg, rax */
                if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
                        ctxt->dst.type = OP_NONE;
                else
                        rc = em_xchg(ctxt);
                break;
        case 0x98: /* cbw/cwde/cdqe */
                switch (ctxt->op_bytes) {
                case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
                case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
                case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
                }
                break;
        case 0xcc:              /* int3 */
                rc = emulate_int(ctxt, 3);
                break;
        case 0xcd:              /* int n */
                rc = emulate_int(ctxt, ctxt->src.val);
                break;
        case 0xce:              /* into */
                if (ctxt->eflags & EFLG_OF)
                        rc = emulate_int(ctxt, 4);
                break;
        case 0xe9: /* jmp rel */
        case 0xeb: /* jmp rel short */
                rc = jmp_rel(ctxt, ctxt->src.val);
                ctxt->dst.type = OP_NONE; /* Disable writeback. */
                break;
        case 0xf4:              /* hlt */
                ctxt->ops->halt(ctxt);
                break;
        case 0xf5:              /* cmc */
                /* complement carry flag from eflags reg */
                ctxt->eflags ^= EFLG_CF;
                break;
        case 0xf8: /* clc */
                ctxt->eflags &= ~EFLG_CF;
                break;
        case 0xf9: /* stc */
                ctxt->eflags |= EFLG_CF;
                break;
        case 0xfc: /* cld */
                ctxt->eflags &= ~EFLG_DF;
                break;
        case 0xfd: /* std */
                ctxt->eflags |= EFLG_DF;
                break;
        default:
                goto cannot_emulate;
        }
        if (rc != X86EMUL_CONTINUE)
                goto done;

writeback:
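        /*
         * A few instructions (e.g. XADD, marked SrcWrite) modify their
         * source operand as well and need it written back too; string
         * sources are never writable, hence the BUG_ON.
         */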
        if (ctxt->d & SrcWrite) {
                BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
                rc = writeback(ctxt, &ctxt->src);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }
        if (!(ctxt->d & NoWrite)) {
                rc = writeback(ctxt, &ctxt->dst);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }

        /*
         * Restore dst type in case the decoding will be reused
         * (happens for string instructions).
         */
        ctxt->dst.type = saved_dst_type;

        if ((ctxt->d & SrcMask) == SrcSI)
                string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

        if ((ctxt->d & DstMask) == DstDI)
                string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
        if (ctxt->rep_prefix && (ctxt->d & String)) {
                unsigned int count;
                struct read_cache *r = &ctxt->io_read;

                if ((ctxt->d & SrcMask) == SrcSI)
                        count = ctxt->src.count;
                else
                        count = ctxt->dst.count;
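                /*
                 * The string helpers may batch several iterations into one
                 * pass; decrement RCX by however many actually completed.
                 */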
                register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
                                           -count);

                if (!string_insn_completed(ctxt)) {
                        /*
                         * Re-enter the guest when the pio read-ahead buffer
                         * is empty or, if it is not used, after every 1024
                         * iterations.
                         */
                        if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
                            (r->end == 0 || r->end != r->pos)) {
                                /*
                                 * Reset the read cache. Usually this happens
                                 * before decode, but since the instruction is
                                 * restarted we have to do it here.
                                 */
                                ctxt->mem_read.end = 0;
                                writeback_registers(ctxt);
                                return EMULATION_RESTART;
                        }
                        goto done; /* skip rip writeback */
                }
                ctxt->eflags &= ~EFLG_RF;
        }

        ctxt->eip = ctxt->_eip;
done:
        if (rc == X86EMUL_PROPAGATE_FAULT) {
                WARN_ON(ctxt->exception.vector > 0x1f);
                ctxt->have_exception = true;
        }
        if (rc == X86EMUL_INTERCEPTED)
                return EMULATION_INTERCEPTED;

        if (rc == X86EMUL_CONTINUE)
                writeback_registers(ctxt);

        return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
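        /*
         * Two-byte (0x0f-prefixed) opcodes without an ->execute handler are
         * handled below.
         */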
twobyte_insn:
        switch (ctxt->b) {
        case 0x09:              /* wbinvd */
                (ctxt->ops->wbinvd)(ctxt);
                break;
        case 0x08:              /* invd */
        case 0x0d:              /* GrpP (prefetch) */
        case 0x18:              /* Grp16 (prefetch/nop) */
        case 0x1f:              /* nop */
                break;
        case 0x20: /* mov cr, reg */
                ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
                break;
        case 0x21: /* mov from dr to reg */
                ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
                break;
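        /*
         * CMOV: in 64-bit mode with a 32-bit operand size, a false
         * condition still zero-extends the 32-bit destination register, so
         * writeback may only be suppressed outside that case.
         */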
        case 0x40 ... 0x4f:     /* cmov */
                if (test_cc(ctxt->b, ctxt->eflags))
                        ctxt->dst.val = ctxt->src.val;
                else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
                         ctxt->op_bytes != 4)
                        ctxt->dst.type = OP_NONE; /* no writeback */
                break;
        case 0x80 ... 0x8f: /* jnz rel, etc*/
                if (test_cc(ctxt->b, ctxt->eflags))
                        rc = jmp_rel(ctxt, ctxt->src.val);
                break;
        case 0x90 ... 0x9f:     /* setcc r/m8 */
                ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
                break;
        case 0xb6 ... 0xb7:     /* movzx */
                ctxt->dst.bytes = ctxt->op_bytes;
                ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
                                                       : (u16) ctxt->src.val;
                break;
        case 0xbe ... 0xbf:     /* movsx */
                ctxt->dst.bytes = ctxt->op_bytes;
                ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
                                                         (s16) ctxt->src.val;
                break;
        default:
                goto cannot_emulate;
        }
threebyte_insn:

        if (rc != X86EMUL_CONTINUE)
                goto done;

        goto writeback;

cannot_emulate:
        return EMULATION_FAILED;
}
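/*
 * Exported wrappers: the emulator keeps a cached copy of the guest GPRs;
 * these let kvm invalidate or flush that cache around emulation.
 */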
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
        invalidate_registers(ctxt);
}
void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
        writeback_registers(ctxt);
}