1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <linux/module.h>
26 #include <asm/kvm_emulate.h>
27 #include <linux/stringify.h>
36 #define OpImplicit 1ull /* No generic decode */
37 #define OpReg 2ull /* Register */
38 #define OpMem 3ull /* Memory */
39 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
40 #define OpDI 5ull /* ES:DI/EDI/RDI */
41 #define OpMem64 6ull /* Memory, 64-bit */
42 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
43 #define OpDX 8ull /* DX register */
44 #define OpCL 9ull /* CL register (for shifts) */
45 #define OpImmByte 10ull /* 8-bit sign extended immediate */
46 #define OpOne 11ull /* Implied 1 */
47 #define OpImm 12ull /* Sign extended up to 32-bit immediate */
48 #define OpMem16 13ull /* Memory operand (16-bit). */
49 #define OpMem32 14ull /* Memory operand (32-bit). */
50 #define OpImmU 15ull /* Immediate operand, zero extended */
51 #define OpSI 16ull /* SI/ESI/RSI */
52 #define OpImmFAddr 17ull /* Immediate far address */
53 #define OpMemFAddr 18ull /* Far address in memory */
54 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
55 #define OpES 20ull /* ES */
56 #define OpCS 21ull /* CS */
57 #define OpSS 22ull /* SS */
58 #define OpDS 23ull /* DS */
59 #define OpFS 24ull /* FS */
60 #define OpGS 25ull /* GS */
61 #define OpMem8 26ull /* 8-bit zero extended memory operand */
62 #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
63 #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
64 #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
65 #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
67 #define OpBits 5 /* Width of operand field */
68 #define OpMask ((1ull << OpBits) - 1)
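/*
 * Illustration (not part of the original file): each operand slot is an
 * OpBits-wide field packed into the 64-bit opcode flags word, so decode
 * recovers one slot with a shift and mask, e.g. for the destination:
 *
 *	unsigned op = (ctxt->d >> DstShift) & OpMask;
 */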
71 * Opcode effective-address decode tables.
72 * Note that we only emulate instructions that have at least one memory
73 * operand (excluding implicit stack references). We assume that stack
74 * references and instruction fetches will never occur in special memory
75 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need not be handled.
79 /* Operand sizes: 8-bit operands or specified/overridden size. */
80 #define ByteOp (1<<0) /* 8-bit operands. */
81 /* Destination operand type. */
83 #define ImplicitOps (OpImplicit << DstShift)
84 #define DstReg (OpReg << DstShift)
85 #define DstMem (OpMem << DstShift)
86 #define DstAcc (OpAcc << DstShift)
87 #define DstDI (OpDI << DstShift)
88 #define DstMem64 (OpMem64 << DstShift)
89 #define DstImmUByte (OpImmUByte << DstShift)
90 #define DstDX (OpDX << DstShift)
91 #define DstAccLo (OpAccLo << DstShift)
92 #define DstMask (OpMask << DstShift)
93 /* Source operand type. */
95 #define SrcNone (OpNone << SrcShift)
96 #define SrcReg (OpReg << SrcShift)
97 #define SrcMem (OpMem << SrcShift)
98 #define SrcMem16 (OpMem16 << SrcShift)
99 #define SrcMem32 (OpMem32 << SrcShift)
100 #define SrcImm (OpImm << SrcShift)
101 #define SrcImmByte (OpImmByte << SrcShift)
102 #define SrcOne (OpOne << SrcShift)
103 #define SrcImmUByte (OpImmUByte << SrcShift)
104 #define SrcImmU (OpImmU << SrcShift)
105 #define SrcSI (OpSI << SrcShift)
106 #define SrcXLat (OpXLat << SrcShift)
107 #define SrcImmFAddr (OpImmFAddr << SrcShift)
108 #define SrcMemFAddr (OpMemFAddr << SrcShift)
109 #define SrcAcc (OpAcc << SrcShift)
110 #define SrcImmU16 (OpImmU16 << SrcShift)
111 #define SrcImm64 (OpImm64 << SrcShift)
112 #define SrcDX (OpDX << SrcShift)
113 #define SrcMem8 (OpMem8 << SrcShift)
114 #define SrcAccHi (OpAccHi << SrcShift)
115 #define SrcMask (OpMask << SrcShift)
116 #define BitOp (1<<11)
117 #define MemAbs (1<<12) /* Memory operand is absolute displacement */
118 #define String (1<<13) /* String instruction (rep capable) */
119 #define Stack (1<<14) /* Stack instruction (push/pop) */
120 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
121 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
122 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
123 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
124 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
125 #define Escape (5<<15) /* Escape to coprocessor instruction */
126 #define Sse (1<<18) /* SSE Vector instruction */
127 /* Generic ModRM decode. */
128 #define ModRM (1<<19)
129 /* Destination is only written; never read. */
132 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
133 #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
134 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
135 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
136 #define Undefined (1<<25) /* No Such Instruction */
137 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
138 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
140 #define PageTable (1 << 29) /* instruction used to write page table */
141 #define NotImpl (1 << 30) /* instruction is not implemented */
142 /* Source 2 operand type */
143 #define Src2Shift (31)
144 #define Src2None (OpNone << Src2Shift)
145 #define Src2Mem (OpMem << Src2Shift)
146 #define Src2CL (OpCL << Src2Shift)
147 #define Src2ImmByte (OpImmByte << Src2Shift)
148 #define Src2One (OpOne << Src2Shift)
149 #define Src2Imm (OpImm << Src2Shift)
150 #define Src2ES (OpES << Src2Shift)
151 #define Src2CS (OpCS << Src2Shift)
152 #define Src2SS (OpSS << Src2Shift)
153 #define Src2DS (OpDS << Src2Shift)
154 #define Src2FS (OpFS << Src2Shift)
155 #define Src2GS (OpGS << Src2Shift)
156 #define Src2Mask (OpMask << Src2Shift)
157 #define Mmx ((u64)1 << 40) /* MMX Vector instruction */
158 #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
159 #define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
160 #define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
161 #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
162 #define NoWrite ((u64)1 << 45) /* No writeback */
163 #define SrcWrite ((u64)1 << 46) /* Write back src operand */
164 #define NoMod ((u64)1 << 47) /* Mod field is ignored */
165 #define Intercept ((u64)1 << 48) /* Has valid intercept field */
166 #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
167 #define NoBigReal ((u64)1 << 50) /* No big real mode */
168 #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
169 #define NearBranch ((u64)1 << 52) /* Near branches */
171 #define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
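/*
 * Note: DstXacc names the widened accumulator pair used by mul/div-style
 * instructions: the low half (OpAccLo, e.g. EAX) is the destination and
 * the high half (OpAccHi, e.g. EDX) is a second source that must be
 * written back as well, hence the SrcWrite flag.
 */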
173 #define X2(x...) x, x
174 #define X3(x...) X2(x), x
175 #define X4(x...) X2(x), X2(x)
176 #define X5(x...) X4(x), x
177 #define X6(x...) X4(x), X2(x)
178 #define X7(x...) X4(x), X3(x)
179 #define X8(x...) X4(x), X4(x)
180 #define X16(x...) X8(x), X8(x)
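/*
 * Illustration: the X* helpers simply repeat their argument, so a table
 * row such as X16(D(Undefined)) expands to sixteen identical entries;
 * they are used to fill runs of the opcode tables later in the file.
 */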
182 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
183 #define FASTOP_SIZE 8
186 * fastop functions have a special calling convention:
191 * flags: rflags (in/out)
192 * ex: rsi (in:fastop pointer, out:zero if exception)
194 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
195 * different operand sizes can be reached by calculation, rather than a jump
196 * table (which would be bigger than the code).
198 * fastop functions are declared as taking a never-defined fastop parameter,
199 * so they can't be called from C directly.
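/*
 * Sketch of the size-based dispatch described above: with every entry
 * FASTOP_SIZE bytes long and emitted in b/w/l/q order, the caller can
 * index the table arithmetically, along the lines of
 *
 *	fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * (0 for byte, 1 for word, 2 for dword, 3 for qword); the exact
 * computation lives in fastop(), declared below.
 */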
208 int (*execute)(struct x86_emulate_ctxt *ctxt);
209 const struct opcode *group;
210 const struct group_dual *gdual;
211 const struct gprefix *gprefix;
212 const struct escape *esc;
213 void (*fastop)(struct fastop *fake);
215 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
219 struct opcode mod012[8];
220 struct opcode mod3[8];
224 struct opcode pfx_no;
225 struct opcode pfx_66;
226 struct opcode pfx_f2;
227 struct opcode pfx_f3;
232 struct opcode high[64];
235 /* EFLAGS bit definitions. */
236 #define EFLG_ID (1<<21)
237 #define EFLG_VIP (1<<20)
238 #define EFLG_VIF (1<<19)
239 #define EFLG_AC (1<<18)
240 #define EFLG_VM (1<<17)
241 #define EFLG_RF (1<<16)
242 #define EFLG_IOPL (3<<12)
243 #define EFLG_NT (1<<14)
244 #define EFLG_OF (1<<11)
245 #define EFLG_DF (1<<10)
246 #define EFLG_IF (1<<9)
247 #define EFLG_TF (1<<8)
248 #define EFLG_SF (1<<7)
249 #define EFLG_ZF (1<<6)
250 #define EFLG_AF (1<<4)
251 #define EFLG_PF (1<<2)
252 #define EFLG_CF (1<<0)
254 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
255 #define EFLG_RESERVED_ONE_MASK 2
257 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
259 if (!(ctxt->regs_valid & (1 << nr))) {
260 ctxt->regs_valid |= 1 << nr;
261 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
263 return ctxt->_regs[nr];
266 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
268 ctxt->regs_valid |= 1 << nr;
269 ctxt->regs_dirty |= 1 << nr;
270 return &ctxt->_regs[nr];
273 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
276 return reg_write(ctxt, nr);
279 static void writeback_registers(struct x86_emulate_ctxt *ctxt)
283 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
284 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
287 static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
289 ctxt->regs_dirty = 0;
290 ctxt->regs_valid = 0;
294 * These EFLAGS bits are restored from saved value during emulation, and
295 * any changes are written back to the saved value after emulation.
297 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
305 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
307 #define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
308 #define FOP_RET "ret \n\t"
310 #define FOP_START(op) \
311 extern void em_##op(struct fastop *fake); \
312 asm(".pushsection .text, \"ax\" \n\t" \
313 ".global em_" #op " \n\t" \
320 #define FOPNOP() FOP_ALIGN FOP_RET
322 #define FOP1E(op, dst) \
323 FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET
325 #define FOP1EEX(op, dst) \
326 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
328 #define FASTOP1(op) \
333 ON64(FOP1E(op##q, rax)) \
336 /* 1-operand, using src2 (for MUL/DIV r/m) */
337 #define FASTOP1SRC2(op, name) \
342 ON64(FOP1E(op, rcx)) \
345 /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
346 #define FASTOP1SRC2EX(op, name) \
351 ON64(FOP1EEX(op, rcx)) \
354 #define FOP2E(op, dst, src) \
355 FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET
357 #define FASTOP2(op) \
359 FOP2E(op##b, al, dl) \
360 FOP2E(op##w, ax, dx) \
361 FOP2E(op##l, eax, edx) \
362 ON64(FOP2E(op##q, rax, rdx)) \
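/*
 * Illustration: FASTOP2(add) therefore emits four entry points at
 * consecutive FASTOP_SIZE (8-byte) slots,
 *
 *	em_add + 0:	addb %dl, %al;    ret	(byte)
 *	em_add + 8:	addw %dx, %ax;    ret	(word)
 *	em_add + 16:	addl %edx, %eax;  ret	(dword)
 *	em_add + 24:	addq %rdx, %rax;  ret	(qword, 64-bit only)
 *
 * which is what makes the arithmetic dispatch described above possible.
 */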
365 /* 2 operand, word only */
366 #define FASTOP2W(op) \
369 FOP2E(op##w, ax, dx) \
370 FOP2E(op##l, eax, edx) \
371 ON64(FOP2E(op##q, rax, rdx)) \
374 /* 2 operand, src is CL */
375 #define FASTOP2CL(op) \
377 FOP2E(op##b, al, cl) \
378 FOP2E(op##w, ax, cl) \
379 FOP2E(op##l, eax, cl) \
380 ON64(FOP2E(op##q, rax, cl)) \
383 /* 2 operand, src and dest are reversed */
384 #define FASTOP2R(op, name) \
386 FOP2E(op##b, dl, al) \
387 FOP2E(op##w, dx, ax) \
388 FOP2E(op##l, edx, eax) \
389 ON64(FOP2E(op##q, rdx, rax)) \
392 #define FOP3E(op, dst, src, src2) \
393 FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
395 /* 3-operand, word-only, src2=cl */
396 #define FASTOP3WCL(op) \
399 FOP3E(op##w, ax, dx, cl) \
400 FOP3E(op##l, eax, edx, cl) \
401 ON64(FOP3E(op##q, rax, rdx, cl)) \
404 /* Special case for SETcc - 1 instruction per cc */
405 #define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"
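/*
 * Note: each SETcc stub is exactly 4 bytes (.align 4; 3-byte setcc; ret),
 * so test_cc() below can reach the stub for condition code cc at
 * em_setcc + 4 * cc instead of switching on the condition.
 */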
407 asm(".global kvm_fastop_exception \n"
408 "kvm_fastop_exception: xor %esi, %esi; ret");
429 FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
432 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
433 enum x86_intercept intercept,
434 enum x86_intercept_stage stage)
436 struct x86_instruction_info info = {
437 .intercept = intercept,
438 .rep_prefix = ctxt->rep_prefix,
439 .modrm_mod = ctxt->modrm_mod,
440 .modrm_reg = ctxt->modrm_reg,
441 .modrm_rm = ctxt->modrm_rm,
442 .src_val = ctxt->src.val64,
443 .dst_val = ctxt->dst.val64,
444 .src_bytes = ctxt->src.bytes,
445 .dst_bytes = ctxt->dst.bytes,
446 .ad_bytes = ctxt->ad_bytes,
447 .next_rip = ctxt->eip,
450 return ctxt->ops->intercept(ctxt, &info, stage);
453 static void assign_masked(ulong *dest, ulong src, ulong mask)
455 *dest = (*dest & ~mask) | (src & mask);
458 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
460 return (1UL << (ctxt->ad_bytes << 3)) - 1;
463 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
466 struct desc_struct ss;
468 if (ctxt->mode == X86EMUL_MODE_PROT64)
470 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
471 return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
474 static int stack_size(struct x86_emulate_ctxt *ctxt)
476 return (__fls(stack_mask(ctxt)) + 1) >> 3;
479 /* Access/update address held in a register, based on addressing mode. */
480 static inline unsigned long
481 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
483 if (ctxt->ad_bytes == sizeof(unsigned long))
486 return reg & ad_mask(ctxt);
489 static inline unsigned long
490 register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
492 return address_mask(ctxt, reg);
495 static void masked_increment(ulong *reg, ulong mask, int inc)
497 assign_masked(reg, *reg + inc, mask);
501 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
505 if (ctxt->ad_bytes == sizeof(unsigned long))
508 mask = ad_mask(ctxt);
509 masked_increment(reg, mask, inc);
512 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
514 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
517 static u32 desc_limit_scaled(struct desc_struct *desc)
519 u32 limit = get_desc_limit(desc);
521 return desc->g ? (limit << 12) | 0xfff : limit;
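/*
 * Example: with g=1 the limit is in 4 KiB pages, so a raw limit of
 * 0xfffff scales to 0xffffffff (4 GiB - 1); with g=0 the raw
 * byte-granular limit is returned unchanged.
 */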
524 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
526 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
529 return ctxt->ops->get_cached_segment_base(ctxt, seg);
532 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
533 u32 error, bool valid)
536 ctxt->exception.vector = vec;
537 ctxt->exception.error_code = error;
538 ctxt->exception.error_code_valid = valid;
539 return X86EMUL_PROPAGATE_FAULT;
542 static int emulate_db(struct x86_emulate_ctxt *ctxt)
544 return emulate_exception(ctxt, DB_VECTOR, 0, false);
547 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
549 return emulate_exception(ctxt, GP_VECTOR, err, true);
552 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
554 return emulate_exception(ctxt, SS_VECTOR, err, true);
557 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
559 return emulate_exception(ctxt, UD_VECTOR, 0, false);
562 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
564 return emulate_exception(ctxt, TS_VECTOR, err, true);
567 static int emulate_de(struct x86_emulate_ctxt *ctxt)
569 return emulate_exception(ctxt, DE_VECTOR, 0, false);
572 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
574 return emulate_exception(ctxt, NM_VECTOR, 0, false);
577 static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
580 switch (ctxt->op_bytes) {
582 ctxt->_eip = (u16)dst;
585 ctxt->_eip = (u32)dst;
589 if ((cs_l && is_noncanonical_address(dst)) ||
590 (!cs_l && (dst >> 32) != 0))
591 return emulate_gp(ctxt, 0);
596 WARN(1, "unsupported eip assignment size\n");
598 return X86EMUL_CONTINUE;
601 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
603 return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
606 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
608 return assign_eip_near(ctxt, ctxt->_eip + rel);
611 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
614 struct desc_struct desc;
616 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
620 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
625 struct desc_struct desc;
627 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
628 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
632 * x86 defines three classes of vector instructions: explicitly
633 * aligned, explicitly unaligned, and the rest, which change behaviour
634 * depending on whether they're AVX encoded or not.
636 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
637 * subject to the same check.
639 static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
641 if (likely(size < 16))
644 if (ctxt->d & Aligned)
646 else if (ctxt->d & Unaligned)
648 else if (ctxt->d & Avx)
654 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
655 struct segmented_address addr,
656 unsigned *max_size, unsigned size,
657 bool write, bool fetch,
660 struct desc_struct desc;
667 la = seg_base(ctxt, addr.seg) +
668 (fetch || ctxt->ad_bytes == 8 ? addr.ea : (u32)addr.ea);
670 switch (ctxt->mode) {
671 case X86EMUL_MODE_PROT64:
672 if (is_noncanonical_address(la))
673 return emulate_gp(ctxt, 0);
675 *max_size = min_t(u64, ~0u, (1ull << 48) - la);
676 if (size > *max_size)
680 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
684 /* code segment in protected mode or read-only data segment */
685 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
686 || !(desc.type & 2)) && write)
688 /* unreadable code segment */
689 if (!fetch && (desc.type & 8) && !(desc.type & 2))
691 lim = desc_limit_scaled(&desc);
692 if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
693 (ctxt->d & NoBigReal)) {
694 /* la is between zero and 0xffff */
697 *max_size = 0x10000 - la;
698 } else if ((desc.type & 8) || !(desc.type & 4)) {
699 /* expand-up segment */
702 *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
704 /* expand-down segment */
707 lim = desc.d ? 0xffffffff : 0xffff;
710 *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
712 if (size > *max_size)
714 cpl = ctxt->ops->cpl(ctxt);
716 /* data segment or readable code segment */
719 } else if ((desc.type & 8) && !(desc.type & 4)) {
720 /* nonconforming code segment */
723 } else if ((desc.type & 8) && (desc.type & 4)) {
724 /* conforming code segment */
730 if (ctxt->mode != X86EMUL_MODE_PROT64)
732 if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
733 return emulate_gp(ctxt, 0);
735 return X86EMUL_CONTINUE;
737 if (addr.seg == VCPU_SREG_SS)
738 return emulate_ss(ctxt, 0);
740 return emulate_gp(ctxt, 0);
743 static int linearize(struct x86_emulate_ctxt *ctxt,
744 struct segmented_address addr,
745 unsigned size, bool write,
749 return __linearize(ctxt, addr, &max_size, size, write, false, linear);
753 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
754 struct segmented_address addr,
761 rc = linearize(ctxt, addr, size, false, &linear);
762 if (rc != X86EMUL_CONTINUE)
764 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
768 * Prefetch the remaining bytes of the instruction without crossing a page
769 * boundary if they are not in fetch_cache yet.
771 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
774 unsigned size, max_size;
775 unsigned long linear;
776 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
777 struct segmented_address addr = { .seg = VCPU_SREG_CS,
778 .ea = ctxt->eip + cur_size };
781 * We do not know exactly how many bytes will be needed, and
782 * __linearize is expensive, so fetch as much as possible. We
783 * just have to avoid going beyond the 15 byte limit, the end
784 * of the segment, or the end of the page.
786 * __linearize is called with size 0 so that it does not do any
787 * boundary check itself. Instead, we use max_size to check against op_size.
790 rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
791 if (unlikely(rc != X86EMUL_CONTINUE))
794 size = min_t(unsigned, 15UL ^ cur_size, max_size);
795 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
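/*
 * Note on the line above it: since cur_size never exceeds 15, the
 * expression 15UL ^ cur_size equals 15 - cur_size, i.e. the bytes still
 * available before the 15-byte instruction-length limit.
 */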
798 * One instruction can only straddle two pages,
799 * and one has been loaded at the beginning of
800 * x86_decode_insn. So, if we still do not have
801 * enough bytes, we must have hit the 15-byte limit.
803 if (unlikely(size < op_size))
804 return emulate_gp(ctxt, 0);
806 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
807 size, &ctxt->exception);
808 if (unlikely(rc != X86EMUL_CONTINUE))
810 ctxt->fetch.end += size;
811 return X86EMUL_CONTINUE;
814 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
817 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
819 if (unlikely(done_size < size))
820 return __do_insn_fetch_bytes(ctxt, size - done_size);
822 return X86EMUL_CONTINUE;
825 /* Fetch next part of the instruction being emulated. */
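/*
 * Usage sketch for the macro below: a decoder step such as
 *
 *	ctxt->b = insn_fetch(u8, ctxt);
 *
 * pulls one byte from the fetch cache, advancing _eip and fetch.ptr,
 * and bails out through the macro's error path if the fetch fails.
 */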
826 #define insn_fetch(_type, _ctxt) \
829 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
830 if (rc != X86EMUL_CONTINUE) \
832 ctxt->_eip += sizeof(_type); \
833 _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
834 ctxt->fetch.ptr += sizeof(_type); \
838 #define insn_fetch_arr(_arr, _size, _ctxt) \
840 rc = do_insn_fetch_bytes(_ctxt, _size); \
841 if (rc != X86EMUL_CONTINUE) \
843 ctxt->_eip += (_size); \
844 memcpy(_arr, ctxt->fetch.ptr, _size); \
845 ctxt->fetch.ptr += (_size); \
849 * Given the 'reg' portion of a ModRM byte, and a register block, return a
850 * pointer into the block that addresses the relevant register.
851 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
853 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
857 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
859 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
860 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
862 p = reg_rmw(ctxt, modrm_reg);
866 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
867 struct segmented_address addr,
868 u16 *size, unsigned long *address, int op_bytes)
875 rc = segmented_read_std(ctxt, addr, size, 2);
876 if (rc != X86EMUL_CONTINUE)
879 rc = segmented_read_std(ctxt, addr, address, op_bytes);
893 FASTOP1SRC2(mul, mul_ex);
894 FASTOP1SRC2(imul, imul_ex);
895 FASTOP1SRC2EX(div, div_ex);
896 FASTOP1SRC2EX(idiv, idiv_ex);
925 FASTOP2R(cmp, cmp_r);
927 static u8 test_cc(unsigned int condition, unsigned long flags)
930 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
932 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
933 asm("push %[flags]; popf; call *%[fastop]"
934 : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
938 static void fetch_register_operand(struct operand *op)
942 op->val = *(u8 *)op->addr.reg;
945 op->val = *(u16 *)op->addr.reg;
948 op->val = *(u32 *)op->addr.reg;
951 op->val = *(u64 *)op->addr.reg;
956 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
958 ctxt->ops->get_fpu(ctxt);
960 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
961 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
962 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
963 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
964 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
965 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
966 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
967 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
969 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
970 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
971 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
972 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
973 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
974 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
975 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
976 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
980 ctxt->ops->put_fpu(ctxt);
983 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
986 ctxt->ops->get_fpu(ctxt);
988 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
989 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
990 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
991 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
992 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
993 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
994 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
995 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
997 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
998 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
999 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
1000 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
1001 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
1002 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
1003 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
1004 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
1008 ctxt->ops->put_fpu(ctxt);
1011 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1013 ctxt->ops->get_fpu(ctxt);
1015 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1016 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1017 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1018 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1019 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1020 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1021 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1022 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1025 ctxt->ops->put_fpu(ctxt);
1028 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1030 ctxt->ops->get_fpu(ctxt);
1032 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1033 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1034 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1035 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1036 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1037 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1038 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1039 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1042 ctxt->ops->put_fpu(ctxt);
1045 static int em_fninit(struct x86_emulate_ctxt *ctxt)
1047 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1048 return emulate_nm(ctxt);
1050 ctxt->ops->get_fpu(ctxt);
1051 asm volatile("fninit");
1052 ctxt->ops->put_fpu(ctxt);
1053 return X86EMUL_CONTINUE;
1056 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1060 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1061 return emulate_nm(ctxt);
1063 ctxt->ops->get_fpu(ctxt);
1064 asm volatile("fnstcw %0": "+m"(fcw));
1065 ctxt->ops->put_fpu(ctxt);
1067 /* force 2 byte destination */
1068 ctxt->dst.bytes = 2;
1069 ctxt->dst.val = fcw;
1071 return X86EMUL_CONTINUE;
1074 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1078 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1079 return emulate_nm(ctxt);
1081 ctxt->ops->get_fpu(ctxt);
1082 asm volatile("fnstsw %0": "+m"(fsw));
1083 ctxt->ops->put_fpu(ctxt);
1085 /* force 2 byte destination */
1086 ctxt->dst.bytes = 2;
1087 ctxt->dst.val = fsw;
1089 return X86EMUL_CONTINUE;
1092 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1095 unsigned reg = ctxt->modrm_reg;
1097 if (!(ctxt->d & ModRM))
1098 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1100 if (ctxt->d & Sse) {
1104 read_sse_reg(ctxt, &op->vec_val, reg);
1107 if (ctxt->d & Mmx) {
1116 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1117 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1119 fetch_register_operand(op);
1120 op->orig_val = op->val;
1123 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1125 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1126 ctxt->modrm_seg = VCPU_SREG_SS;
1129 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1133 int index_reg, base_reg, scale;
1134 int rc = X86EMUL_CONTINUE;
1137 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1138 index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1139 base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1141 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1142 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1143 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1144 ctxt->modrm_seg = VCPU_SREG_DS;
1146 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1148 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1149 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1151 if (ctxt->d & Sse) {
1154 op->addr.xmm = ctxt->modrm_rm;
1155 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1158 if (ctxt->d & Mmx) {
1161 op->addr.mm = ctxt->modrm_rm & 7;
1164 fetch_register_operand(op);
1170 if (ctxt->ad_bytes == 2) {
1171 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1172 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1173 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1174 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1176 /* 16-bit ModR/M decode. */
1177 switch (ctxt->modrm_mod) {
1179 if (ctxt->modrm_rm == 6)
1180 modrm_ea += insn_fetch(u16, ctxt);
1183 modrm_ea += insn_fetch(s8, ctxt);
1186 modrm_ea += insn_fetch(u16, ctxt);
1189 switch (ctxt->modrm_rm) {
1191 modrm_ea += bx + si;
1194 modrm_ea += bx + di;
1197 modrm_ea += bp + si;
1200 modrm_ea += bp + di;
1209 if (ctxt->modrm_mod != 0)
1216 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1217 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1218 ctxt->modrm_seg = VCPU_SREG_SS;
1219 modrm_ea = (u16)modrm_ea;
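/*
 * For reference (per the SDM), the full 16-bit r/m base encodings
 * handled above are: 0=BX+SI 1=BX+DI 2=BP+SI 3=BP+DI 4=SI 5=DI
 * 6=BP (plain disp16 when mod == 0) 7=BX; the BP-based forms default
 * to the SS segment, as the modrm_seg logic above reflects.
 */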
1221 /* 32/64-bit ModR/M decode. */
1222 if ((ctxt->modrm_rm & 7) == 4) {
1223 sib = insn_fetch(u8, ctxt);
1224 index_reg |= (sib >> 3) & 7;
1225 base_reg |= sib & 7;
1228 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1229 modrm_ea += insn_fetch(s32, ctxt);
1231 modrm_ea += reg_read(ctxt, base_reg);
1232 adjust_modrm_seg(ctxt, base_reg);
1235 modrm_ea += reg_read(ctxt, index_reg) << scale;
1236 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1237 modrm_ea += insn_fetch(s32, ctxt);
1238 if (ctxt->mode == X86EMUL_MODE_PROT64)
1239 ctxt->rip_relative = 1;
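/*
 * Note: in 64-bit mode, mod == 0 with r/m == 5 selects RIP-relative
 * addressing; the disp32 fetched above is combined with the next
 * instruction's RIP once decoding finishes, which is what the
 * rip_relative flag arranges.
 */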
1241 base_reg = ctxt->modrm_rm;
1242 modrm_ea += reg_read(ctxt, base_reg);
1243 adjust_modrm_seg(ctxt, base_reg);
1245 switch (ctxt->modrm_mod) {
1247 modrm_ea += insn_fetch(s8, ctxt);
1250 modrm_ea += insn_fetch(s32, ctxt);
1254 op->addr.mem.ea = modrm_ea;
1255 if (ctxt->ad_bytes != 8)
1256 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1262 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1265 int rc = X86EMUL_CONTINUE;
1268 switch (ctxt->ad_bytes) {
1270 op->addr.mem.ea = insn_fetch(u16, ctxt);
1273 op->addr.mem.ea = insn_fetch(u32, ctxt);
1276 op->addr.mem.ea = insn_fetch(u64, ctxt);
1283 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1287 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1288 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1290 if (ctxt->src.bytes == 2)
1291 sv = (s16)ctxt->src.val & (s16)mask;
1292 else if (ctxt->src.bytes == 4)
1293 sv = (s32)ctxt->src.val & (s32)mask;
1295 sv = (s64)ctxt->src.val & (s64)mask;
1297 ctxt->dst.addr.mem.ea += (sv >> 3);
1300 /* only subword offset */
1301 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
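/*
 * Worked example: for a 32-bit "bt mem, reg" with a source bit index
 * of 65, mask is ~31, so sv = 65 & ~31 = 64 and the effective address
 * above moves by 64 >> 3 = 8 bytes; the in-word bit index then
 * reduces to 65 & 31 = 1.
 */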
1304 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1305 unsigned long addr, void *dest, unsigned size)
1308 struct read_cache *mc = &ctxt->mem_read;
1310 if (mc->pos < mc->end)
1313 WARN_ON((mc->end + size) >= sizeof(mc->data));
1315 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1317 if (rc != X86EMUL_CONTINUE)
1323 memcpy(dest, mc->data + mc->pos, size);
1325 return X86EMUL_CONTINUE;
1328 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1329 struct segmented_address addr,
1336 rc = linearize(ctxt, addr, size, false, &linear);
1337 if (rc != X86EMUL_CONTINUE)
1339 return read_emulated(ctxt, linear, data, size);
1342 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1343 struct segmented_address addr,
1350 rc = linearize(ctxt, addr, size, true, &linear);
1351 if (rc != X86EMUL_CONTINUE)
1353 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1357 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1358 struct segmented_address addr,
1359 const void *orig_data, const void *data,
1365 rc = linearize(ctxt, addr, size, true, &linear);
1366 if (rc != X86EMUL_CONTINUE)
1368 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1369 size, &ctxt->exception);
1372 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1373 unsigned int size, unsigned short port,
1376 struct read_cache *rc = &ctxt->io_read;
1378 if (rc->pos == rc->end) { /* refill pio read ahead */
1379 unsigned int in_page, n;
1380 unsigned int count = ctxt->rep_prefix ?
1381 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1382 in_page = (ctxt->eflags & EFLG_DF) ?
1383 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1384 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1385 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1388 rc->pos = rc->end = 0;
1389 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1394 if (ctxt->rep_prefix && (ctxt->d & String) &&
1395 !(ctxt->eflags & EFLG_DF)) {
1396 ctxt->dst.data = rc->data + rc->pos;
1397 ctxt->dst.type = OP_MEM_STR;
1398 ctxt->dst.count = (rc->end - rc->pos) / size;
1401 memcpy(dest, rc->data + rc->pos, size);
1407 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1408 u16 index, struct desc_struct *desc)
1413 ctxt->ops->get_idt(ctxt, &dt);
1415 if (dt.size < index * 8 + 7)
1416 return emulate_gp(ctxt, index << 3 | 0x2);
1418 addr = dt.address + index * 8;
1419 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1423 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1424 u16 selector, struct desc_ptr *dt)
1426 const struct x86_emulate_ops *ops = ctxt->ops;
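/* Note: selector bit 2 is the table indicator (1 = LDT, 0 = GDT). */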
1429 if (selector & 1 << 2) {
1430 struct desc_struct desc;
1433 memset(dt, 0, sizeof *dt);
1434 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1438 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1439 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1441 ops->get_gdt(ctxt, dt);
1444 /* allowed just for 8-byte segment descriptors */
1445 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1446 u16 selector, struct desc_struct *desc,
1450 u16 index = selector >> 3;
1453 get_descriptor_table_ptr(ctxt, selector, &dt);
1455 if (dt.size < index * 8 + 7)
1456 return emulate_gp(ctxt, selector & 0xfffc);
1458 *desc_addr_p = addr = dt.address + index * 8;
1459 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1463 /* allowed just for 8-byte segment descriptors */
1464 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1465 u16 selector, struct desc_struct *desc)
1468 u16 index = selector >> 3;
1471 get_descriptor_table_ptr(ctxt, selector, &dt);
1473 if (dt.size < index * 8 + 7)
1474 return emulate_gp(ctxt, selector & 0xfffc);
1476 addr = dt.address + index * 8;
1477 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1481 /* Does not support long mode */
1482 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1483 u16 selector, int seg, u8 cpl,
1484 bool in_task_switch,
1485 struct desc_struct *desc)
1487 struct desc_struct seg_desc, old_desc;
1489 unsigned err_vec = GP_VECTOR;
1491 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1497 memset(&seg_desc, 0, sizeof seg_desc);
1499 if (ctxt->mode == X86EMUL_MODE_REAL) {
1500 /* set real mode segment descriptor (keep limit etc. for unreal mode) */
1502 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1503 set_desc_base(&seg_desc, selector << 4);
1505 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1506 /* VM86 needs a clean new segment descriptor */
1507 set_desc_base(&seg_desc, selector << 4);
1508 set_desc_limit(&seg_desc, 0xffff);
1518 /* NULL selector is not valid for TR, CS and SS (except for long mode) */
1519 if ((seg == VCPU_SREG_CS
1520 || (seg == VCPU_SREG_SS
1521 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1522 || seg == VCPU_SREG_TR)
1526 /* TR should be in GDT only */
1527 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1530 if (null_selector) /* for NULL selector skip all following checks */
1533 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1534 if (ret != X86EMUL_CONTINUE)
1537 err_code = selector & 0xfffc;
1538 err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;
1540 /* can't load system descriptor into segment selector */
1541 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1545 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1554 * segment is not a writable data segment, or the segment
1555 * selector's RPL != CPL, or the descriptor's DPL != CPL
1557 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1561 if (!(seg_desc.type & 8))
1564 if (seg_desc.type & 4) {
1570 if (rpl > cpl || dpl != cpl)
1573 /* in long-mode d/b must be clear if l is set */
1574 if (seg_desc.d && seg_desc.l) {
1577 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1578 if (efer & EFER_LMA)
1582 /* CS(RPL) <- CPL */
1583 selector = (selector & 0xfffc) | cpl;
1586 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1588 old_desc = seg_desc;
1589 seg_desc.type |= 2; /* busy */
1590 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1591 sizeof(seg_desc), &ctxt->exception);
1592 if (ret != X86EMUL_CONTINUE)
1595 case VCPU_SREG_LDTR:
1596 if (seg_desc.s || seg_desc.type != 2)
1599 default: /* DS, ES, FS, or GS */
1601 * segment is not a data or readable code segment or
1602 * ((segment is a data or nonconforming code segment)
1603 * and (both RPL and CPL > DPL))
1605 if ((seg_desc.type & 0xa) == 0x8 ||
1606 (((seg_desc.type & 0xc) != 0xc) &&
1607 (rpl > dpl && cpl > dpl)))
1613 /* mark segment as accessed */
1615 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1616 if (ret != X86EMUL_CONTINUE)
1618 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1619 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1620 sizeof(base3), &ctxt->exception);
1621 if (ret != X86EMUL_CONTINUE)
1623 if (is_noncanonical_address(get_desc_base(&seg_desc) |
1624 ((u64)base3 << 32)))
1625 return emulate_gp(ctxt, 0);
1628 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1631 return X86EMUL_CONTINUE;
1633 return emulate_exception(ctxt, err_vec, err_code, true);
1636 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1637 u16 selector, int seg)
1639 u8 cpl = ctxt->ops->cpl(ctxt);
1640 return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
1643 static void write_register_operand(struct operand *op)
1645 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1646 switch (op->bytes) {
1648 *(u8 *)op->addr.reg = (u8)op->val;
1651 *(u16 *)op->addr.reg = (u16)op->val;
1654 *op->addr.reg = (u32)op->val;
1655 break; /* 64b: zero-extend */
1657 *op->addr.reg = op->val;
1662 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1666 write_register_operand(op);
1669 if (ctxt->lock_prefix)
1670 return segmented_cmpxchg(ctxt,
1676 return segmented_write(ctxt,
1682 return segmented_write(ctxt,
1685 op->bytes * op->count);
1688 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1691 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1699 return X86EMUL_CONTINUE;
1702 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1704 struct segmented_address addr;
1706 rsp_increment(ctxt, -bytes);
1707 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1708 addr.seg = VCPU_SREG_SS;
1710 return segmented_write(ctxt, addr, data, bytes);
1713 static int em_push(struct x86_emulate_ctxt *ctxt)
1715 /* Disable writeback. */
1716 ctxt->dst.type = OP_NONE;
1717 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1720 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1721 void *dest, int len)
1724 struct segmented_address addr;
1726 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1727 addr.seg = VCPU_SREG_SS;
1728 rc = segmented_read(ctxt, addr, dest, len);
1729 if (rc != X86EMUL_CONTINUE)
1732 rsp_increment(ctxt, len);
1736 static int em_pop(struct x86_emulate_ctxt *ctxt)
1738 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1741 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1742 void *dest, int len)
1745 unsigned long val, change_mask;
1746 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1747 int cpl = ctxt->ops->cpl(ctxt);
1749 rc = emulate_pop(ctxt, &val, len);
1750 if (rc != X86EMUL_CONTINUE)
1753 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1754 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
1756 switch(ctxt->mode) {
1757 case X86EMUL_MODE_PROT64:
1758 case X86EMUL_MODE_PROT32:
1759 case X86EMUL_MODE_PROT16:
1761 change_mask |= EFLG_IOPL;
1763 change_mask |= EFLG_IF;
1765 case X86EMUL_MODE_VM86:
1767 return emulate_gp(ctxt, 0);
1768 change_mask |= EFLG_IF;
1770 default: /* real mode */
1771 change_mask |= (EFLG_IOPL | EFLG_IF);
1775 *(unsigned long *)dest =
1776 (ctxt->eflags & ~change_mask) | (val & change_mask);
1781 static int em_popf(struct x86_emulate_ctxt *ctxt)
1783 ctxt->dst.type = OP_REG;
1784 ctxt->dst.addr.reg = &ctxt->eflags;
1785 ctxt->dst.bytes = ctxt->op_bytes;
1786 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1789 static int em_enter(struct x86_emulate_ctxt *ctxt)
1792 unsigned frame_size = ctxt->src.val;
1793 unsigned nesting_level = ctxt->src2.val & 31;
1797 return X86EMUL_UNHANDLEABLE;
1799 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1800 rc = push(ctxt, &rbp, stack_size(ctxt));
1801 if (rc != X86EMUL_CONTINUE)
1803 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1805 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1806 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1808 return X86EMUL_CONTINUE;
1811 static int em_leave(struct x86_emulate_ctxt *ctxt)
1813 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1815 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1818 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1820 int seg = ctxt->src2.val;
1822 ctxt->src.val = get_segment_selector(ctxt, seg);
1823 if (ctxt->op_bytes == 4) {
1824 rsp_increment(ctxt, -2);
1828 return em_push(ctxt);
1831 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1833 int seg = ctxt->src2.val;
1834 unsigned long selector;
1837 rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
1838 if (rc != X86EMUL_CONTINUE)
1841 if (ctxt->modrm_reg == VCPU_SREG_SS)
1842 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1844 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1848 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1850 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1851 int rc = X86EMUL_CONTINUE;
1852 int reg = VCPU_REGS_RAX;
1854 while (reg <= VCPU_REGS_RDI) {
1855 ctxt->src.val = (reg == VCPU_REGS_RSP) ?
1856 old_esp : reg_read(ctxt, reg);
1859 if (rc != X86EMUL_CONTINUE)
1868 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1870 ctxt->src.val = (unsigned long)ctxt->eflags;
1871 return em_push(ctxt);
1874 static int em_popa(struct x86_emulate_ctxt *ctxt)
1876 int rc = X86EMUL_CONTINUE;
1877 int reg = VCPU_REGS_RDI;
1879 while (reg >= VCPU_REGS_RAX) {
1880 if (reg == VCPU_REGS_RSP) {
1881 rsp_increment(ctxt, ctxt->op_bytes);
1885 rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
1886 if (rc != X86EMUL_CONTINUE)
1893 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1895 const struct x86_emulate_ops *ops = ctxt->ops;
1902 /* TODO: Add limit checks */
1903 ctxt->src.val = ctxt->eflags;
1905 if (rc != X86EMUL_CONTINUE)
1908 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1910 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1912 if (rc != X86EMUL_CONTINUE)
1915 ctxt->src.val = ctxt->_eip;
1917 if (rc != X86EMUL_CONTINUE)
1920 ops->get_idt(ctxt, &dt);
1922 eip_addr = dt.address + (irq << 2);
1923 cs_addr = dt.address + (irq << 2) + 2;
1925 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1926 if (rc != X86EMUL_CONTINUE)
1929 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1930 if (rc != X86EMUL_CONTINUE)
1933 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1934 if (rc != X86EMUL_CONTINUE)
1942 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1946 invalidate_registers(ctxt);
1947 rc = __emulate_int_real(ctxt, irq);
1948 if (rc == X86EMUL_CONTINUE)
1949 writeback_registers(ctxt);
1953 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1955 switch(ctxt->mode) {
1956 case X86EMUL_MODE_REAL:
1957 return __emulate_int_real(ctxt, irq);
1958 case X86EMUL_MODE_VM86:
1959 case X86EMUL_MODE_PROT16:
1960 case X86EMUL_MODE_PROT32:
1961 case X86EMUL_MODE_PROT64:
1963 /* Protected mode interrupts are not implemented yet */
1964 return X86EMUL_UNHANDLEABLE;
1968 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1970 int rc = X86EMUL_CONTINUE;
1971 unsigned long temp_eip = 0;
1972 unsigned long temp_eflags = 0;
1973 unsigned long cs = 0;
1974 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1975 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1976 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1977 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1979 /* TODO: Add stack limit check */
1981 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1983 if (rc != X86EMUL_CONTINUE)
1986 if (temp_eip & ~0xffff)
1987 return emulate_gp(ctxt, 0);
1989 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1991 if (rc != X86EMUL_CONTINUE)
1994 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1996 if (rc != X86EMUL_CONTINUE)
1999 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2001 if (rc != X86EMUL_CONTINUE)
2004 ctxt->_eip = temp_eip;
2007 if (ctxt->op_bytes == 4)
2008 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2009 else if (ctxt->op_bytes == 2) {
2010 ctxt->eflags &= ~0xffff;
2011 ctxt->eflags |= temp_eflags;
2014 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2015 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
2020 static int em_iret(struct x86_emulate_ctxt *ctxt)
2022 switch(ctxt->mode) {
2023 case X86EMUL_MODE_REAL:
2024 return emulate_iret_real(ctxt);
2025 case X86EMUL_MODE_VM86:
2026 case X86EMUL_MODE_PROT16:
2027 case X86EMUL_MODE_PROT32:
2028 case X86EMUL_MODE_PROT64:
2030 /* iret from protected mode is not implemented yet */
2031 return X86EMUL_UNHANDLEABLE;
2035 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2038 unsigned short sel, old_sel;
2039 struct desc_struct old_desc, new_desc;
2040 const struct x86_emulate_ops *ops = ctxt->ops;
2041 u8 cpl = ctxt->ops->cpl(ctxt);
2043 /* Assignment of RIP may only fail in 64-bit mode */
2044 if (ctxt->mode == X86EMUL_MODE_PROT64)
2045 ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2048 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2050 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
2052 if (rc != X86EMUL_CONTINUE)
2055 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
2056 if (rc != X86EMUL_CONTINUE) {
2057 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2058 /* assigning eip failed; restore the old cs */
2059 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2065 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2067 return assign_eip_near(ctxt, ctxt->src.val);
2070 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2075 old_eip = ctxt->_eip;
2076 rc = assign_eip_near(ctxt, ctxt->src.val);
2077 if (rc != X86EMUL_CONTINUE)
2079 ctxt->src.val = old_eip;
2084 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2086 u64 old = ctxt->dst.orig_val64;
2088 if (ctxt->dst.bytes == 16)
2089 return X86EMUL_UNHANDLEABLE;
2091 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2092 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2093 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2094 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2095 ctxt->eflags &= ~EFLG_ZF;
2097 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2098 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2100 ctxt->eflags |= EFLG_ZF;
2102 return X86EMUL_CONTINUE;
2105 static int em_ret(struct x86_emulate_ctxt *ctxt)
2110 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2111 if (rc != X86EMUL_CONTINUE)
2114 return assign_eip_near(ctxt, eip);
2117 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2120 unsigned long eip, cs;
2122 int cpl = ctxt->ops->cpl(ctxt);
2123 struct desc_struct old_desc, new_desc;
2124 const struct x86_emulate_ops *ops = ctxt->ops;
2126 if (ctxt->mode == X86EMUL_MODE_PROT64)
2127 ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2130 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2131 if (rc != X86EMUL_CONTINUE)
2133 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2134 if (rc != X86EMUL_CONTINUE)
2136 /* Outer-privilege level return is not implemented */
2137 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2138 return X86EMUL_UNHANDLEABLE;
2139 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
2141 if (rc != X86EMUL_CONTINUE)
2143 rc = assign_eip_far(ctxt, eip, new_desc.l);
2144 if (rc != X86EMUL_CONTINUE) {
2145 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2146 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2151 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2155 rc = em_ret_far(ctxt);
2156 if (rc != X86EMUL_CONTINUE)
2158 rsp_increment(ctxt, ctxt->src.val);
2159 return X86EMUL_CONTINUE;
2162 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2164 /* Save real source value, then compare EAX against destination. */
2165 ctxt->dst.orig_val = ctxt->dst.val;
2166 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2167 ctxt->src.orig_val = ctxt->src.val;
2168 ctxt->src.val = ctxt->dst.orig_val;
2169 fastop(ctxt, em_cmp);
2171 if (ctxt->eflags & EFLG_ZF) {
2172 /* Success: write back to memory. */
2173 ctxt->dst.val = ctxt->src.orig_val;
2175 /* Failure: write the value we saw to EAX. */
2176 ctxt->dst.type = OP_REG;
2177 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2178 ctxt->dst.val = ctxt->dst.orig_val;
2180 return X86EMUL_CONTINUE;
2183 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2185 int seg = ctxt->src2.val;
2189 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2191 rc = load_segment_descriptor(ctxt, sel, seg);
2192 if (rc != X86EMUL_CONTINUE)
2195 ctxt->dst.val = ctxt->src.val;
2200 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2201 struct desc_struct *cs, struct desc_struct *ss)
2203 cs->l = 0; /* will be adjusted later */
2204 set_desc_base(cs, 0); /* flat segment */
2205 cs->g = 1; /* 4kb granularity */
2206 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2207 cs->type = 0x0b; /* Read, Execute, Accessed */
2209 cs->dpl = 0; /* will be adjusted later */
2214 set_desc_base(ss, 0); /* flat segment */
2215 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2216 ss->g = 1; /* 4kb granularity */
2218 ss->type = 0x03; /* Read/Write, Accessed */
2219 ss->d = 1; /* 32bit stack segment */
2226 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2228 u32 eax, ebx, ecx, edx;
2231 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2232 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2233 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2234 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2237 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2239 const struct x86_emulate_ops *ops = ctxt->ops;
2240 u32 eax, ebx, ecx, edx;
2243 * syscall should always be enabled in long mode, so only fall back to
2244 * vendor-specific (cpuid) checks if other modes are active...
2246 if (ctxt->mode == X86EMUL_MODE_PROT64)
2251 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2253 * Intel ("GenuineIntel")
2254 * remark: Intel CPUs only support "syscall" in 64-bit
2255 * long mode. A 64-bit guest running a 32-bit compat
2256 * app will therefore #UD. While this behaviour could
2257 * be emulated as the AMD response, AMD CPUs cannot be
2258 * made to behave like Intel.
2260 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2261 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2262 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2265 /* AMD ("AuthenticAMD") */
2266 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2267 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2268 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2271 /* AMD ("AMDisbetter!") */
2272 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2273 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2274 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2277 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2281 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2283 const struct x86_emulate_ops *ops = ctxt->ops;
2284 struct desc_struct cs, ss;
2289 /* syscall is not available in real mode */
2290 if (ctxt->mode == X86EMUL_MODE_REAL ||
2291 ctxt->mode == X86EMUL_MODE_VM86)
2292 return emulate_ud(ctxt);
2294 if (!(em_syscall_is_enabled(ctxt)))
2295 return emulate_ud(ctxt);
2297 ops->get_msr(ctxt, MSR_EFER, &efer);
2298 setup_syscalls_segments(ctxt, &cs, &ss);
2300 if (!(efer & EFER_SCE))
2301 return emulate_ud(ctxt);
2303 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2305 cs_sel = (u16)(msr_data & 0xfffc);
2306 ss_sel = (u16)(msr_data + 8);
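/*
 * Per the SYSCALL convention: STAR[47:32] supplies the new CS selector
 * (RPL forced to 0 by the & 0xfffc above) and SS is that selector + 8;
 * for legacy mode the new EIP comes from STAR[31:0], as the
 * (u32)msr_data assignment below shows.
 */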
2308 if (efer & EFER_LMA) {
2312 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2313 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2315 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2316 if (efer & EFER_LMA) {
2317 #ifdef CONFIG_X86_64
2318 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2321 ctxt->mode == X86EMUL_MODE_PROT64 ?
2322 MSR_LSTAR : MSR_CSTAR, &msr_data);
2323 ctxt->_eip = msr_data;
2325 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2326 ctxt->eflags &= ~msr_data;
2327 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
2331 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2332 ctxt->_eip = (u32)msr_data;
2334 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2337 return X86EMUL_CONTINUE;
2340 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2342 const struct x86_emulate_ops *ops = ctxt->ops;
2343 struct desc_struct cs, ss;
2348 ops->get_msr(ctxt, MSR_EFER, &efer);
2349 /* inject #GP if in real mode */
2350 if (ctxt->mode == X86EMUL_MODE_REAL)
2351 return emulate_gp(ctxt, 0);
2354 * Not recognized on AMD in compat mode (but is recognized in legacy mode)
2357 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2358 && !vendor_intel(ctxt))
2359 return emulate_ud(ctxt);
2361 /* XXX sysenter/sysexit have not been tested in 64-bit mode.
2362 * Therefore, we inject a #UD.
2364 if (ctxt->mode == X86EMUL_MODE_PROT64)
2365 return emulate_ud(ctxt);
2367 setup_syscalls_segments(ctxt, &cs, &ss);
2369 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2370 switch (ctxt->mode) {
2371 case X86EMUL_MODE_PROT32:
2372 if ((msr_data & 0xfffc) == 0x0)
2373 return emulate_gp(ctxt, 0);
2375 case X86EMUL_MODE_PROT64:
2376 if (msr_data == 0x0)
2377 return emulate_gp(ctxt, 0);
2383 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2384 cs_sel = (u16)msr_data;
2385 cs_sel &= ~SELECTOR_RPL_MASK;
2386 ss_sel = cs_sel + 8;
2387 ss_sel &= ~SELECTOR_RPL_MASK;
2388 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
2393 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2394 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2396 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2397 ctxt->_eip = msr_data;
2399 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2400 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
2402 return X86EMUL_CONTINUE;
2405 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2407 const struct x86_emulate_ops *ops = ctxt->ops;
2408 struct desc_struct cs, ss;
2409 u64 msr_data, rcx, rdx;
2411 u16 cs_sel = 0, ss_sel = 0;
2413 /* inject #GP if in real mode or Virtual 8086 mode */
2414 if (ctxt->mode == X86EMUL_MODE_REAL ||
2415 ctxt->mode == X86EMUL_MODE_VM86)
2416 return emulate_gp(ctxt, 0);
2418 setup_syscalls_segments(ctxt, &cs, &ss);
2420 if ((ctxt->rex_prefix & 0x8) != 0x0)
2421 usermode = X86EMUL_MODE_PROT64;
2423 usermode = X86EMUL_MODE_PROT32;
2425 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2426 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2430 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2432 case X86EMUL_MODE_PROT32:
2433 cs_sel = (u16)(msr_data + 16);
2434 if ((msr_data & 0xfffc) == 0x0)
2435 return emulate_gp(ctxt, 0);
2436 ss_sel = (u16)(msr_data + 24);
2440 case X86EMUL_MODE_PROT64:
2441 cs_sel = (u16)(msr_data + 32);
2442 if (msr_data == 0x0)
2443 return emulate_gp(ctxt, 0);
2444 ss_sel = cs_sel + 8;
2447 if (is_noncanonical_address(rcx) ||
2448 is_noncanonical_address(rdx))
2449 return emulate_gp(ctxt, 0);
2452 cs_sel |= SELECTOR_RPL_MASK;
2453 ss_sel |= SELECTOR_RPL_MASK;
2455 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2456 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2459 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2461 return X86EMUL_CONTINUE;
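/*
 * SYSEXIT selector arithmetic, relative to MSR_IA32_SYSENTER_CS (per
 * the SDM): a 32-bit exit uses CS = base + 16 and SS = base + 24, a
 * 64-bit exit CS = base + 32 and SS = base + 40.  Both selectors have
 * RPL 3 or'ed in above, and RDX/RCX supply the return RIP/RSP.
 */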
2464 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2467 if (ctxt->mode == X86EMUL_MODE_REAL)
2469 if (ctxt->mode == X86EMUL_MODE_VM86)
2471 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2472 return ctxt->ops->cpl(ctxt) > iopl;
2475 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2478 const struct x86_emulate_ops *ops = ctxt->ops;
2479 struct desc_struct tr_seg;
2482 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2483 unsigned mask = (1 << len) - 1;
2486 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2489 if (desc_limit_scaled(&tr_seg) < 103)
2491 base = get_desc_base(&tr_seg);
2492 #ifdef CONFIG_X86_64
2493 base |= ((u64)base3) << 32;
2495 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2496 if (r != X86EMUL_CONTINUE)
2498 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2500 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2501 if (r != X86EMUL_CONTINUE)
2503 if ((perm >> bit_idx) & mask)
2508 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2514 if (emulator_bad_iopl(ctxt))
2515 if (!emulator_io_port_access_allowed(ctxt, port, len))
2518 ctxt->perm_ok = true;
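/*
 * Worked example of the TSS I/O bitmap check above: the u16 at TSS
 * offset 102 gives io_bitmap_ptr; for port 0x3f8 with len 1 the
 * permission word is read at io_bitmap_ptr + 0x3f8/8 = io_bitmap_ptr
 * + 0x7f, bit_idx is 0x3f8 & 7 = 0, and access is allowed only if
 * all len bits starting there are clear.
 */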
2523 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2524 struct tss_segment_16 *tss)
2526 tss->ip = ctxt->_eip;
2527 tss->flag = ctxt->eflags;
2528 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2529 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2530 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2531 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2532 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2533 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2534 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2535 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2537 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2538 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2539 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2540 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2541 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2544 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2545 struct tss_segment_16 *tss)
2550 ctxt->_eip = tss->ip;
2551 ctxt->eflags = tss->flag | 2;
2552 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2553 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2554 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2555 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2556 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2557 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2558 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2559 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2562 * SDM says that segment selectors are loaded before segment
 * descriptors.
2565 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2566 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2567 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2568 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2569 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2574 * Now load the segment descriptors.  If a fault happens at this stage,
2575 * it is handled in the context of the new task.
2577 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2579 if (ret != X86EMUL_CONTINUE)
2581 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2583 if (ret != X86EMUL_CONTINUE)
2585 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2587 if (ret != X86EMUL_CONTINUE)
2589 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2591 if (ret != X86EMUL_CONTINUE)
2593 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2595 if (ret != X86EMUL_CONTINUE)
2598 return X86EMUL_CONTINUE;
2601 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2602 u16 tss_selector, u16 old_tss_sel,
2603 ulong old_tss_base, struct desc_struct *new_desc)
2605 const struct x86_emulate_ops *ops = ctxt->ops;
2606 struct tss_segment_16 tss_seg;
2608 u32 new_tss_base = get_desc_base(new_desc);
2610 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2612 if (ret != X86EMUL_CONTINUE)
2613 /* FIXME: need to provide precise fault address */
2616 save_state_to_tss16(ctxt, &tss_seg);
2618 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2620 if (ret != X86EMUL_CONTINUE)
2621 /* FIXME: need to provide precise fault address */
2624 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2626 if (ret != X86EMUL_CONTINUE)
2627 /* FIXME: need to provide precise fault address */
2630 if (old_tss_sel != 0xffff) {
2631 tss_seg.prev_task_link = old_tss_sel;
2633 ret = ops->write_std(ctxt, new_tss_base,
2634 &tss_seg.prev_task_link,
2635 sizeof tss_seg.prev_task_link,
2637 if (ret != X86EMUL_CONTINUE)
2638 /* FIXME: need to provide precise fault address */
2642 return load_state_from_tss16(ctxt, &tss_seg);
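/*
 * Both task_switch_16() and task_switch_32() follow the same shape:
 * read the old TSS, write the outgoing state back into it, read the
 * new TSS, and, when the switch nests (old_tss_sel != 0xffff), store
 * the old TR selector as the new task's back link before loading
 * state from the new TSS.
 */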
2645 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2646 struct tss_segment_32 *tss)
2648 /* CR3 and the LDT selector are intentionally not saved */
2649 tss->eip = ctxt->_eip;
2650 tss->eflags = ctxt->eflags;
2651 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2652 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2653 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2654 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2655 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2656 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2657 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2658 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2660 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2661 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2662 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2663 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2664 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2665 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2668 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2669 struct tss_segment_32 *tss)
2674 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2675 return emulate_gp(ctxt, 0);
2676 ctxt->_eip = tss->eip;
2677 ctxt->eflags = tss->eflags | 2;
2679 /* General purpose registers */
2680 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2681 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2682 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2683 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2684 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2685 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2686 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2687 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2690 * SDM says that segment selectors are loaded before segment
2691 * descriptors.  This is important because CPL checks will
 * use the incoming CS.RPL.
2694 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2695 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2696 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2697 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2698 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2699 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2700 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2703 * If we're switching between Protected Mode and VM86, we need to make
2704 * sure to update the mode before loading the segment descriptors so
2705 * that the selectors are interpreted correctly.
2707 if (ctxt->eflags & X86_EFLAGS_VM) {
2708 ctxt->mode = X86EMUL_MODE_VM86;
2711 ctxt->mode = X86EMUL_MODE_PROT32;
2716 * Now load the segment descriptors.  If a fault happens at this stage,
2717 * it is handled in the context of the new task.
2719 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2721 if (ret != X86EMUL_CONTINUE)
2723 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2725 if (ret != X86EMUL_CONTINUE)
2727 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2729 if (ret != X86EMUL_CONTINUE)
2731 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2733 if (ret != X86EMUL_CONTINUE)
2735 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2737 if (ret != X86EMUL_CONTINUE)
2739 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2741 if (ret != X86EMUL_CONTINUE)
2743 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2745 if (ret != X86EMUL_CONTINUE)
2748 return X86EMUL_CONTINUE;
2751 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2752 u16 tss_selector, u16 old_tss_sel,
2753 ulong old_tss_base, struct desc_struct *new_desc)
2755 const struct x86_emulate_ops *ops = ctxt->ops;
2756 struct tss_segment_32 tss_seg;
2758 u32 new_tss_base = get_desc_base(new_desc);
2759 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2760 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2762 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2764 if (ret != X86EMUL_CONTINUE)
2765 /* FIXME: need to provide precise fault address */
2768 save_state_to_tss32(ctxt, &tss_seg);
2770 /* Only GP registers and segment selectors are saved */
2771 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2772 ldt_sel_offset - eip_offset, &ctxt->exception);
2773 if (ret != X86EMUL_CONTINUE)
2774 /* FIXME: need to provide precise fault address */
2777 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2779 if (ret != X86EMUL_CONTINUE)
2780 /* FIXME: need to provide precise fault address */
2783 if (old_tss_sel != 0xffff) {
2784 tss_seg.prev_task_link = old_tss_sel;
2786 ret = ops->write_std(ctxt, new_tss_base,
2787 &tss_seg.prev_task_link,
2788 sizeof tss_seg.prev_task_link,
2790 if (ret != X86EMUL_CONTINUE)
2791 /* FIXME: need to provide precise fault address */
2795 return load_state_from_tss32(ctxt, &tss_seg);
2798 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2799 u16 tss_selector, int idt_index, int reason,
2800 bool has_error_code, u32 error_code)
2802 const struct x86_emulate_ops *ops = ctxt->ops;
2803 struct desc_struct curr_tss_desc, next_tss_desc;
2805 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2806 ulong old_tss_base =
2807 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2811 /* FIXME: old_tss_base == ~0 ? */
2813 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2814 if (ret != X86EMUL_CONTINUE)
2816 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2817 if (ret != X86EMUL_CONTINUE)
2820 /* FIXME: check that next_tss_desc is tss */
2823 * Check privileges. The three cases are task switch caused by...
2825 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2826 * 2. Exception/IRQ/iret: No check is performed
2827 * 3. jmp/call to TSS: Check against DPL of the TSS
2829 if (reason == TASK_SWITCH_GATE) {
2830 if (idt_index != -1) {
2831 /* Software interrupts */
2832 struct desc_struct task_gate_desc;
2835 ret = read_interrupt_descriptor(ctxt, idt_index,
2837 if (ret != X86EMUL_CONTINUE)
2840 dpl = task_gate_desc.dpl;
2841 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2842 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2844 } else if (reason != TASK_SWITCH_IRET) {
2845 int dpl = next_tss_desc.dpl;
2846 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2847 return emulate_gp(ctxt, tss_selector);
2851 desc_limit = desc_limit_scaled(&next_tss_desc);
2852 if (!next_tss_desc.p ||
2853 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2854 desc_limit < 0x2b)) {
2855 return emulate_ts(ctxt, tss_selector & 0xfffc);
2858 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2859 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2860 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2863 if (reason == TASK_SWITCH_IRET)
2864 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2866 /* Set the back link to the previous task only if the NT bit is set
2867    in eflags; note that old_tss_sel is not used after this point. */
2868 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2869 old_tss_sel = 0xffff;
2871 if (next_tss_desc.type & 8)
2872 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2873 old_tss_base, &next_tss_desc);
2875 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2876 old_tss_base, &next_tss_desc);
2877 if (ret != X86EMUL_CONTINUE)
2880 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2881 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2883 if (reason != TASK_SWITCH_IRET) {
2884 next_tss_desc.type |= (1 << 1); /* set busy flag */
2885 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2888 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2889 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2891 if (has_error_code) {
2892 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2893 ctxt->lock_prefix = 0;
2894 ctxt->src.val = (unsigned long) error_code;
2895 ret = em_push(ctxt);
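/*
 * The bookkeeping above mirrors the hardware task-switch rules: JMP
 * and IRET clear the outgoing TSS's busy bit, the incoming TSS is
 * marked busy for everything except IRET, NT plus the back link make
 * nested task returns possible, and CR0.TS is set on every switch.
 */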
2901 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2902 u16 tss_selector, int idt_index, int reason,
2903 bool has_error_code, u32 error_code)
2907 invalidate_registers(ctxt);
2908 ctxt->_eip = ctxt->eip;
2909 ctxt->dst.type = OP_NONE;
2911 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2912 has_error_code, error_code);
2914 if (rc == X86EMUL_CONTINUE) {
2915 ctxt->eip = ctxt->_eip;
2916 writeback_registers(ctxt);
2919 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2922 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2925 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2927 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2928 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
2931 static int em_das(struct x86_emulate_ctxt *ctxt)
2934 bool af, cf, old_cf;
2936 cf = ctxt->eflags & X86_EFLAGS_CF;
2942 af = ctxt->eflags & X86_EFLAGS_AF;
2943 if ((al & 0x0f) > 9 || af) {
2945 cf = old_cf | (al >= 250);
2950 if (old_al > 0x99 || old_cf) {
2956 /* Set PF, ZF, SF */
2957 ctxt->src.type = OP_IMM;
2959 ctxt->src.bytes = 1;
2960 fastop(ctxt, em_or);
2961 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2963 ctxt->eflags |= X86_EFLAGS_CF;
2965 ctxt->eflags |= X86_EFLAGS_AF;
2966 return X86EMUL_CONTINUE;
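/*
 * DAS example: after subtracting packed BCD 17 from 42 the binary
 * result is AL = 0x2b.  The low nibble 0xb is > 9, so 6 is subtracted
 * to give AL = 0x25 (decimal 25) with AF set; the high nibble needs
 * no adjustment since the original AL was <= 0x99 and CF was clear.
 */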
2969 static int em_aam(struct x86_emulate_ctxt *ctxt)
2973 if (ctxt->src.val == 0)
2974 return emulate_de(ctxt);
2976 al = ctxt->dst.val & 0xff;
2977 ah = al / ctxt->src.val;
2978 al %= ctxt->src.val;
2980 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2982 /* Set PF, ZF, SF */
2983 ctxt->src.type = OP_IMM;
2985 ctxt->src.bytes = 1;
2986 fastop(ctxt, em_or);
2988 return X86EMUL_CONTINUE;
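/*
 * AAM example: with AL = 123 and the usual immediate of 10, the
 * division above leaves AH = 12 and AL = 3, i.e. 0x7b becomes 0x0c03
 * in the low word of the destination.
 */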
2991 static int em_aad(struct x86_emulate_ctxt *ctxt)
2993 u8 al = ctxt->dst.val & 0xff;
2994 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2996 al = (al + (ah * ctxt->src.val)) & 0xff;
2998 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3000 /* Set PF, ZF, SF */
3001 ctxt->src.type = OP_IMM;
3003 ctxt->src.bytes = 1;
3004 fastop(ctxt, em_or);
3006 return X86EMUL_CONTINUE;
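/*
 * AAD example: with AH = 1, AL = 2 and the usual immediate of 10,
 * AL becomes 1 * 10 + 2 = 12 (0x0c) and AH is cleared, since only
 * al is or'ed back into the low word above.
 */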
3009 static int em_call(struct x86_emulate_ctxt *ctxt)
3012 long rel = ctxt->src.val;
3014 ctxt->src.val = (unsigned long)ctxt->_eip;
3015 rc = jmp_rel(ctxt, rel);
3016 if (rc != X86EMUL_CONTINUE)
3018 return em_push(ctxt);
3021 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3026 struct desc_struct old_desc, new_desc;
3027 const struct x86_emulate_ops *ops = ctxt->ops;
3028 int cpl = ctxt->ops->cpl(ctxt);
3030 old_eip = ctxt->_eip;
3031 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3033 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3034 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3036 if (rc != X86EMUL_CONTINUE)
3037 return rc; /* propagate the fault instead of silently skipping the call */
3039 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
3040 if (rc != X86EMUL_CONTINUE)
3043 ctxt->src.val = old_cs;
3045 if (rc != X86EMUL_CONTINUE)
3048 ctxt->src.val = old_eip;
3050 /* If we failed, we tainted the memory, but the very least we should
    restore is the RIP. */
3052 if (rc != X86EMUL_CONTINUE)
3056 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3061 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3066 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3067 if (rc != X86EMUL_CONTINUE)
3069 rc = assign_eip_near(ctxt, eip);
3070 if (rc != X86EMUL_CONTINUE)
3072 rsp_increment(ctxt, ctxt->src.val);
3073 return X86EMUL_CONTINUE;
3076 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3078 /* Write back the register source. */
3079 ctxt->src.val = ctxt->dst.val;
3080 write_register_operand(&ctxt->src);
3082 /* Write back the memory destination with implicit LOCK prefix. */
3083 ctxt->dst.val = ctxt->src.orig_val;
3084 ctxt->lock_prefix = 1;
3085 return X86EMUL_CONTINUE;
3088 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3090 ctxt->dst.val = ctxt->src2.val;
3091 return fastop(ctxt, em_imul);
3094 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3096 ctxt->dst.type = OP_REG;
3097 ctxt->dst.bytes = ctxt->src.bytes;
3098 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3099 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3101 return X86EMUL_CONTINUE;
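/*
 * The expression above broadcasts the sign bit of the source into
 * RDX: for CWD with AX = 0x8000, src >> 15 is 1, so ~(1 - 1) = ~0
 * fills DX with 0xffff, while a non-negative AX yields ~(0 - 1) = 0.
 */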
3104 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3108 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3109 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3110 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3111 return X86EMUL_CONTINUE;
3114 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3118 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3119 return emulate_gp(ctxt, 0);
3120 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3121 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3122 return X86EMUL_CONTINUE;
3125 static int em_mov(struct x86_emulate_ctxt *ctxt)
3127 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3128 return X86EMUL_CONTINUE;
3131 #define FFL(x) bit(X86_FEATURE_##x)
3133 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3135 u32 ebx, ecx, edx, eax = 1;
3139 * Check MOVBE is set in the guest-visible CPUID leaf.
3141 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3142 if (!(ecx & FFL(MOVBE)))
3143 return emulate_ud(ctxt);
3145 switch (ctxt->op_bytes) {
3148 * From MOVBE definition: "...When the operand size is 16 bits,
3149 * the upper word of the destination register remains unchanged
3152 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3153 * rules, so we have to do the operation almost by hand.
3155 tmp = (u16)ctxt->src.val;
3156 ctxt->dst.val &= ~0xffffUL;
3157 ctxt->dst.val |= (unsigned long)swab16(tmp);
3160 ctxt->dst.val = swab32((u32)ctxt->src.val);
3163 ctxt->dst.val = swab64(ctxt->src.val);
3168 return X86EMUL_CONTINUE;
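/*
 * MOVBE example: a 32-bit source of 0x12345678 is stored as
 * 0x78563412.  The 16-bit case above swaps only the low word and, as
 * the quoted definition requires, leaves the upper bytes of the
 * destination register untouched.
 */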
3171 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3173 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3174 return emulate_gp(ctxt, 0);
3176 /* Disable writeback. */
3177 ctxt->dst.type = OP_NONE;
3178 return X86EMUL_CONTINUE;
3181 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3185 if (ctxt->mode == X86EMUL_MODE_PROT64)
3186 val = ctxt->src.val & ~0ULL;
3188 val = ctxt->src.val & ~0U;
3190 /* #UD condition is already handled. */
3191 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3192 return emulate_gp(ctxt, 0);
3194 /* Disable writeback. */
3195 ctxt->dst.type = OP_NONE;
3196 return X86EMUL_CONTINUE;
3199 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3203 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3204 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3205 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3206 return emulate_gp(ctxt, 0);
3208 return X86EMUL_CONTINUE;
3211 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3215 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3216 return emulate_gp(ctxt, 0);
3218 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3219 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3220 return X86EMUL_CONTINUE;
3223 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3225 if (ctxt->modrm_reg > VCPU_SREG_GS)
3226 return emulate_ud(ctxt);
3228 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3229 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3230 ctxt->dst.bytes = 2;
3231 return X86EMUL_CONTINUE;
3234 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3236 u16 sel = ctxt->src.val;
3238 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3239 return emulate_ud(ctxt);
3241 if (ctxt->modrm_reg == VCPU_SREG_SS)
3242 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3244 /* Disable writeback. */
3245 ctxt->dst.type = OP_NONE;
3246 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3249 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3251 u16 sel = ctxt->src.val;
3253 /* Disable writeback. */
3254 ctxt->dst.type = OP_NONE;
3255 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3258 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3260 u16 sel = ctxt->src.val;
3262 /* Disable writeback. */
3263 ctxt->dst.type = OP_NONE;
3264 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3267 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3272 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3273 if (rc == X86EMUL_CONTINUE)
3274 ctxt->ops->invlpg(ctxt, linear);
3275 /* Disable writeback. */
3276 ctxt->dst.type = OP_NONE;
3277 return X86EMUL_CONTINUE;
3280 static int em_clts(struct x86_emulate_ctxt *ctxt)
3284 cr0 = ctxt->ops->get_cr(ctxt, 0);
3286 ctxt->ops->set_cr(ctxt, 0, cr0);
3287 return X86EMUL_CONTINUE;
3290 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3292 int rc = ctxt->ops->fix_hypercall(ctxt);
3294 if (rc != X86EMUL_CONTINUE)
3297 /* Let the processor re-execute the fixed hypercall */
3298 ctxt->_eip = ctxt->eip;
3299 /* Disable writeback. */
3300 ctxt->dst.type = OP_NONE;
3301 return X86EMUL_CONTINUE;
3304 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3305 void (*get)(struct x86_emulate_ctxt *ctxt,
3306 struct desc_ptr *ptr))
3308 struct desc_ptr desc_ptr;
3310 if (ctxt->mode == X86EMUL_MODE_PROT64)
3312 get(ctxt, &desc_ptr);
3313 if (ctxt->op_bytes == 2) {
3315 desc_ptr.address &= 0x00ffffff;
3317 /* Disable writeback. */
3318 ctxt->dst.type = OP_NONE;
3319 return segmented_write(ctxt, ctxt->dst.addr.mem,
3320 &desc_ptr, 2 + ctxt->op_bytes);
3323 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3325 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3328 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3330 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3333 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3335 struct desc_ptr desc_ptr;
3338 if (ctxt->mode == X86EMUL_MODE_PROT64)
3340 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3341 &desc_ptr.size, &desc_ptr.address,
3343 if (rc != X86EMUL_CONTINUE)
3345 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3346 is_noncanonical_address(desc_ptr.address))
3347 return emulate_gp(ctxt, 0);
3349 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3351 ctxt->ops->set_idt(ctxt, &desc_ptr);
3352 /* Disable writeback. */
3353 ctxt->dst.type = OP_NONE;
3354 return X86EMUL_CONTINUE;
3357 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3359 return em_lgdt_lidt(ctxt, true);
3362 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3366 rc = ctxt->ops->fix_hypercall(ctxt);
3368 /* Disable writeback. */
3369 ctxt->dst.type = OP_NONE;
3373 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3375 return em_lgdt_lidt(ctxt, false);
3378 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3380 if (ctxt->dst.type == OP_MEM)
3381 ctxt->dst.bytes = 2;
3382 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3383 return X86EMUL_CONTINUE;
3386 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3388 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3389 | (ctxt->src.val & 0x0f));
3390 ctxt->dst.type = OP_NONE;
3391 return X86EMUL_CONTINUE;
3394 static int em_loop(struct x86_emulate_ctxt *ctxt)
3396 int rc = X86EMUL_CONTINUE;
3398 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3399 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3400 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3401 rc = jmp_rel(ctxt, ctxt->src.val);
3406 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3408 int rc = X86EMUL_CONTINUE;
3410 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3411 rc = jmp_rel(ctxt, ctxt->src.val);
3416 static int em_in(struct x86_emulate_ctxt *ctxt)
3418 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3420 return X86EMUL_IO_NEEDED;
3422 return X86EMUL_CONTINUE;
3425 static int em_out(struct x86_emulate_ctxt *ctxt)
3427 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3429 /* Disable writeback. */
3430 ctxt->dst.type = OP_NONE;
3431 return X86EMUL_CONTINUE;
3434 static int em_cli(struct x86_emulate_ctxt *ctxt)
3436 if (emulator_bad_iopl(ctxt))
3437 return emulate_gp(ctxt, 0);
3439 ctxt->eflags &= ~X86_EFLAGS_IF;
3440 return X86EMUL_CONTINUE;
3443 static int em_sti(struct x86_emulate_ctxt *ctxt)
3445 if (emulator_bad_iopl(ctxt))
3446 return emulate_gp(ctxt, 0);
3448 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3449 ctxt->eflags |= X86_EFLAGS_IF;
3450 return X86EMUL_CONTINUE;
3453 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3455 u32 eax, ebx, ecx, edx;
3457 eax = reg_read(ctxt, VCPU_REGS_RAX);
3458 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3459 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3460 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3461 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3462 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3463 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3464 return X86EMUL_CONTINUE;
3467 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3471 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3472 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3474 ctxt->eflags &= ~0xffUL;
3475 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3476 return X86EMUL_CONTINUE;
3479 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3481 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3482 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3483 return X86EMUL_CONTINUE;
3486 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3488 switch (ctxt->op_bytes) {
3489 #ifdef CONFIG_X86_64
3491 asm("bswap %0" : "+r"(ctxt->dst.val));
3495 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3498 return X86EMUL_CONTINUE;
3501 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3503 /* emulating clflush regardless of cpuid */
3504 return X86EMUL_CONTINUE;
3507 static bool valid_cr(int nr)
3519 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3521 if (!valid_cr(ctxt->modrm_reg))
3522 return emulate_ud(ctxt);
3524 return X86EMUL_CONTINUE;
3527 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3529 u64 new_val = ctxt->src.val64;
3530 int cr = ctxt->modrm_reg;
3533 static u64 cr_reserved_bits[] = {
3534 0xffffffff00000000ULL,
3535 0, 0, 0, /* CR3 checked later */
3542 return emulate_ud(ctxt);
3544 if (new_val & cr_reserved_bits[cr])
3545 return emulate_gp(ctxt, 0);
3550 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3551 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3552 return emulate_gp(ctxt, 0);
3554 cr4 = ctxt->ops->get_cr(ctxt, 4);
3555 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3557 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3558 !(cr4 & X86_CR4_PAE))
3559 return emulate_gp(ctxt, 0);
3566 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3567 if (efer & EFER_LMA)
3568 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
3571 return emulate_gp(ctxt, 0);
3576 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3578 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3579 return emulate_gp(ctxt, 0);
3585 return X86EMUL_CONTINUE;
3588 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3592 ctxt->ops->get_dr(ctxt, 7, &dr7);
3594 /* Check if DR7.Global_Enable is set */
3595 return dr7 & (1 << 13);
3598 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3600 int dr = ctxt->modrm_reg;
3604 return emulate_ud(ctxt);
3606 cr4 = ctxt->ops->get_cr(ctxt, 4);
3607 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3608 return emulate_ud(ctxt);
3610 if (check_dr7_gd(ctxt)) {
3613 ctxt->ops->get_dr(ctxt, 6, &dr6);
3615 dr6 |= DR6_BD | DR6_RTM;
3616 ctxt->ops->set_dr(ctxt, 6, dr6);
3617 return emulate_db(ctxt);
3620 return X86EMUL_CONTINUE;
3623 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3625 u64 new_val = ctxt->src.val64;
3626 int dr = ctxt->modrm_reg;
3628 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3629 return emulate_gp(ctxt, 0);
3631 return check_dr_read(ctxt);
3634 static int check_svme(struct x86_emulate_ctxt *ctxt)
3638 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3640 if (!(efer & EFER_SVME))
3641 return emulate_ud(ctxt);
3643 return X86EMUL_CONTINUE;
3646 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3648 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3650 /* Valid physical address? */
3651 if (rax & 0xffff000000000000ULL)
3652 return emulate_gp(ctxt, 0);
3654 return check_svme(ctxt);
3657 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3659 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3661 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3662 return emulate_ud(ctxt);
3664 return X86EMUL_CONTINUE;
3667 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3669 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3670 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3672 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3673 ctxt->ops->check_pmc(ctxt, rcx))
3674 return emulate_gp(ctxt, 0);
3676 return X86EMUL_CONTINUE;
3679 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3681 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3682 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3683 return emulate_gp(ctxt, 0);
3685 return X86EMUL_CONTINUE;
3688 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3690 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3691 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3692 return emulate_gp(ctxt, 0);
3694 return X86EMUL_CONTINUE;
3697 #define D(_y) { .flags = (_y) }
3698 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3699 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3700 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3701 #define N D(NotImpl)
3702 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3703 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3704 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3705 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3706 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3707 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3708 #define II(_f, _e, _i) \
3709 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3710 #define IIP(_f, _e, _i, _p) \
3711 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3712 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3713 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3715 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3716 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3717 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3718 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3719 #define I2bvIP(_f, _e, _i, _p) \
3720 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3722 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3723 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3724 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
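/*
 * F6ALU(flags, op) expands to the six classic ALU encodings in table
 * order: the two r/m,reg forms (byte then word, lockable), the two
 * reg,r/m forms, then the two accumulator,imm forms - exactly the
 * 0x00..0x05 pattern each arithmetic row of opcode_table follows.
 * (The X2..X16 helpers used below similarly stamp out one entry a
 * fixed number of times.)
 */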
3726 static const struct opcode group7_rm0[] = {
3728 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3732 static const struct opcode group7_rm1[] = {
3733 DI(SrcNone | Priv, monitor),
3734 DI(SrcNone | Priv, mwait),
3738 static const struct opcode group7_rm3[] = {
3739 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3740 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
3741 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3742 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3743 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3744 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3745 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3746 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3749 static const struct opcode group7_rm7[] = {
3751 DIP(SrcNone, rdtscp, check_rdtsc),
3755 static const struct opcode group1[] = {
3757 F(Lock | PageTable, em_or),
3760 F(Lock | PageTable, em_and),
3766 static const struct opcode group1A[] = {
3767 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3770 static const struct opcode group2[] = {
3771 F(DstMem | ModRM, em_rol),
3772 F(DstMem | ModRM, em_ror),
3773 F(DstMem | ModRM, em_rcl),
3774 F(DstMem | ModRM, em_rcr),
3775 F(DstMem | ModRM, em_shl),
3776 F(DstMem | ModRM, em_shr),
3777 F(DstMem | ModRM, em_shl),
3778 F(DstMem | ModRM, em_sar),
3781 static const struct opcode group3[] = {
3782 F(DstMem | SrcImm | NoWrite, em_test),
3783 F(DstMem | SrcImm | NoWrite, em_test),
3784 F(DstMem | SrcNone | Lock, em_not),
3785 F(DstMem | SrcNone | Lock, em_neg),
3786 F(DstXacc | Src2Mem, em_mul_ex),
3787 F(DstXacc | Src2Mem, em_imul_ex),
3788 F(DstXacc | Src2Mem, em_div_ex),
3789 F(DstXacc | Src2Mem, em_idiv_ex),
3792 static const struct opcode group4[] = {
3793 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3794 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3798 static const struct opcode group5[] = {
3799 F(DstMem | SrcNone | Lock, em_inc),
3800 F(DstMem | SrcNone | Lock, em_dec),
3801 I(SrcMem | NearBranch, em_call_near_abs),
3802 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3803 I(SrcMem | NearBranch, em_jmp_abs),
3804 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
3805 I(SrcMem | Stack, em_push), D(Undefined),
3808 static const struct opcode group6[] = {
3811 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3812 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3816 static const struct group_dual group7 = { {
3817 II(Mov | DstMem, em_sgdt, sgdt),
3818 II(Mov | DstMem, em_sidt, sidt),
3819 II(SrcMem | Priv, em_lgdt, lgdt),
3820 II(SrcMem | Priv, em_lidt, lidt),
3821 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3822 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3823 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3827 N, EXT(0, group7_rm3),
3828 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3829 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3833 static const struct opcode group8[] = {
3835 F(DstMem | SrcImmByte | NoWrite, em_bt),
3836 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3837 F(DstMem | SrcImmByte | Lock, em_btr),
3838 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3841 static const struct group_dual group9 = { {
3842 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3844 N, N, N, N, N, N, N, N,
3847 static const struct opcode group11[] = {
3848 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3852 static const struct gprefix pfx_0f_ae_7 = {
3853 I(SrcMem | ByteOp, em_clflush), N, N, N,
3856 static const struct group_dual group15 = { {
3857 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3859 N, N, N, N, N, N, N, N,
3862 static const struct gprefix pfx_0f_6f_0f_7f = {
3863 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3866 static const struct gprefix pfx_0f_2b = {
3867 I(0, em_mov), I(0, em_mov), N, N,
3870 static const struct gprefix pfx_0f_28_0f_29 = {
3871 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3874 static const struct gprefix pfx_0f_e7 = {
3875 N, I(Sse, em_mov), N, N,
3878 static const struct escape escape_d9 = { {
3879 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3882 N, N, N, N, N, N, N, N,
3884 N, N, N, N, N, N, N, N,
3886 N, N, N, N, N, N, N, N,
3888 N, N, N, N, N, N, N, N,
3890 N, N, N, N, N, N, N, N,
3892 N, N, N, N, N, N, N, N,
3894 N, N, N, N, N, N, N, N,
3896 N, N, N, N, N, N, N, N,
3899 static const struct escape escape_db = { {
3900 N, N, N, N, N, N, N, N,
3903 N, N, N, N, N, N, N, N,
3905 N, N, N, N, N, N, N, N,
3907 N, N, N, N, N, N, N, N,
3909 N, N, N, N, N, N, N, N,
3911 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3913 N, N, N, N, N, N, N, N,
3915 N, N, N, N, N, N, N, N,
3917 N, N, N, N, N, N, N, N,
3920 static const struct escape escape_dd = { {
3921 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3924 N, N, N, N, N, N, N, N,
3926 N, N, N, N, N, N, N, N,
3928 N, N, N, N, N, N, N, N,
3930 N, N, N, N, N, N, N, N,
3932 N, N, N, N, N, N, N, N,
3934 N, N, N, N, N, N, N, N,
3936 N, N, N, N, N, N, N, N,
3938 N, N, N, N, N, N, N, N,
3941 static const struct opcode opcode_table[256] = {
3943 F6ALU(Lock, em_add),
3944 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3945 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3947 F6ALU(Lock | PageTable, em_or),
3948 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3951 F6ALU(Lock, em_adc),
3952 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3953 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3955 F6ALU(Lock, em_sbb),
3956 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3957 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3959 F6ALU(Lock | PageTable, em_and), N, N,
3961 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3963 F6ALU(Lock, em_xor), N, N,
3965 F6ALU(NoWrite, em_cmp), N, N,
3967 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3969 X8(I(SrcReg | Stack, em_push)),
3971 X8(I(DstReg | Stack, em_pop)),
3973 I(ImplicitOps | Stack | No64, em_pusha),
3974 I(ImplicitOps | Stack | No64, em_popa),
3975 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3978 I(SrcImm | Mov | Stack, em_push),
3979 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3980 I(SrcImmByte | Mov | Stack, em_push),
3981 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3982 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3983 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3985 X16(D(SrcImmByte | NearBranch)),
3987 G(ByteOp | DstMem | SrcImm, group1),
3988 G(DstMem | SrcImm, group1),
3989 G(ByteOp | DstMem | SrcImm | No64, group1),
3990 G(DstMem | SrcImmByte, group1),
3991 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3992 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3994 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3995 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3996 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3997 D(ModRM | SrcMem | NoAccess | DstReg),
3998 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4001 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4003 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4004 I(SrcImmFAddr | No64, em_call_far), N,
4005 II(ImplicitOps | Stack, em_pushf, pushf),
4006 II(ImplicitOps | Stack, em_popf, popf),
4007 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4009 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4010 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4011 I2bv(SrcSI | DstDI | Mov | String, em_mov),
4012 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
4014 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4015 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4016 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4017 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4019 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4021 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4023 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4024 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4025 I(ImplicitOps | NearBranch, em_ret),
4026 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4027 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4028 G(ByteOp, group11), G(0, group11),
4030 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4031 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4032 I(ImplicitOps | Stack, em_ret_far),
4033 D(ImplicitOps), DI(SrcImmByte, intn),
4034 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4036 G(Src2One | ByteOp, group2), G(Src2One, group2),
4037 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4038 I(DstAcc | SrcImmUByte | No64, em_aam),
4039 I(DstAcc | SrcImmUByte | No64, em_aad),
4040 F(DstAcc | ByteOp | No64, em_salc),
4041 I(DstAcc | SrcXLat | ByteOp, em_mov),
4043 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4045 X3(I(SrcImmByte | NearBranch, em_loop)),
4046 I(SrcImmByte | NearBranch, em_jcxz),
4047 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4048 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4050 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4051 I(SrcImmFAddr | No64, em_jmp_far),
4052 D(SrcImmByte | ImplicitOps | NearBranch),
4053 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4054 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4056 N, DI(ImplicitOps, icebp), N, N,
4057 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4058 G(ByteOp, group3), G(0, group3),
4060 D(ImplicitOps), D(ImplicitOps),
4061 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4062 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4065 static const struct opcode twobyte_table[256] = {
4067 G(0, group6), GD(0, &group7), N, N,
4068 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4069 II(ImplicitOps | Priv, em_clts, clts), N,
4070 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4071 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4073 N, N, N, N, N, N, N, N,
4074 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4075 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4077 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4078 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4079 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4081 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4084 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4085 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4086 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4089 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4090 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4091 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4092 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4093 I(ImplicitOps | EmulateOnUD, em_sysenter),
4094 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4096 N, N, N, N, N, N, N, N,
4098 X16(D(DstReg | SrcMem | ModRM)),
4100 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4105 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4110 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4112 X16(D(SrcImm | NearBranch)),
4114 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4116 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4117 II(ImplicitOps, em_cpuid, cpuid),
4118 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4119 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4120 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4122 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4123 DI(ImplicitOps, rsm),
4124 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4125 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4126 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4127 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4129 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
4130 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4131 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4132 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4133 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4134 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4138 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4139 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4140 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4142 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4143 N, D(DstMem | SrcReg | ModRM | Mov),
4144 N, N, N, GD(0, &group9),
4146 X8(I(DstReg, em_bswap)),
4148 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4150 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4151 N, N, N, N, N, N, N, N,
4153 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4156 static const struct gprefix three_byte_0f_38_f0 = {
4157 I(DstReg | SrcMem | Mov, em_movbe), N, N, N
4160 static const struct gprefix three_byte_0f_38_f1 = {
4161 I(DstMem | SrcReg | Mov, em_movbe), N, N, N
4165 * Insns below are selected by the prefix, which is indexed by the third
 * opcode byte.
4168 static const struct opcode opcode_map_0f_38[256] = {
4170 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4172 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4174 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
4175 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
4194 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4198 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4204 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4205 unsigned size, bool sign_extension)
4207 int rc = X86EMUL_CONTINUE;
4211 op->addr.mem.ea = ctxt->_eip;
4212 /* NB. Immediates are sign-extended as necessary. */
4213 switch (op->bytes) {
4215 op->val = insn_fetch(s8, ctxt);
4218 op->val = insn_fetch(s16, ctxt);
4221 op->val = insn_fetch(s32, ctxt);
4224 op->val = insn_fetch(s64, ctxt);
4227 if (!sign_extension) {
4228 switch (op->bytes) {
4236 op->val &= 0xffffffff;
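/*
 * Example of the masking above: a one-byte immediate 0x80 is fetched
 * as s8 and sign-extends to 0xffffffffffffff80; when sign_extension
 * is false it is masked back down to its original width, leaving
 * op->val = 0x80.
 */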
4244 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4247 int rc = X86EMUL_CONTINUE;
4251 decode_register_operand(ctxt, op);
4254 rc = decode_imm(ctxt, op, 1, false);
4257 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4261 if (ctxt->d & BitOp)
4262 fetch_bit_operand(ctxt);
4263 op->orig_val = op->val;
4266 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4270 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4271 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4272 fetch_register_operand(op);
4273 op->orig_val = op->val;
4277 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4278 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4279 fetch_register_operand(op);
4280 op->orig_val = op->val;
4283 if (ctxt->d & ByteOp) {
4288 op->bytes = ctxt->op_bytes;
4289 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4290 fetch_register_operand(op);
4291 op->orig_val = op->val;
4295 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4297 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
4298 op->addr.mem.seg = VCPU_SREG_ES;
4305 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4306 fetch_register_operand(op);
4310 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4313 rc = decode_imm(ctxt, op, 1, true);
4320 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4323 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4326 ctxt->memop.bytes = 1;
4327 if (ctxt->memop.type == OP_REG) {
4328 ctxt->memop.addr.reg = decode_register(ctxt,
4329 ctxt->modrm_rm, true);
4330 fetch_register_operand(&ctxt->memop);
4334 ctxt->memop.bytes = 2;
4337 ctxt->memop.bytes = 4;
4340 rc = decode_imm(ctxt, op, 2, false);
4343 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4347 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4349 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
4350 op->addr.mem.seg = ctxt->seg_override;
4356 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4358 register_address(ctxt,
4359 reg_read(ctxt, VCPU_REGS_RBX) +
4360 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4361 op->addr.mem.seg = ctxt->seg_override;
4366 op->addr.mem.ea = ctxt->_eip;
4367 op->bytes = ctxt->op_bytes + 2;
4368 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4371 ctxt->memop.bytes = ctxt->op_bytes + 2;
4374 op->val = VCPU_SREG_ES;
4377 op->val = VCPU_SREG_CS;
4380 op->val = VCPU_SREG_SS;
4383 op->val = VCPU_SREG_DS;
4386 op->val = VCPU_SREG_FS;
4389 op->val = VCPU_SREG_GS;
4392 /* Special instructions do their own operand decoding. */
4394 op->type = OP_NONE; /* Disable writeback. */
4402 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4404 int rc = X86EMUL_CONTINUE;
4405 int mode = ctxt->mode;
4406 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4407 bool op_prefix = false;
4408 bool has_seg_override = false;
4409 struct opcode opcode;
4411 ctxt->memop.type = OP_NONE;
4412 ctxt->memopp = NULL;
4413 ctxt->_eip = ctxt->eip;
4414 ctxt->fetch.ptr = ctxt->fetch.data;
4415 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4416 ctxt->opcode_len = 1;
4418 memcpy(ctxt->fetch.data, insn, insn_len);
4420 rc = __do_insn_fetch_bytes(ctxt, 1);
4421 if (rc != X86EMUL_CONTINUE)
4426 case X86EMUL_MODE_REAL:
4427 case X86EMUL_MODE_VM86:
4428 case X86EMUL_MODE_PROT16:
4429 def_op_bytes = def_ad_bytes = 2;
4431 case X86EMUL_MODE_PROT32:
4432 def_op_bytes = def_ad_bytes = 4;
4434 #ifdef CONFIG_X86_64
4435 case X86EMUL_MODE_PROT64:
4441 return EMULATION_FAILED;
4444 ctxt->op_bytes = def_op_bytes;
4445 ctxt->ad_bytes = def_ad_bytes;
4447 /* Legacy prefixes. */
4449 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4450 case 0x66: /* operand-size override */
4452 /* switch between 2/4 bytes */
4453 ctxt->op_bytes = def_op_bytes ^ 6;
4455 case 0x67: /* address-size override */
4456 if (mode == X86EMUL_MODE_PROT64)
4457 /* switch between 4/8 bytes */
4458 ctxt->ad_bytes = def_ad_bytes ^ 12;
4460 /* switch between 2/4 bytes */
4461 ctxt->ad_bytes = def_ad_bytes ^ 6;
4463 case 0x26: /* ES override */
4464 case 0x2e: /* CS override */
4465 case 0x36: /* SS override */
4466 case 0x3e: /* DS override */
4467 has_seg_override = true;
4468 ctxt->seg_override = (ctxt->b >> 3) & 3;
4470 case 0x64: /* FS override */
4471 case 0x65: /* GS override */
4472 has_seg_override = true;
4473 ctxt->seg_override = ctxt->b & 7;
4475 case 0x40 ... 0x4f: /* REX */
4476 if (mode != X86EMUL_MODE_PROT64)
4478 ctxt->rex_prefix = ctxt->b;
4480 case 0xf0: /* LOCK */
4481 ctxt->lock_prefix = 1;
4483 case 0xf2: /* REPNE/REPNZ */
4484 case 0xf3: /* REP/REPE/REPZ */
4485 ctxt->rep_prefix = ctxt->b;
4491 /* Any legacy prefix after a REX prefix nullifies its effect. */
4493 ctxt->rex_prefix = 0;
4499 if (ctxt->rex_prefix & 8)
4500 ctxt->op_bytes = 8; /* REX.W */
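/*
 * REX is encoded as 0x40 | W R X B: bit 3 (W) selects the 64-bit
 * operand size handled above, while R, X and B extend the ModRM reg,
 * SIB index and rm/base fields and are consumed by decode_modrm().
 */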
4502 /* Opcode byte(s). */
4503 opcode = opcode_table[ctxt->b];
4504 /* Two-byte opcode? */
4505 if (ctxt->b == 0x0f) {
4506 ctxt->opcode_len = 2;
4507 ctxt->b = insn_fetch(u8, ctxt);
4508 opcode = twobyte_table[ctxt->b];
4510 /* 0F_38 opcode map */
4511 if (ctxt->b == 0x38) {
4512 ctxt->opcode_len = 3;
4513 ctxt->b = insn_fetch(u8, ctxt);
4514 opcode = opcode_map_0f_38[ctxt->b];
4517 ctxt->d = opcode.flags;
4519 if (ctxt->d & ModRM)
4520 ctxt->modrm = insn_fetch(u8, ctxt);
4522 /* vex-prefix instructions are not implemented */
4523 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4524 (mode == X86EMUL_MODE_PROT64 ||
4525 (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
4529 while (ctxt->d & GroupMask) {
4530 switch (ctxt->d & GroupMask) {
4532 goffset = (ctxt->modrm >> 3) & 7;
4533 opcode = opcode.u.group[goffset];
4536 goffset = (ctxt->modrm >> 3) & 7;
4537 if ((ctxt->modrm >> 6) == 3)
4538 opcode = opcode.u.gdual->mod3[goffset];
4540 opcode = opcode.u.gdual->mod012[goffset];
4543 goffset = ctxt->modrm & 7;
4544 opcode = opcode.u.group[goffset];
4547 if (ctxt->rep_prefix && op_prefix)
4548 return EMULATION_FAILED;
4549 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4550 switch (simd_prefix) {
4551 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4552 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4553 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4554 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4558 if (ctxt->modrm > 0xbf)
4559 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4561 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4564 return EMULATION_FAILED;
4567 ctxt->d &= ~(u64)GroupMask;
4568 ctxt->d |= opcode.flags;
4573 return EMULATION_FAILED;
4575 ctxt->execute = opcode.u.execute;
4577 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4578 return EMULATION_FAILED;
4580 if (unlikely(ctxt->d &
4581 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch))) {
4583 * These are copied unconditionally here, and checked unconditionally
4584 * in x86_emulate_insn.
4586 ctxt->check_perm = opcode.check_perm;
4587 ctxt->intercept = opcode.intercept;
4589 if (ctxt->d & NotImpl)
4590 return EMULATION_FAILED;
4592 if (mode == X86EMUL_MODE_PROT64) {
4593 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4595 else if (ctxt->d & NearBranch)
4599 if (ctxt->d & Op3264) {
4600 if (mode == X86EMUL_MODE_PROT64)
4607 ctxt->op_bytes = 16;
4608 else if (ctxt->d & Mmx)
4612 /* ModRM and SIB bytes. */
4613 if (ctxt->d & ModRM) {
4614 rc = decode_modrm(ctxt, &ctxt->memop);
4615 if (!has_seg_override) {
4616 has_seg_override = true;
4617 ctxt->seg_override = ctxt->modrm_seg;
4619 } else if (ctxt->d & MemAbs)
4620 rc = decode_abs(ctxt, &ctxt->memop);
4621 if (rc != X86EMUL_CONTINUE)
4624 if (!has_seg_override)
4625 ctxt->seg_override = VCPU_SREG_DS;
4627 ctxt->memop.addr.mem.seg = ctxt->seg_override;
4630 * Decode and fetch the source operand: register, memory
 * or immediate.
4633 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4634 if (rc != X86EMUL_CONTINUE)
4638 * Decode and fetch the second source operand: register, memory
 * or immediate.
4641 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4642 if (rc != X86EMUL_CONTINUE)
4645 /* Decode and fetch the destination operand: register or memory. */
4646 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4648 if (ctxt->rip_relative)
4649 ctxt->memopp->addr.mem.ea += ctxt->_eip;
4652 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4655 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4657 return ctxt->d & PageTable;
4660 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4662 /* The second termination condition only applies to REPE
4663  * and REPNE.  Test whether the repeat string operation prefix is
4664  * REPE/REPZ or REPNE/REPNZ and, if so, check the
4665  * corresponding termination condition according to:
4666 * - if REPE/REPZ and ZF = 0 then done
4667 * - if REPNE/REPNZ and ZF = 1 then done
4669 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4670 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4671 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4672 ((ctxt->eflags & EFLG_ZF) == 0))
4673 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4674 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4680 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4684 ctxt->ops->get_fpu(ctxt);
4685 asm volatile("1: fwait \n\t"
4687 ".pushsection .fixup,\"ax\" \n\t"
4689 "movb $1, %[fault] \n\t"
4692 _ASM_EXTABLE(1b, 3b)
4693 : [fault]"+qm"(fault));
4694 ctxt->ops->put_fpu(ctxt);
4696 if (unlikely(fault))
4697 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4699 return X86EMUL_CONTINUE;
4702 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4705 if (op->type == OP_MM)
4706 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4709 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4711 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4712 if (!(ctxt->d & ByteOp))
4713 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4714 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4715 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4717 : "c"(ctxt->src2.val));
4718 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4719 if (!fop) /* exception is returned in fop variable */
4720 return emulate_de(ctxt);
4721 return X86EMUL_CONTINUE;
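/*
 * fastop() dispatch in a nutshell: each fastop table packs one entry
 * point per operand size at FASTOP_SIZE strides, so __ffs(dst.bytes)
 * (0..3 for 1/2/4/8-byte operands) selects the right variant.  Guest
 * flags are marshalled through the call with pushf/popf, and a NULL
 * fop on return is how the division helpers report #DE.
 */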
4724 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
 /* Zero the decode cache: every field from rip_relative up to (but
  * not including) modrm is laid out contiguously in the ctxt. */
4726 memset(&ctxt->rip_relative, 0,
4727        (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
4729 ctxt->io_read.pos = 0;
4730 ctxt->io_read.end = 0;
4731 ctxt->mem_read.end = 0;
4734 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4736 const struct x86_emulate_ops *ops = ctxt->ops;
4737 int rc = X86EMUL_CONTINUE;
4738 int saved_dst_type = ctxt->dst.type;
4740 ctxt->mem_read.pos = 0;
4742 /* LOCK prefix is allowed only with some instructions */
4743 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4744 rc = emulate_ud(ctxt);
4748 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4749 rc = emulate_ud(ctxt);

        if (unlikely(ctxt->d &
                     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
                if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
                    (ctxt->d & Undefined)) {
                        rc = emulate_ud(ctxt);
                        goto done;
                }

                if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
                    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
                        rc = emulate_ud(ctxt);
                        goto done;
                }

                if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
                        rc = emulate_nm(ctxt);
                        goto done;
                }

                if (ctxt->d & Mmx) {
                        rc = flush_pending_x87_faults(ctxt);
                        if (rc != X86EMUL_CONTINUE)
                                goto done;
                        /*
                         * Now that we know the fpu is exception safe, we can fetch
                         * operands from it.
                         */
                        fetch_possible_mmx_operand(ctxt, &ctxt->src);
                        fetch_possible_mmx_operand(ctxt, &ctxt->src2);
                        if (!(ctxt->d & Mov))
                                fetch_possible_mmx_operand(ctxt, &ctxt->dst);
                }

                if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
                        rc = emulator_check_intercept(ctxt, ctxt->intercept,
                                                      X86_ICPT_PRE_EXCEPT);
                        if (rc != X86EMUL_CONTINUE)
                                goto done;
                }

                /* Privileged instructions can be executed only at CPL 0 */
                if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
                        if (ctxt->d & PrivUD)
                                rc = emulate_ud(ctxt);
                        else
                                rc = emulate_gp(ctxt, 0);
                        goto done;
                }

                /* Instruction can only be executed in protected mode */
                if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
                        rc = emulate_ud(ctxt);
                        goto done;
                }

                /* Do instruction specific permission checks */
                if (ctxt->d & CheckPerm) {
                        rc = ctxt->check_perm(ctxt);
                        if (rc != X86EMUL_CONTINUE)
                                goto done;
                }

                if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
                        rc = emulator_check_intercept(ctxt, ctxt->intercept,
                                                      X86_ICPT_POST_EXCEPT);
                        if (rc != X86EMUL_CONTINUE)
                                goto done;
                }

                if (ctxt->rep_prefix && (ctxt->d & String)) {
                        /* All REP prefixes have the same first termination condition */
                        if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
                                ctxt->eip = ctxt->_eip;
                                ctxt->eflags &= ~EFLG_RF;
                                goto done;
                        }
                }
        }
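
        /*
         * All decode-time checks have passed; fetch source operands that
         * live in memory. NoAccess instructions (such as lea) use the
         * computed address without reading through it.
         */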
        if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
                rc = segmented_read(ctxt, ctxt->src.addr.mem,
                                    ctxt->src.valptr, ctxt->src.bytes);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
                ctxt->src.orig_val64 = ctxt->src.val64;
        }

        if (ctxt->src2.type == OP_MEM) {
                rc = segmented_read(ctxt, ctxt->src2.addr.mem,
                                    &ctxt->src2.val, ctxt->src2.bytes);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }

        if ((ctxt->d & DstMask) == ImplicitOps)
                goto special_insn;

        if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
                /* optimisation - avoid slow emulated read if Mov */
                rc = segmented_read(ctxt, ctxt->dst.addr.mem,
                                    &ctxt->dst.val, ctxt->dst.bytes);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }
        ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

        if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
                rc = emulator_check_intercept(ctxt, ctxt->intercept,
                                              X86_ICPT_POST_MEMACCESS);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }

        if (ctxt->rep_prefix && (ctxt->d & String))
                ctxt->eflags |= EFLG_RF;
        else
                ctxt->eflags &= ~EFLG_RF;
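
        /*
         * Dispatch: most opcodes carry an execute callback chosen at
         * decode time, with Fastop entries trampolined through fastop();
         * anything left is handled by the opcode switches below.
         */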
        if (ctxt->execute) {
                if (ctxt->d & Fastop) {
                        void (*fop)(struct fastop *) = (void *)ctxt->execute;
                        rc = fastop(ctxt, fop);
                        if (rc != X86EMUL_CONTINUE)
                                goto done;
                        goto writeback;
                }
                rc = ctxt->execute(ctxt);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
                goto writeback;
        }

        if (ctxt->opcode_len == 2)
                goto twobyte_insn;
        else if (ctxt->opcode_len == 3)
                goto threebyte_insn;

        switch (ctxt->b) {
        case 0x63:              /* movsxd */
                if (ctxt->mode != X86EMUL_MODE_PROT64)
                        goto cannot_emulate;
                ctxt->dst.val = (s32) ctxt->src.val;
                break;
        case 0x70 ... 0x7f:     /* jcc (short) */
                if (test_cc(ctxt->b, ctxt->eflags))
                        rc = jmp_rel(ctxt, ctxt->src.val);
                break;
        case 0x8d:              /* lea r16/r32, m */
                ctxt->dst.val = ctxt->src.addr.mem.ea;
                break;
        case 0x90 ... 0x97:     /* nop / xchg reg, rax */
                if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
                        ctxt->dst.type = OP_NONE;
                else
                        rc = em_xchg(ctxt);
                break;
        case 0x98:              /* cbw/cwde/cdqe */
                switch (ctxt->op_bytes) {
                case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
                case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
                case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
                }
                break;
        case 0xcc:              /* int3 */
                rc = emulate_int(ctxt, 3);
                break;
        case 0xcd:              /* int n */
                rc = emulate_int(ctxt, ctxt->src.val);
                break;
        case 0xce:              /* into */
                if (ctxt->eflags & EFLG_OF)
                        rc = emulate_int(ctxt, 4);
                break;
        case 0xe9:              /* jmp rel */
        case 0xeb:              /* jmp rel short */
                rc = jmp_rel(ctxt, ctxt->src.val);
                ctxt->dst.type = OP_NONE; /* Disable writeback. */
                break;
        case 0xf4:              /* hlt */
                ctxt->ops->halt(ctxt);
                break;
        case 0xf5:              /* cmc */
                /* complement carry flag from eflags reg */
                ctxt->eflags ^= EFLG_CF;
                break;
        case 0xf8:              /* clc */
                ctxt->eflags &= ~EFLG_CF;
                break;
        case 0xf9:              /* stc */
                ctxt->eflags |= EFLG_CF;
                break;
        case 0xfc:              /* cld */
                ctxt->eflags &= ~EFLG_DF;
                break;
        case 0xfd:              /* std */
                ctxt->eflags |= EFLG_DF;
                break;
        default:
                goto cannot_emulate;
        }

        if (rc != X86EMUL_CONTINUE)
                goto done;
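
        /*
         * Writeback phase: instructions that also modify their source
         * operand (SrcWrite) commit it here, and the destination is
         * written back unless NoWrite suppresses it.
         */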
writeback:
        if (ctxt->d & SrcWrite) {
                BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
                rc = writeback(ctxt, &ctxt->src);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }
        if (!(ctxt->d & NoWrite)) {
                rc = writeback(ctxt, &ctxt->dst);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }

        /*
         * restore dst type in case the decoding will be reused
         * (happens for string instructions)
         */
        ctxt->dst.type = saved_dst_type;

        if ((ctxt->d & SrcMask) == SrcSI)
                string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

        if ((ctxt->d & DstMask) == DstDI)
                string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
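
        /*
         * For REP string instructions, retire the iterations that just
         * completed: RCX drops by the iteration count (more than one when
         * string I/O is batched through the read-ahead cache), and the
         * instruction restarts unless a termination condition was hit.
         */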
        if (ctxt->rep_prefix && (ctxt->d & String)) {
                unsigned int count;
                struct read_cache *r = &ctxt->io_read;

                if ((ctxt->d & SrcMask) == SrcSI)
                        count = ctxt->src.count;
                else
                        count = ctxt->dst.count;
                register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
                                           -count);

                if (!string_insn_completed(ctxt)) {
                        /*
                         * Re-enter the guest when the pio read-ahead buffer
                         * is empty or, if it is not used, after every 1024
                         * iterations.
                         */
                        if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
                            (r->end == 0 || r->end != r->pos)) {
                                /*
                                 * Reset read cache. Usually happens before
                                 * decode, but since instruction is restarted
                                 * we have to do it here.
                                 */
                                ctxt->mem_read.end = 0;
                                writeback_registers(ctxt);
                                return EMULATION_RESTART;
                        }
                        goto done; /* skip rip writeback */
                }
                ctxt->eflags &= ~EFLG_RF;
        }

        ctxt->eip = ctxt->_eip;

done:
        if (rc == X86EMUL_PROPAGATE_FAULT) {
                WARN_ON(ctxt->exception.vector > 0x1f);
                ctxt->have_exception = true;
        }
        if (rc == X86EMUL_INTERCEPTED)
                return EMULATION_INTERCEPTED;

        if (rc == X86EMUL_CONTINUE)
                writeback_registers(ctxt);

        return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
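
/*
 * Two-byte (0x0f-prefixed) opcodes that are still handled inline
 * rather than through an execute callback.
 */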
twobyte_insn:
        switch (ctxt->b) {
        case 0x09:              /* wbinvd */
                (ctxt->ops->wbinvd)(ctxt);
                break;
        case 0x08:              /* invd */
        case 0x0d:              /* GrpP (prefetch) */
        case 0x18:              /* Grp16 (prefetch/nop) */
        case 0x1f:              /* nop */
                break;
        case 0x20:              /* mov cr, reg */
                ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
                break;
        case 0x21:              /* mov from dr to reg */
                ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
                break;
        case 0x40 ... 0x4f:     /* cmov */
                if (test_cc(ctxt->b, ctxt->eflags))
                        ctxt->dst.val = ctxt->src.val;
                else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
                         ctxt->op_bytes != 4)
                        ctxt->dst.type = OP_NONE; /* no writeback */
                break;
        case 0x80 ... 0x8f:     /* jcc (near) */
                if (test_cc(ctxt->b, ctxt->eflags))
                        rc = jmp_rel(ctxt, ctxt->src.val);
                break;
        case 0x90 ... 0x9f:     /* setcc r/m8 */
                ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
                break;
        case 0xb6 ... 0xb7:     /* movzx */
                ctxt->dst.bytes = ctxt->op_bytes;
                ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
                                                       : (u16) ctxt->src.val;
                break;
        case 0xbe ... 0xbf:     /* movsx */
                ctxt->dst.bytes = ctxt->op_bytes;
                ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val
                                                       : (s16) ctxt->src.val;
                break;
        case 0xc3:              /* movnti */
                ctxt->dst.bytes = ctxt->op_bytes;
                ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val
                                                      : (u32) ctxt->src.val;
                break;
        default:
                goto cannot_emulate;
        }

threebyte_insn:

        if (rc != X86EMUL_CONTINUE)
                goto done;

        goto writeback;

cannot_emulate:
        return EMULATION_FAILED;
}
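
/*
 * Thin wrappers so code outside the emulator can keep the cached GPR
 * copies in the emulation context coherent with the architectural
 * register state.
 */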
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
        invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
        writeback_registers(ctxt);
}