/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
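
/*
 * Illustrative sketch (not part of the original source): entries in the
 * decode tables are built by OR-ing the flag bits defined below.  A
 * hypothetical table line for "mov r/m8, r8" (opcode 0x88) could look like
 *
 *	[0x88] = I(ByteOp | DstMem | SrcReg | ModRM, em_mov)
 *
 * where I() is assumed to pair the decode flags with an execution callback.
 */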
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
#define String      (1<<13)	/* String instruction (rep capable) */
#define Stack       (1<<14)	/* Stack instruction (push/pop) */
#define GroupMask   (7<<15)	/* Opcode uses one of the group mechanisms */
#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)	/* Escape to coprocessor instruction */
#define Sse         (1<<18)	/* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
#define Prot        (1<<21)	/* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22)	/* Emulate if unsupported by the host */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define PageTable   (1 << 29)	/* instruction used to write page table */
#define NotImpl     (1 << 30)	/* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
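
/*
 * Sketch (not in the original source): the X-macros simply replicate their
 * argument list, so a run of identical opcode-table entries can be written
 * on one line.  The hypothetical array below would have six elements:
 */
#if 0
static const int x_macro_demo[] = { X4(1), X2(2) };	/* { 1, 1, 1, 1, 2, 2 } */
#endif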
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
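
/*
 * Sketch (assumption, not the dispatcher from this excerpt): since every
 * handler is exactly FASTOP_SIZE bytes, the variant for an N-byte operand
 * can be reached by pointer arithmetic instead of a jump table:
 */
#if 0
	void (*fop)(struct fastop *) = em_add;		/* hypothetical fastop group */
	fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;	/* 1/2/4/8 -> +0/+8/+16/+24 */
	rc = fastop(ctxt, fop);
#endif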
struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
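
/*
 * Sketch (assumption, illustrating the register cache above): GPRs are read
 * lazily into _regs[] and flushed back only if dirtied, so emulation that
 * never touches a register costs no read_gpr/write_gpr calls.
 */
#if 0
	invalidate_registers(ctxt);			/* empty the cache */
	val = reg_read(ctxt, VCPU_REGS_RAX);		/* fill RAX, mark valid */
	*reg_write(ctxt, VCPU_REGS_RAX) = val + 1;	/* mark RAX dirty */
	writeback_registers(ctxt);			/* flush dirty regs only */
#endif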
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op, dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op, dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op, dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

#define FOP3E(op, dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
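
/*
 * Worked example (not in the original source): for a 32-bit stack segment,
 * ss.d == 1, so stack_mask() returns ~0U >> 0 == 0xffffffff and stack_size()
 * yields (__fls(0xffffffff) + 1) >> 3 == 32 >> 3 == 4 bytes; a 16-bit stack
 * (ss.d == 0) gives mask 0xffff and size 2.
 */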
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg, mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
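
/*
 * Worked example (not in the original source): with desc->g set, a raw
 * 20-bit limit of 0xfffff scales to (0xfffff << 12) | 0xfff == 0xffffffff,
 * i.e. the flat 4GB limit used by the syscall segments further down.
 */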
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
				 bool cs_l)
{
	switch (ctxt->op_bytes) {
	case 2:
		ctxt->_eip = (u16)dst;
		break;
	case 4:
		ctxt->_eip = (u32)dst;
		break;
#ifdef CONFIG_X86_64
	case 8:
		if ((cs_l && is_noncanonical_address(dst)) ||
		    (!cs_l && (dst >> 32) != 0))
			return emulate_gp(ctxt, 0);
		ctxt->_eip = dst;
		break;
#endif
	default:
		WARN(1, "unsupported eip assignment size\n");
	}
	return X86EMUL_CONTINUE;
}
static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			return emulate_gp(ctxt, 0);

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
		     || !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
		    (ctxt->d & NoBigReal)) {
			/* la is between zero and 0xffff */
			if (la > 0xffff)
				goto bad;
			*max_size = 0x10000 - la;
		} else if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim)
				goto bad;
			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		} else {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim)
				goto bad;
			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		}
		if (size > *max_size)
			goto bad;
		cpl = ctxt->ops->cpl(ctxt);
		if (!(desc.type & 8)) {
			/* data segment */
			if (cpl > desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
			if (cpl != desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */
			if (cpl < desc.dpl)
				goto bad;
		}
		break;
	}
	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
		la &= (u32)-1;
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}
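
/*
 * Worked example (not in the original source): for an expand-down data
 * segment with desc.d == 1 and a scaled limit of 0x1000, valid effective
 * addresses are 0x1001..0xffffffff; an access at addr.ea == 0x0800 takes
 * the "goto bad" path above, while one at 0x2000 passes with
 * *max_size == (u64)0xffffffff + 1 - 0x2000 == 0xffffe000.
 */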
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false, linear);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing a page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
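
	/*
	 * Added note (assumption): cur_size never exceeds 15 here, so
	 * 15UL ^ cur_size equals 15 - cur_size, i.e. the bytes still
	 * allowed before the architectural 15-byte instruction limit.
	 */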
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if we still cannot fetch
	 * enough bytes here, we must have hit the 15-byte limit.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})
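
/*
 * Usage sketch (assumption, showing the statement-expression form): the
 * decoder pulls successive instruction fields out of the fetch cache, e.g.
 */
#if 0
	ctxt->b = insn_fetch(u8, ctxt);		/* opcode byte */
	ctxt->modrm = insn_fetch(u8, ctxt);	/* ModRM byte, if present */
#endif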
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH, CH, DH, BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg & 7;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
					       ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}
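
/*
 * Worked example (not in the original source): with ad_bytes == 2, the
 * bytes 0x42 0x10 decode as mod=01 reg=000 rm=010, giving the effective
 * address bp + si + (s8)0x10; because rm is 2, modrm_seg defaults to SS
 * rather than DS, as in the 16-bit table above.
 */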
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
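
/*
 * Worked example (not in the original source): for "bt mem16, reg" with a
 * source bit index of 35, mask is ~15, so sv == 32: the memory operand is
 * rebased by 32 >> 3 == 4 bytes and the in-word bit index becomes
 * 35 & 15 == 3.
 */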
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}
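
/*
 * Worked example (not in the original source): "rep insb" with rcx == 100,
 * rdi == 0xff0 and size == 1 gives in_page == PAGE_SIZE - 0xff0 == 16, so
 * this refill buffers only 16 port reads; later iterations trigger another
 * refill once the page boundary has been crossed.
 */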
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}
/* allowed only for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

/* allowed only for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}
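
/*
 * Worked example (not in the original source): selector 0x2b has index 5,
 * TI == 0 and RPL == 3, so it indexes the GDT: the descriptor is read from
 * dt.address + 5 * 8 == dt.address + 40, and the limit check requires
 * dt.size >= 47.
 */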
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     bool in_task_switch,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
					  sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
}
static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}
static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;
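
	/*
	 * Worked example (not in the original source): real-mode IVT entries
	 * are 4 bytes (offset, then segment), so for irq 8 the handler eip is
	 * read from dt.address + 32 and its cs from dt.address + 34.
	 */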
	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet. */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}
static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not implemented yet. */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
				 VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}
static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	u16 old_cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
				 VCPU_SREG_CS);

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory. */
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->dst.type = OP_REG;
		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->dpl = 0;		/* will be adjusted later */

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
}
static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall is always enabled in long mode, so the cpuid-based vendor
	 * check only matters when some other mode is active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel"): Intel CPUs only support "syscall" in
	 * 64-bit long mode, so a 64-bit guest running a 32-bit compat
	 * application will #UD.  While that behaviour could be papered over
	 * by emulating the AMD response, AMD CPUs cannot be made to behave
	 * like Intel ones, so we follow the stricter Intel semantics.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	}

	return X86EMUL_CONTINUE;
}
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* XXX sysenter/sysexit have not been tested in 64-bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	default:
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = msr_data;

	return X86EMUL_CONTINUE;
}
2388 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2390 const struct x86_emulate_ops *ops = ctxt->ops;
2391 struct desc_struct cs, ss;
2392 u64 msr_data, rcx, rdx;
2394 u16 cs_sel = 0, ss_sel = 0;
2396 /* inject #GP if in real mode or Virtual 8086 mode */
2397 if (ctxt->mode == X86EMUL_MODE_REAL ||
2398 ctxt->mode == X86EMUL_MODE_VM86)
2399 return emulate_gp(ctxt, 0);
2401 setup_syscalls_segments(ctxt, &cs, &ss);
2403 if ((ctxt->rex_prefix & 0x8) != 0x0)
2404 usermode = X86EMUL_MODE_PROT64;
2406 usermode = X86EMUL_MODE_PROT32;
2408 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2409 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2413 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2415 case X86EMUL_MODE_PROT32:
2416 cs_sel = (u16)(msr_data + 16);
2417 if ((msr_data & 0xfffc) == 0x0)
2418 return emulate_gp(ctxt, 0);
2419 ss_sel = (u16)(msr_data + 24);
2421 case X86EMUL_MODE_PROT64:
2422 cs_sel = (u16)(msr_data + 32);
2423 if (msr_data == 0x0)
2424 return emulate_gp(ctxt, 0);
2425 ss_sel = cs_sel + 8;
2428 if (is_noncanonical_address(rcx) ||
2429 is_noncanonical_address(rdx))
2430 return emulate_gp(ctxt, 0);
2433 cs_sel |= SELECTOR_RPL_MASK;
2434 ss_sel |= SELECTOR_RPL_MASK;
2436 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2437 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2440 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2442 return X86EMUL_CONTINUE;
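/*
 * Note on the fixed offsets above: relative to IA32_SYSENTER_CS, SYSEXIT
 * returns to CS+16/SS+24 for a 32-bit exit and CS+32/SS+40 for a 64-bit
 * one; the latter is why ss_sel is simply cs_sel + 8 in the PROT64 case.
 */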
2445 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2448 if (ctxt->mode == X86EMUL_MODE_REAL)
2450 if (ctxt->mode == X86EMUL_MODE_VM86)
2452 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2453 return ctxt->ops->cpl(ctxt) > iopl;
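/*
 * Example: with EFLAGS.IOPL == 0 (the common setting), any access from
 * CPL > 0 is "bad" here and must then pass the TSS I/O permission bitmap
 * check below.
 */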
2456 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2459 const struct x86_emulate_ops *ops = ctxt->ops;
2460 struct desc_struct tr_seg;
2463 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2464 unsigned mask = (1 << len) - 1;
2467 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2470 if (desc_limit_scaled(&tr_seg) < 103)
2472 base = get_desc_base(&tr_seg);
2473 #ifdef CONFIG_X86_64
2474 base |= ((u64)base3) << 32;
2476 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2477 if (r != X86EMUL_CONTINUE)
2479 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2481 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2482 if (r != X86EMUL_CONTINUE)
2484 if ((perm >> bit_idx) & mask)
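/*
 * A minimal self-contained sketch of the bitmap test above, assuming 'perm'
 * holds the two bytes read at base + io_bitmap_ptr + port/8: the access is
 * allowed only if all 'len' bits starting at bit (port & 7) are clear.
 */
static inline bool io_bitmap_allows(u16 perm, u16 port, unsigned int len)
{
	unsigned int mask = (1u << len) - 1;

	return !((perm >> (port & 0x7)) & mask);
}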
2489 static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2495 if (emulator_bad_iopl(ctxt))
2496 if (!emulator_io_port_access_allowed(ctxt, port, len))
2499 ctxt->perm_ok = true;
2504 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2505 struct tss_segment_16 *tss)
2507 tss->ip = ctxt->_eip;
2508 tss->flag = ctxt->eflags;
2509 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2510 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2511 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2512 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2513 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2514 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2515 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2516 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2518 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2519 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2520 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2521 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2522 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2525 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2526 struct tss_segment_16 *tss)
2531 ctxt->_eip = tss->ip;
2532 ctxt->eflags = tss->flag | 2;
2533 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2534 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2535 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2536 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2537 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2538 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2539 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2540 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2543	 * SDM says that segment selectors are loaded before segment descriptors.
2546 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2547 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2548 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2549 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2550 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2555	 * Now load the segment descriptors. If a fault happens at this stage,
2556	 * it is handled in the context of the new task.
2558 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2560 if (ret != X86EMUL_CONTINUE)
2562 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2564 if (ret != X86EMUL_CONTINUE)
2566 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2568 if (ret != X86EMUL_CONTINUE)
2570 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2572 if (ret != X86EMUL_CONTINUE)
2574 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2576 if (ret != X86EMUL_CONTINUE)
2579 return X86EMUL_CONTINUE;
2582 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2583 u16 tss_selector, u16 old_tss_sel,
2584 ulong old_tss_base, struct desc_struct *new_desc)
2586 const struct x86_emulate_ops *ops = ctxt->ops;
2587 struct tss_segment_16 tss_seg;
2589 u32 new_tss_base = get_desc_base(new_desc);
2591 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2593 if (ret != X86EMUL_CONTINUE)
2594 /* FIXME: need to provide precise fault address */
2597 save_state_to_tss16(ctxt, &tss_seg);
2599 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2601 if (ret != X86EMUL_CONTINUE)
2602 /* FIXME: need to provide precise fault address */
2605 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2607 if (ret != X86EMUL_CONTINUE)
2608 /* FIXME: need to provide precise fault address */
2611 if (old_tss_sel != 0xffff) {
2612 tss_seg.prev_task_link = old_tss_sel;
2614 ret = ops->write_std(ctxt, new_tss_base,
2615 &tss_seg.prev_task_link,
2616 sizeof tss_seg.prev_task_link,
2618 if (ret != X86EMUL_CONTINUE)
2619 /* FIXME: need to provide precise fault address */
2623 return load_state_from_tss16(ctxt, &tss_seg);
2626 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2627 struct tss_segment_32 *tss)
2629	/* CR3 and the LDT selector are intentionally not saved */
2630 tss->eip = ctxt->_eip;
2631 tss->eflags = ctxt->eflags;
2632 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2633 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2634 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2635 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2636 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2637 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2638 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2639 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2641 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2642 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2643 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2644 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2645 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2646 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2649 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2650 struct tss_segment_32 *tss)
2655 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2656 return emulate_gp(ctxt, 0);
2657 ctxt->_eip = tss->eip;
2658 ctxt->eflags = tss->eflags | 2;
2660 /* General purpose registers */
2661 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2662 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2663 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2664 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2665 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2666 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2667 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2668 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2671 * SDM says that segment selectors are loaded before segment
2672	 * descriptors. This is important because CPL checks will use CS.RPL.
2675 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2676 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2677 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2678 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2679 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2680 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2681 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2684 * If we're switching between Protected Mode and VM86, we need to make
2685 * sure to update the mode before loading the segment descriptors so
2686 * that the selectors are interpreted correctly.
2688 if (ctxt->eflags & X86_EFLAGS_VM) {
2689 ctxt->mode = X86EMUL_MODE_VM86;
2692 ctxt->mode = X86EMUL_MODE_PROT32;
2697	 * Now load the segment descriptors. If a fault happens at this stage,
2698	 * it is handled in the context of the new task.
2700 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2702 if (ret != X86EMUL_CONTINUE)
2704 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2706 if (ret != X86EMUL_CONTINUE)
2708 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2710 if (ret != X86EMUL_CONTINUE)
2712 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2714 if (ret != X86EMUL_CONTINUE)
2716 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2718 if (ret != X86EMUL_CONTINUE)
2720 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2722 if (ret != X86EMUL_CONTINUE)
2724 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2726 if (ret != X86EMUL_CONTINUE)
2729 return X86EMUL_CONTINUE;
2732 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2733 u16 tss_selector, u16 old_tss_sel,
2734 ulong old_tss_base, struct desc_struct *new_desc)
2736 const struct x86_emulate_ops *ops = ctxt->ops;
2737 struct tss_segment_32 tss_seg;
2739 u32 new_tss_base = get_desc_base(new_desc);
2740 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2741 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2743 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2745 if (ret != X86EMUL_CONTINUE)
2746 /* FIXME: need to provide precise fault address */
2749 save_state_to_tss32(ctxt, &tss_seg);
2751 /* Only GP registers and segment selectors are saved */
2752 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2753 ldt_sel_offset - eip_offset, &ctxt->exception);
2754 if (ret != X86EMUL_CONTINUE)
2755 /* FIXME: need to provide precise fault address */
2758 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2760 if (ret != X86EMUL_CONTINUE)
2761 /* FIXME: need to provide precise fault address */
2764 if (old_tss_sel != 0xffff) {
2765 tss_seg.prev_task_link = old_tss_sel;
2767 ret = ops->write_std(ctxt, new_tss_base,
2768 &tss_seg.prev_task_link,
2769 sizeof tss_seg.prev_task_link,
2771 if (ret != X86EMUL_CONTINUE)
2772 /* FIXME: need to provide precise fault address */
2776 return load_state_from_tss32(ctxt, &tss_seg);
2779 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2780 u16 tss_selector, int idt_index, int reason,
2781 bool has_error_code, u32 error_code)
2783 const struct x86_emulate_ops *ops = ctxt->ops;
2784 struct desc_struct curr_tss_desc, next_tss_desc;
2786 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2787 ulong old_tss_base =
2788 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2792 /* FIXME: old_tss_base == ~0 ? */
2794 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2795 if (ret != X86EMUL_CONTINUE)
2797 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2798 if (ret != X86EMUL_CONTINUE)
2801 /* FIXME: check that next_tss_desc is tss */
2804 * Check privileges. The three cases are task switch caused by...
2806 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2807 * 2. Exception/IRQ/iret: No check is performed
2808 * 3. jmp/call to TSS: Check against DPL of the TSS
2810 if (reason == TASK_SWITCH_GATE) {
2811 if (idt_index != -1) {
2812 /* Software interrupts */
2813 struct desc_struct task_gate_desc;
2816 ret = read_interrupt_descriptor(ctxt, idt_index,
2818 if (ret != X86EMUL_CONTINUE)
2821 dpl = task_gate_desc.dpl;
2822 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2823 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2825 } else if (reason != TASK_SWITCH_IRET) {
2826 int dpl = next_tss_desc.dpl;
2827 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2828 return emulate_gp(ctxt, tss_selector);
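	/*
	 * The minimum limits checked below are the architected TSS image
	 * sizes minus one: 0x67 (103) bytes for a 32-bit TSS and 0x2b (43)
	 * for a 16-bit one.
	 */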
2832 desc_limit = desc_limit_scaled(&next_tss_desc);
2833 if (!next_tss_desc.p ||
2834 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2835 desc_limit < 0x2b)) {
2836 return emulate_ts(ctxt, tss_selector & 0xfffc);
2839 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2840 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2841 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2844 if (reason == TASK_SWITCH_IRET)
2845 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2847	/* Set the back link to the previous task only if the NT bit is set in
2848	   EFLAGS; note that old_tss_sel is not used after this point. */
2849 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2850 old_tss_sel = 0xffff;
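	/* Descriptor type bit 3 distinguishes a 32-bit TSS (type 9/0xB) from
	 * a 16-bit one (type 1/3), and so selects the TSS image format. */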
2852 if (next_tss_desc.type & 8)
2853 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2854 old_tss_base, &next_tss_desc);
2856 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2857 old_tss_base, &next_tss_desc);
2858 if (ret != X86EMUL_CONTINUE)
2861 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2862 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2864 if (reason != TASK_SWITCH_IRET) {
2865 next_tss_desc.type |= (1 << 1); /* set busy flag */
2866 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2869 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2870 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2872 if (has_error_code) {
2873 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2874 ctxt->lock_prefix = 0;
2875 ctxt->src.val = (unsigned long) error_code;
2876 ret = em_push(ctxt);
2882 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2883 u16 tss_selector, int idt_index, int reason,
2884 bool has_error_code, u32 error_code)
2888 invalidate_registers(ctxt);
2889 ctxt->_eip = ctxt->eip;
2890 ctxt->dst.type = OP_NONE;
2892 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2893 has_error_code, error_code);
2895 if (rc == X86EMUL_CONTINUE) {
2896 ctxt->eip = ctxt->_eip;
2897 writeback_registers(ctxt);
2900 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2903 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2906 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2908 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2909 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
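/*
 * Example: with EFLAGS.DF set, a REP MOVS walks SI/DI downwards, so the
 * adjustment above is -op->count * op->bytes for the whole burst.
 */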
2912 static int em_das(struct x86_emulate_ctxt *ctxt)
2915 bool af, cf, old_cf;
2917 cf = ctxt->eflags & X86_EFLAGS_CF;
2923 af = ctxt->eflags & X86_EFLAGS_AF;
2924 if ((al & 0x0f) > 9 || af) {
2926 cf = old_cf | (al >= 250);
2931 if (old_al > 0x99 || old_cf) {
2937 /* Set PF, ZF, SF */
2938 ctxt->src.type = OP_IMM;
2940 ctxt->src.bytes = 1;
2941 fastop(ctxt, em_or);
2942 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2944 ctxt->eflags |= X86_EFLAGS_CF;
2946 ctxt->eflags |= X86_EFLAGS_AF;
2947 return X86EMUL_CONTINUE;
2950 static int em_aam(struct x86_emulate_ctxt *ctxt)
2954 if (ctxt->src.val == 0)
2955 return emulate_de(ctxt);
2957 al = ctxt->dst.val & 0xff;
2958 ah = al / ctxt->src.val;
2959 al %= ctxt->src.val;
2961 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2963 /* Set PF, ZF, SF */
2964 ctxt->src.type = OP_IMM;
2966 ctxt->src.bytes = 1;
2967 fastop(ctxt, em_or);
2969 return X86EMUL_CONTINUE;
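/*
 * Worked example: "mov al, 123; aam" with the default base of 10 yields
 * AH = 12, AL = 3 from the div/mod above; AAD below is the inverse,
 * folding AH * base + AL back into AL.
 */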
2972 static int em_aad(struct x86_emulate_ctxt *ctxt)
2974 u8 al = ctxt->dst.val & 0xff;
2975 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2977 al = (al + (ah * ctxt->src.val)) & 0xff;
2979 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2981 /* Set PF, ZF, SF */
2982 ctxt->src.type = OP_IMM;
2984 ctxt->src.bytes = 1;
2985 fastop(ctxt, em_or);
2987 return X86EMUL_CONTINUE;
2990 static int em_call(struct x86_emulate_ctxt *ctxt)
2993 long rel = ctxt->src.val;
2995 ctxt->src.val = (unsigned long)ctxt->_eip;
2996 rc = jmp_rel(ctxt, rel);
2997 if (rc != X86EMUL_CONTINUE)
2999 return em_push(ctxt);
3002 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3007 struct desc_struct old_desc, new_desc;
3008 const struct x86_emulate_ops *ops = ctxt->ops;
3009 int cpl = ctxt->ops->cpl(ctxt);
3011 old_eip = ctxt->_eip;
3012 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3014 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3015 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3017 if (rc != X86EMUL_CONTINUE)
3018 return rc;
3020 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
3021 if (rc != X86EMUL_CONTINUE)
3024 ctxt->src.val = old_cs;
3026 if (rc != X86EMUL_CONTINUE)
3029 ctxt->src.val = old_eip;
3031	/* If we failed, we tainted the memory, but at the very least we should
	   restore the old CS. */
3033 if (rc != X86EMUL_CONTINUE)
3037 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3042 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3047 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3048 if (rc != X86EMUL_CONTINUE)
3050 rc = assign_eip_near(ctxt, eip);
3051 if (rc != X86EMUL_CONTINUE)
3053 rsp_increment(ctxt, ctxt->src.val);
3054 return X86EMUL_CONTINUE;
3057 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3059 /* Write back the register source. */
3060 ctxt->src.val = ctxt->dst.val;
3061 write_register_operand(&ctxt->src);
3063 /* Write back the memory destination with implicit LOCK prefix. */
3064 ctxt->dst.val = ctxt->src.orig_val;
3065 ctxt->lock_prefix = 1;
3066 return X86EMUL_CONTINUE;
3069 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3071 ctxt->dst.val = ctxt->src2.val;
3072 return fastop(ctxt, em_imul);
3075 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3077 ctxt->dst.type = OP_REG;
3078 ctxt->dst.bytes = ctxt->src.bytes;
3079 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
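	/* Sign-fill: the expression below is all ones when the sign bit of
	 * src is set (e.g. CWD with AX = 0x8000 gives DX = 0xffff) and zero
	 * otherwise. */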
3080 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3082 return X86EMUL_CONTINUE;
3085 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3089 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3090 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3091 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3092 return X86EMUL_CONTINUE;
3095 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3099 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3100 return emulate_gp(ctxt, 0);
3101 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3102 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3103 return X86EMUL_CONTINUE;
3106 static int em_mov(struct x86_emulate_ctxt *ctxt)
3108 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3109 return X86EMUL_CONTINUE;
3112 #define FFL(x) bit(X86_FEATURE_##x)
3114 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3116 u32 ebx, ecx, edx, eax = 1;
3120	 * Check that MOVBE is set in the guest-visible CPUID leaf.
3122 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3123 if (!(ecx & FFL(MOVBE)))
3124 return emulate_ud(ctxt);
3126 switch (ctxt->op_bytes) {
3129 * From MOVBE definition: "...When the operand size is 16 bits,
3130 * the upper word of the destination register remains unchanged
3133	 * Casting either ->valptr or ->val to u16 breaks the strict-aliasing
3134	 * rules, so we have to do the operation almost by hand.
3136 tmp = (u16)ctxt->src.val;
3137 ctxt->dst.val &= ~0xffffUL;
3138 ctxt->dst.val |= (unsigned long)swab16(tmp);
3141 ctxt->dst.val = swab32((u32)ctxt->src.val);
3144 ctxt->dst.val = swab64(ctxt->src.val);
3149 return X86EMUL_CONTINUE;
3152 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3154 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3155 return emulate_gp(ctxt, 0);
3157 /* Disable writeback. */
3158 ctxt->dst.type = OP_NONE;
3159 return X86EMUL_CONTINUE;
3162 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3166 if (ctxt->mode == X86EMUL_MODE_PROT64)
3167 val = ctxt->src.val & ~0ULL;
3169 val = ctxt->src.val & ~0U;
3171 /* #UD condition is already handled. */
3172 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3173 return emulate_gp(ctxt, 0);
3175 /* Disable writeback. */
3176 ctxt->dst.type = OP_NONE;
3177 return X86EMUL_CONTINUE;
3180 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3184 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3185 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3186 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3187 return emulate_gp(ctxt, 0);
3189 return X86EMUL_CONTINUE;
3192 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3196 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3197 return emulate_gp(ctxt, 0);
3199 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3200 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3201 return X86EMUL_CONTINUE;
3204 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3206 if (ctxt->modrm_reg > VCPU_SREG_GS)
3207 return emulate_ud(ctxt);
3209 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3210 return X86EMUL_CONTINUE;
3213 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3215 u16 sel = ctxt->src.val;
3217 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3218 return emulate_ud(ctxt);
3220 if (ctxt->modrm_reg == VCPU_SREG_SS)
3221 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3223 /* Disable writeback. */
3224 ctxt->dst.type = OP_NONE;
3225 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3228 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3230 u16 sel = ctxt->src.val;
3232 /* Disable writeback. */
3233 ctxt->dst.type = OP_NONE;
3234 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3237 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3239 u16 sel = ctxt->src.val;
3241 /* Disable writeback. */
3242 ctxt->dst.type = OP_NONE;
3243 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3246 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3251 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3252 if (rc == X86EMUL_CONTINUE)
3253 ctxt->ops->invlpg(ctxt, linear);
3254 /* Disable writeback. */
3255 ctxt->dst.type = OP_NONE;
3256 return X86EMUL_CONTINUE;
3259 static int em_clts(struct x86_emulate_ctxt *ctxt)
3263 cr0 = ctxt->ops->get_cr(ctxt, 0);
3265 ctxt->ops->set_cr(ctxt, 0, cr0);
3266 return X86EMUL_CONTINUE;
3269 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3271 int rc = ctxt->ops->fix_hypercall(ctxt);
3273 if (rc != X86EMUL_CONTINUE)
3276 /* Let the processor re-execute the fixed hypercall */
3277 ctxt->_eip = ctxt->eip;
3278 /* Disable writeback. */
3279 ctxt->dst.type = OP_NONE;
3280 return X86EMUL_CONTINUE;
3283 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3284 void (*get)(struct x86_emulate_ctxt *ctxt,
3285 struct desc_ptr *ptr))
3287 struct desc_ptr desc_ptr;
3289 if (ctxt->mode == X86EMUL_MODE_PROT64)
3291 get(ctxt, &desc_ptr);
3292 if (ctxt->op_bytes == 2) {
3294 desc_ptr.address &= 0x00ffffff;
3296 /* Disable writeback. */
3297 ctxt->dst.type = OP_NONE;
3298 return segmented_write(ctxt, ctxt->dst.addr.mem,
3299 &desc_ptr, 2 + ctxt->op_bytes);
3302 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3304 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3307 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3309 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3312 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3314 struct desc_ptr desc_ptr;
3317 if (ctxt->mode == X86EMUL_MODE_PROT64)
3319 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3320 &desc_ptr.size, &desc_ptr.address,
3322 if (rc != X86EMUL_CONTINUE)
3324 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3325 /* Disable writeback. */
3326 ctxt->dst.type = OP_NONE;
3327 return X86EMUL_CONTINUE;
3330 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3334 rc = ctxt->ops->fix_hypercall(ctxt);
3336 /* Disable writeback. */
3337 ctxt->dst.type = OP_NONE;
3341 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3343 struct desc_ptr desc_ptr;
3346 if (ctxt->mode == X86EMUL_MODE_PROT64)
3348 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3349 &desc_ptr.size, &desc_ptr.address,
3351 if (rc != X86EMUL_CONTINUE)
3353 ctxt->ops->set_idt(ctxt, &desc_ptr);
3354 /* Disable writeback. */
3355 ctxt->dst.type = OP_NONE;
3356 return X86EMUL_CONTINUE;
3359 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3361 if (ctxt->dst.type == OP_MEM)
3362 ctxt->dst.bytes = 2;
3363 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3364 return X86EMUL_CONTINUE;
3367 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
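	/*
	 * LMSW writes only the low four CR0 bits (PE/MP/EM/TS); keeping the
	 * current PE bit in the preserved mask means LMSW can set PE but
	 * never clear it, as architected.
	 */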
3369 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3370 | (ctxt->src.val & 0x0f));
3371 ctxt->dst.type = OP_NONE;
3372 return X86EMUL_CONTINUE;
3375 static int em_loop(struct x86_emulate_ctxt *ctxt)
3377 int rc = X86EMUL_CONTINUE;
3379 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3380 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3381 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3382 rc = jmp_rel(ctxt, ctxt->src.val);
3387 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3389 int rc = X86EMUL_CONTINUE;
3391 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3392 rc = jmp_rel(ctxt, ctxt->src.val);
3397 static int em_in(struct x86_emulate_ctxt *ctxt)
3399 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3401 return X86EMUL_IO_NEEDED;
3403 return X86EMUL_CONTINUE;
3406 static int em_out(struct x86_emulate_ctxt *ctxt)
3408 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3410 /* Disable writeback. */
3411 ctxt->dst.type = OP_NONE;
3412 return X86EMUL_CONTINUE;
3415 static int em_cli(struct x86_emulate_ctxt *ctxt)
3417 if (emulator_bad_iopl(ctxt))
3418 return emulate_gp(ctxt, 0);
3420 ctxt->eflags &= ~X86_EFLAGS_IF;
3421 return X86EMUL_CONTINUE;
3424 static int em_sti(struct x86_emulate_ctxt *ctxt)
3426 if (emulator_bad_iopl(ctxt))
3427 return emulate_gp(ctxt, 0);
3429 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3430 ctxt->eflags |= X86_EFLAGS_IF;
3431 return X86EMUL_CONTINUE;
3434 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3436 u32 eax, ebx, ecx, edx;
3438 eax = reg_read(ctxt, VCPU_REGS_RAX);
3439 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3440 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3441 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3442 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3443 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3444 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3445 return X86EMUL_CONTINUE;
3448 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3452 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3453 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3455 ctxt->eflags &= ~0xffUL;
3456 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3457 return X86EMUL_CONTINUE;
3460 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3462 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3463 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3464 return X86EMUL_CONTINUE;
3467 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3469 switch (ctxt->op_bytes) {
3470 #ifdef CONFIG_X86_64
3472 asm("bswap %0" : "+r"(ctxt->dst.val));
3476 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3479 return X86EMUL_CONTINUE;
3482 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3484 /* emulating clflush regardless of cpuid */
3485 return X86EMUL_CONTINUE;
3488 static bool valid_cr(int nr)
3500 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3502 if (!valid_cr(ctxt->modrm_reg))
3503 return emulate_ud(ctxt);
3505 return X86EMUL_CONTINUE;
3508 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3510 u64 new_val = ctxt->src.val64;
3511 int cr = ctxt->modrm_reg;
3514 static u64 cr_reserved_bits[] = {
3515 0xffffffff00000000ULL,
3516 0, 0, 0, /* CR3 checked later */
3523 return emulate_ud(ctxt);
3525 if (new_val & cr_reserved_bits[cr])
3526 return emulate_gp(ctxt, 0);
3531 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3532 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3533 return emulate_gp(ctxt, 0);
3535 cr4 = ctxt->ops->get_cr(ctxt, 4);
3536 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3538 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3539 !(cr4 & X86_CR4_PAE))
3540 return emulate_gp(ctxt, 0);
3547 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3548 if (efer & EFER_LMA)
3549 rsvd = CR3_L_MODE_RESERVED_BITS;
3552 return emulate_gp(ctxt, 0);
3557 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3559 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3560 return emulate_gp(ctxt, 0);
3566 return X86EMUL_CONTINUE;
3569 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3573 ctxt->ops->get_dr(ctxt, 7, &dr7);
3575 /* Check if DR7.Global_Enable is set */
3576 return dr7 & (1 << 13);
3579 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3581 int dr = ctxt->modrm_reg;
3585 return emulate_ud(ctxt);
3587 cr4 = ctxt->ops->get_cr(ctxt, 4);
3588 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3589 return emulate_ud(ctxt);
3591 if (check_dr7_gd(ctxt))
3592 return emulate_db(ctxt);
3594 return X86EMUL_CONTINUE;
3597 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3599 u64 new_val = ctxt->src.val64;
3600 int dr = ctxt->modrm_reg;
3602 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3603 return emulate_gp(ctxt, 0);
3605 return check_dr_read(ctxt);
3608 static int check_svme(struct x86_emulate_ctxt *ctxt)
3612 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3614 if (!(efer & EFER_SVME))
3615 return emulate_ud(ctxt);
3617 return X86EMUL_CONTINUE;
3620 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3622 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3624 /* Valid physical address? */
3625 if (rax & 0xffff000000000000ULL)
3626 return emulate_gp(ctxt, 0);
3628 return check_svme(ctxt);
3631 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3633 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3635 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3636 return emulate_ud(ctxt);
3638 return X86EMUL_CONTINUE;
3641 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3643 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3644 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3646 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3647 ctxt->ops->check_pmc(ctxt, rcx))
3648 return emulate_gp(ctxt, 0);
3650 return X86EMUL_CONTINUE;
3653 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3655 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3656 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3657 return emulate_gp(ctxt, 0);
3659 return X86EMUL_CONTINUE;
3662 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3664 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3665 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3666 return emulate_gp(ctxt, 0);
3668 return X86EMUL_CONTINUE;
3671 #define D(_y) { .flags = (_y) }
3672 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3673 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3674 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3675 #define N D(NotImpl)
3676 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3677 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3678 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3679 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3680 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3681 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3682 #define II(_f, _e, _i) \
3683 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3684 #define IIP(_f, _e, _i, _p) \
3685 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3686 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3687 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3689 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3690 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3691 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3692 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3693 #define I2bvIP(_f, _e, _i, _p) \
3694 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3696 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3697 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3698 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
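/*
 * For example, F6ALU(Lock, em_add) expands to the six classic ALU forms in
 * opcode order: r/m8,r8 and r/m,r (lockable), r8,r/m8 and r,r/m, then
 * AL,imm8 and eAX,imm - exactly the 0x00-0x05 row of opcode_table below.
 */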
3700 static const struct opcode group7_rm0[] = {
3702 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3706 static const struct opcode group7_rm1[] = {
3707 DI(SrcNone | Priv, monitor),
3708 DI(SrcNone | Priv, mwait),
3712 static const struct opcode group7_rm3[] = {
3713 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3714 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
3715 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3716 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3717 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3718 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3719 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3720 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3723 static const struct opcode group7_rm7[] = {
3725 DIP(SrcNone, rdtscp, check_rdtsc),
3729 static const struct opcode group1[] = {
3731 F(Lock | PageTable, em_or),
3734 F(Lock | PageTable, em_and),
3740 static const struct opcode group1A[] = {
3741 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3744 static const struct opcode group2[] = {
3745 F(DstMem | ModRM, em_rol),
3746 F(DstMem | ModRM, em_ror),
3747 F(DstMem | ModRM, em_rcl),
3748 F(DstMem | ModRM, em_rcr),
3749 F(DstMem | ModRM, em_shl),
3750 F(DstMem | ModRM, em_shr),
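	/* ModRM /6 below is an undocumented alias of /4 (SHL), hence em_shl twice */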
3751 F(DstMem | ModRM, em_shl),
3752 F(DstMem | ModRM, em_sar),
3755 static const struct opcode group3[] = {
3756 F(DstMem | SrcImm | NoWrite, em_test),
3757 F(DstMem | SrcImm | NoWrite, em_test),
3758 F(DstMem | SrcNone | Lock, em_not),
3759 F(DstMem | SrcNone | Lock, em_neg),
3760 F(DstXacc | Src2Mem, em_mul_ex),
3761 F(DstXacc | Src2Mem, em_imul_ex),
3762 F(DstXacc | Src2Mem, em_div_ex),
3763 F(DstXacc | Src2Mem, em_idiv_ex),
3766 static const struct opcode group4[] = {
3767 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3768 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3772 static const struct opcode group5[] = {
3773 F(DstMem | SrcNone | Lock, em_inc),
3774 F(DstMem | SrcNone | Lock, em_dec),
3775 I(SrcMem | NearBranch, em_call_near_abs),
3776 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3777 I(SrcMem | NearBranch, em_jmp_abs),
3778 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
3779 I(SrcMem | Stack, em_push), D(Undefined),
3782 static const struct opcode group6[] = {
3785 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3786 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3790 static const struct group_dual group7 = { {
3791 II(Mov | DstMem, em_sgdt, sgdt),
3792 II(Mov | DstMem, em_sidt, sidt),
3793 II(SrcMem | Priv, em_lgdt, lgdt),
3794 II(SrcMem | Priv, em_lidt, lidt),
3795 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3796 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3797 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3801 N, EXT(0, group7_rm3),
3802 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3803 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3807 static const struct opcode group8[] = {
3809 F(DstMem | SrcImmByte | NoWrite, em_bt),
3810 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3811 F(DstMem | SrcImmByte | Lock, em_btr),
3812 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3815 static const struct group_dual group9 = { {
3816 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3818 N, N, N, N, N, N, N, N,
3821 static const struct opcode group11[] = {
3822 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3826 static const struct gprefix pfx_0f_ae_7 = {
3827 I(SrcMem | ByteOp, em_clflush), N, N, N,
3830 static const struct group_dual group15 = { {
3831 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3833 N, N, N, N, N, N, N, N,
3836 static const struct gprefix pfx_0f_6f_0f_7f = {
3837 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3840 static const struct gprefix pfx_0f_2b = {
3841 I(0, em_mov), I(0, em_mov), N, N,
3844 static const struct gprefix pfx_0f_28_0f_29 = {
3845 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3848 static const struct gprefix pfx_0f_e7 = {
3849 N, I(Sse, em_mov), N, N,
3852 static const struct escape escape_d9 = { {
3853 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3856 N, N, N, N, N, N, N, N,
3858 N, N, N, N, N, N, N, N,
3860 N, N, N, N, N, N, N, N,
3862 N, N, N, N, N, N, N, N,
3864 N, N, N, N, N, N, N, N,
3866 N, N, N, N, N, N, N, N,
3868 N, N, N, N, N, N, N, N,
3870 N, N, N, N, N, N, N, N,
3873 static const struct escape escape_db = { {
3874 N, N, N, N, N, N, N, N,
3877 N, N, N, N, N, N, N, N,
3879 N, N, N, N, N, N, N, N,
3881 N, N, N, N, N, N, N, N,
3883 N, N, N, N, N, N, N, N,
3885 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3887 N, N, N, N, N, N, N, N,
3889 N, N, N, N, N, N, N, N,
3891 N, N, N, N, N, N, N, N,
3894 static const struct escape escape_dd = { {
3895 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3898 N, N, N, N, N, N, N, N,
3900 N, N, N, N, N, N, N, N,
3902 N, N, N, N, N, N, N, N,
3904 N, N, N, N, N, N, N, N,
3906 N, N, N, N, N, N, N, N,
3908 N, N, N, N, N, N, N, N,
3910 N, N, N, N, N, N, N, N,
3912 N, N, N, N, N, N, N, N,
3915 static const struct opcode opcode_table[256] = {
3917 F6ALU(Lock, em_add),
3918 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3919 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3921 F6ALU(Lock | PageTable, em_or),
3922 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3925 F6ALU(Lock, em_adc),
3926 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3927 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3929 F6ALU(Lock, em_sbb),
3930 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3931 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3933 F6ALU(Lock | PageTable, em_and), N, N,
3935 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3937 F6ALU(Lock, em_xor), N, N,
3939 F6ALU(NoWrite, em_cmp), N, N,
3941 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3943 X8(I(SrcReg | Stack, em_push)),
3945 X8(I(DstReg | Stack, em_pop)),
3947 I(ImplicitOps | Stack | No64, em_pusha),
3948 I(ImplicitOps | Stack | No64, em_popa),
3949 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3952 I(SrcImm | Mov | Stack, em_push),
3953 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3954 I(SrcImmByte | Mov | Stack, em_push),
3955 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3956 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3957 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3959 X16(D(SrcImmByte | NearBranch)),
3961 G(ByteOp | DstMem | SrcImm, group1),
3962 G(DstMem | SrcImm, group1),
3963 G(ByteOp | DstMem | SrcImm | No64, group1),
3964 G(DstMem | SrcImmByte, group1),
3965 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3966 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3968 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3969 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3970 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3971 D(ModRM | SrcMem | NoAccess | DstReg),
3972 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3975 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3977 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3978 I(SrcImmFAddr | No64, em_call_far), N,
3979 II(ImplicitOps | Stack, em_pushf, pushf),
3980 II(ImplicitOps | Stack, em_popf, popf),
3981 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
3983 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3984 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
3985 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3986 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
3988 F2bv(DstAcc | SrcImm | NoWrite, em_test),
3989 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3990 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3991 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
3993 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
3995 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
3997 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
3998 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
3999 I(ImplicitOps | NearBranch, em_ret),
4000 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4001 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4002 G(ByteOp, group11), G(0, group11),
4004 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4005 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4006 I(ImplicitOps | Stack, em_ret_far),
4007 D(ImplicitOps), DI(SrcImmByte, intn),
4008 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4010 G(Src2One | ByteOp, group2), G(Src2One, group2),
4011 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4012 I(DstAcc | SrcImmUByte | No64, em_aam),
4013 I(DstAcc | SrcImmUByte | No64, em_aad),
4014 F(DstAcc | ByteOp | No64, em_salc),
4015 I(DstAcc | SrcXLat | ByteOp, em_mov),
4017 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4019 X3(I(SrcImmByte | NearBranch, em_loop)),
4020 I(SrcImmByte | NearBranch, em_jcxz),
4021 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4022 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4024 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4025 I(SrcImmFAddr | No64, em_jmp_far),
4026 D(SrcImmByte | ImplicitOps | NearBranch),
4027 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4028 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4030 N, DI(ImplicitOps, icebp), N, N,
4031 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4032 G(ByteOp, group3), G(0, group3),
4034 D(ImplicitOps), D(ImplicitOps),
4035 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4036 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4039 static const struct opcode twobyte_table[256] = {
4041 G(0, group6), GD(0, &group7), N, N,
4042 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4043 II(ImplicitOps | Priv, em_clts, clts), N,
4044 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4045 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4047 N, N, N, N, N, N, N, N,
4048 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4049 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4051 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4052 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4053 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4055 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4058 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4059 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4060 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4063 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4064 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4065 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4066 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4067 I(ImplicitOps | EmulateOnUD, em_sysenter),
4068 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4070 N, N, N, N, N, N, N, N,
4072 X16(D(DstReg | SrcMem | ModRM)),
4074 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4079 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4084 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4086 X16(D(SrcImm | NearBranch)),
4088 X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4090 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4091 II(ImplicitOps, em_cpuid, cpuid),
4092 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4093 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4094 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4096 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4097 DI(ImplicitOps, rsm),
4098 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4099 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4100 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4101 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4103 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
4104 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4105 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4106 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4107 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4108 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4112 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4113 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4114 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4116 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4117 N, D(DstMem | SrcReg | ModRM | Mov),
4118 N, N, N, GD(0, &group9),
4120 X8(I(DstReg, em_bswap)),
4122 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4124 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4125 N, N, N, N, N, N, N, N,
4127 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4130 static const struct gprefix three_byte_0f_38_f0 = {
4131 I(DstReg | SrcMem | Mov, em_movbe), N, N, N
4134 static const struct gprefix three_byte_0f_38_f1 = {
4135 I(DstMem | SrcReg | Mov, em_movbe), N, N, N
4139	 * Insns below are selected by the prefix, which is indexed by the third opcode byte.
4142 static const struct opcode opcode_map_0f_38[256] = {
4144 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4146 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4148 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
4149 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
4168 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4172 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4178 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4179 unsigned size, bool sign_extension)
4181 int rc = X86EMUL_CONTINUE;
4185 op->addr.mem.ea = ctxt->_eip;
4186 /* NB. Immediates are sign-extended as necessary. */
4187 switch (op->bytes) {
4189 op->val = insn_fetch(s8, ctxt);
4192 op->val = insn_fetch(s16, ctxt);
4195 op->val = insn_fetch(s32, ctxt);
4198 op->val = insn_fetch(s64, ctxt);
4201 if (!sign_extension) {
4202 switch (op->bytes) {
4210 op->val &= 0xffffffff;
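/*
 * E.g. an OpImmByte operand arrives here as decode_imm(ctxt, op, 1, true):
 * one byte is fetched and sign-extended, while the !sign_extension path
 * above masks the value back down to the fetched width.
 */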
4218 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4221 int rc = X86EMUL_CONTINUE;
4225 decode_register_operand(ctxt, op);
4228 rc = decode_imm(ctxt, op, 1, false);
4231 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4235 if (ctxt->d & BitOp)
4236 fetch_bit_operand(ctxt);
4237 op->orig_val = op->val;
4240 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4244 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4245 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4246 fetch_register_operand(op);
4247 op->orig_val = op->val;
4251 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4252 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4253 fetch_register_operand(op);
4254 op->orig_val = op->val;
4257 if (ctxt->d & ByteOp) {
4262 op->bytes = ctxt->op_bytes;
4263 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4264 fetch_register_operand(op);
4265 op->orig_val = op->val;
4269 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4271 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
4272 op->addr.mem.seg = VCPU_SREG_ES;
4279 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4280 fetch_register_operand(op);
4284 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4287 rc = decode_imm(ctxt, op, 1, true);
4294 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4297 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4300 ctxt->memop.bytes = 1;
4301 if (ctxt->memop.type == OP_REG) {
4302 ctxt->memop.addr.reg = decode_register(ctxt,
4303 ctxt->modrm_rm, true);
4304 fetch_register_operand(&ctxt->memop);
4308 ctxt->memop.bytes = 2;
4311 ctxt->memop.bytes = 4;
4314 rc = decode_imm(ctxt, op, 2, false);
4317 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4321 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4323 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
4324 op->addr.mem.seg = ctxt->seg_override;
4330 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4332 register_address(ctxt,
4333 reg_read(ctxt, VCPU_REGS_RBX) +
4334 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4335 op->addr.mem.seg = ctxt->seg_override;
4340 op->addr.mem.ea = ctxt->_eip;
4341 op->bytes = ctxt->op_bytes + 2;
4342 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4345 ctxt->memop.bytes = ctxt->op_bytes + 2;
4348 op->val = VCPU_SREG_ES;
4351 op->val = VCPU_SREG_CS;
4354 op->val = VCPU_SREG_SS;
4357 op->val = VCPU_SREG_DS;
4360 op->val = VCPU_SREG_FS;
4363 op->val = VCPU_SREG_GS;
4366 /* Special instructions do their own operand decoding. */
4368 op->type = OP_NONE; /* Disable writeback. */
4376 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4378 int rc = X86EMUL_CONTINUE;
4379 int mode = ctxt->mode;
4380 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4381 bool op_prefix = false;
4382 bool has_seg_override = false;
4383 struct opcode opcode;
4385 ctxt->memop.type = OP_NONE;
4386 ctxt->memopp = NULL;
4387 ctxt->_eip = ctxt->eip;
4388 ctxt->fetch.ptr = ctxt->fetch.data;
4389 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4390 ctxt->opcode_len = 1;
4392 memcpy(ctxt->fetch.data, insn, insn_len);
4394 rc = __do_insn_fetch_bytes(ctxt, 1);
4395 if (rc != X86EMUL_CONTINUE)
4400 case X86EMUL_MODE_REAL:
4401 case X86EMUL_MODE_VM86:
4402 case X86EMUL_MODE_PROT16:
4403 def_op_bytes = def_ad_bytes = 2;
4405 case X86EMUL_MODE_PROT32:
4406 def_op_bytes = def_ad_bytes = 4;
4408 #ifdef CONFIG_X86_64
4409 case X86EMUL_MODE_PROT64:
4415 return EMULATION_FAILED;
4418 ctxt->op_bytes = def_op_bytes;
4419 ctxt->ad_bytes = def_ad_bytes;
4421 /* Legacy prefixes. */
4423 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4424 case 0x66: /* operand-size override */
4426 /* switch between 2/4 bytes */
4427 ctxt->op_bytes = def_op_bytes ^ 6;
4429 case 0x67: /* address-size override */
4430 if (mode == X86EMUL_MODE_PROT64)
4431 /* switch between 4/8 bytes */
4432 ctxt->ad_bytes = def_ad_bytes ^ 12;
4434 /* switch between 2/4 bytes */
4435 ctxt->ad_bytes = def_ad_bytes ^ 6;
4437 case 0x26: /* ES override */
4438 case 0x2e: /* CS override */
4439 case 0x36: /* SS override */
4440 case 0x3e: /* DS override */
4441 has_seg_override = true;
4442 ctxt->seg_override = (ctxt->b >> 3) & 3;
4444 case 0x64: /* FS override */
4445 case 0x65: /* GS override */
4446 has_seg_override = true;
4447 ctxt->seg_override = ctxt->b & 7;
4449 case 0x40 ... 0x4f: /* REX */
4450 if (mode != X86EMUL_MODE_PROT64)
4452 ctxt->rex_prefix = ctxt->b;
4454 case 0xf0: /* LOCK */
4455 ctxt->lock_prefix = 1;
4457 case 0xf2: /* REPNE/REPNZ */
4458 case 0xf3: /* REP/REPE/REPZ */
4459 ctxt->rep_prefix = ctxt->b;
4465 /* Any legacy prefix after a REX prefix nullifies its effect. */
4467 ctxt->rex_prefix = 0;
4473 if (ctxt->rex_prefix & 8)
4474 ctxt->op_bytes = 8; /* REX.W */
4476 /* Opcode byte(s). */
4477 opcode = opcode_table[ctxt->b];
4478 /* Two-byte opcode? */
4479 if (ctxt->b == 0x0f) {
4480 ctxt->opcode_len = 2;
4481 ctxt->b = insn_fetch(u8, ctxt);
4482 opcode = twobyte_table[ctxt->b];
4484 /* 0F_38 opcode map */
4485 if (ctxt->b == 0x38) {
4486 ctxt->opcode_len = 3;
4487 ctxt->b = insn_fetch(u8, ctxt);
4488 opcode = opcode_map_0f_38[ctxt->b];
4491 ctxt->d = opcode.flags;
4493 if (ctxt->d & ModRM)
4494 ctxt->modrm = insn_fetch(u8, ctxt);
4496	/* VEX-prefix instructions are not implemented */
4497 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4498 (mode == X86EMUL_MODE_PROT64 ||
4499 (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
4503 while (ctxt->d & GroupMask) {
4504 switch (ctxt->d & GroupMask) {
4506 goffset = (ctxt->modrm >> 3) & 7;
4507 opcode = opcode.u.group[goffset];
4510 goffset = (ctxt->modrm >> 3) & 7;
4511 if ((ctxt->modrm >> 6) == 3)
4512 opcode = opcode.u.gdual->mod3[goffset];
4514 opcode = opcode.u.gdual->mod012[goffset];
4517 goffset = ctxt->modrm & 7;
4518 opcode = opcode.u.group[goffset];
4521 if (ctxt->rep_prefix && op_prefix)
4522 return EMULATION_FAILED;
4523 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4524 switch (simd_prefix) {
4525 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4526 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4527 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4528 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
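		/* For x87 escapes: ModRM 0xc0-0xff selects a register-form
		 * entry from the high table directly, while lower values use
		 * the /r field as a group index for the memory forms. */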
4532 if (ctxt->modrm > 0xbf)
4533 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4535 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4538 return EMULATION_FAILED;
4541 ctxt->d &= ~(u64)GroupMask;
4542 ctxt->d |= opcode.flags;
4547 return EMULATION_FAILED;
4549 ctxt->execute = opcode.u.execute;
4551 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4552 return EMULATION_FAILED;
4554 if (unlikely(ctxt->d &
4555 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch))) {
4557 * These are copied unconditionally here, and checked unconditionally
4558 * in x86_emulate_insn.
4560 ctxt->check_perm = opcode.check_perm;
4561 ctxt->intercept = opcode.intercept;
4563 if (ctxt->d & NotImpl)
4564 return EMULATION_FAILED;
4566 if (mode == X86EMUL_MODE_PROT64) {
4567 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4569 else if (ctxt->d & NearBranch)
4573 if (ctxt->d & Op3264) {
4574 if (mode == X86EMUL_MODE_PROT64)
4581 ctxt->op_bytes = 16;
4582 else if (ctxt->d & Mmx)
4586 /* ModRM and SIB bytes. */
4587 if (ctxt->d & ModRM) {
4588 rc = decode_modrm(ctxt, &ctxt->memop);
4589 if (!has_seg_override) {
4590 has_seg_override = true;
4591 ctxt->seg_override = ctxt->modrm_seg;
4593 } else if (ctxt->d & MemAbs)
4594 rc = decode_abs(ctxt, &ctxt->memop);
4595 if (rc != X86EMUL_CONTINUE)
4598 if (!has_seg_override)
4599 ctxt->seg_override = VCPU_SREG_DS;
4601 ctxt->memop.addr.mem.seg = ctxt->seg_override;
4604 * Decode and fetch the source operand: register, memory
4607 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4608 if (rc != X86EMUL_CONTINUE)
4612 * Decode and fetch the second source operand: register, memory
4615 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4616 if (rc != X86EMUL_CONTINUE)
4619 /* Decode and fetch the destination operand: register or memory. */
4620 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4622 if (ctxt->rip_relative)
4623 ctxt->memopp->addr.mem.ea += ctxt->_eip;
4626 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4629 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4631 return ctxt->d & PageTable;
4634 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4636	/* The second termination condition applies only to REPE
4637	 * and REPNE. Test whether the repeat string operation prefix is
4638	 * REPE/REPZ or REPNE/REPNZ and, if so, check the
4639	 * corresponding termination condition:
4640 * - if REPE/REPZ and ZF = 0 then done
4641 * - if REPNE/REPNZ and ZF = 1 then done
4643 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4644 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4645 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4646 ((ctxt->eflags & EFLG_ZF) == 0))
4647 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4648 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4654 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4658 ctxt->ops->get_fpu(ctxt);
4659 asm volatile("1: fwait \n\t"
4661 ".pushsection .fixup,\"ax\" \n\t"
4663 "movb $1, %[fault] \n\t"
4666 _ASM_EXTABLE(1b, 3b)
4667 : [fault]"+qm"(fault));
4668 ctxt->ops->put_fpu(ctxt);
4670 if (unlikely(fault))
4671 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4673 return X86EMUL_CONTINUE;
4676 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4679 if (op->type == OP_MM)
4680 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4683 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4685 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4686 if (!(ctxt->d & ByteOp))
4687 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
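	/*
	 * The asm constraints below define the fastop calling convention:
	 * dst in rax, src in rdx, src2 in rcx and the caller's flags image
	 * in rdi; each fastop symbol is a table of width variants spaced
	 * FASTOP_SIZE bytes apart, indexed above by log2 of the operand size.
	 */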
4688 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4689 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4691 : "c"(ctxt->src2.val));
4692 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4693 if (!fop) /* exception is returned in fop variable */
4694 return emulate_de(ctxt);
4695 return X86EMUL_CONTINUE;
4698 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4700 memset(&ctxt->rip_relative, 0,
4701 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
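	/*
	 * This relies on the context layout: every field between
	 * ->rip_relative and ->modrm is per-instruction decode state, so a
	 * single memset resets the decoder without touching longer-lived
	 * fields.
	 */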
4703 ctxt->io_read.pos = 0;
4704 ctxt->io_read.end = 0;
4705 ctxt->mem_read.end = 0;
4708 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4710 const struct x86_emulate_ops *ops = ctxt->ops;
4711 int rc = X86EMUL_CONTINUE;
4712 int saved_dst_type = ctxt->dst.type;
4714 ctxt->mem_read.pos = 0;
4716 /* LOCK prefix is allowed only with some instructions */
4717 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4718 rc = emulate_ud(ctxt);
4722 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4723 rc = emulate_ud(ctxt);
4727 if (unlikely(ctxt->d &
4728 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4729 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4730 (ctxt->d & Undefined)) {
4731 rc = emulate_ud(ctxt);
4735 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4736 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4737 rc = emulate_ud(ctxt);
4741 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4742 rc = emulate_nm(ctxt);
4746 if (ctxt->d & Mmx) {
4747 rc = flush_pending_x87_faults(ctxt);
4748 if (rc != X86EMUL_CONTINUE)
4751	 * Now that we know the FPU is exception-safe, we can fetch the operands from it.
4754 fetch_possible_mmx_operand(ctxt, &ctxt->src);
4755 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4756 if (!(ctxt->d & Mov))
4757 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~EFLG_RF;
				goto done;
			}
		}
	}

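	/*
	 * Operand-fetch phase: bring memory-based source operands into the
	 * operand cache so the execute handlers below only see values.
	 */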
	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;

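	/*
	 * Execute phase: prefer the handler supplied by the decode tables.
	 * Fastop handlers use the register-based calling convention
	 * implemented by fastop() above; everything else is an ordinary
	 * execute callback.
	 */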
	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

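	/* One-byte opcodes that do not (yet) have an execute callback. */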
	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

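	/* A handler above may have raised a fault; check before writeback. */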
	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;

		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
				-count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}

	ctxt->eip = ctxt->_eip;

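	/*
	 * Common exit: map the internal X86EMUL_* status onto the
	 * EMULATION_* result expected by the caller, recording a queued
	 * exception and flushing the register cache when emulation
	 * succeeded.
	 */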
done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

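/* Two-byte (0x0f-prefixed) opcodes without an execute callback. */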
twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		break;
	case 0xc3:		/* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
							(u32) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}