/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */
#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */
#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */
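
/*
 * Each helper above mirrors what the hardware would do: touching a
 * facility that is disabled in the guest MSR queues the corresponding
 * "unavailable" interrupt, and the caller then returns without advancing
 * the PC, so the guest can enable the facility and retry the instruction.
 */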
/*
 * XXX to do: vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	enum emulation_result emulated = EMULATE_FAIL;
	int advance = 1;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;
	/*
	 * if mmio_vsx_tx_sx_enabled == 0, copy data between
	 * VSR[0..31] and memory
	 * if mmio_vsx_tx_sx_enabled == 1, copy data between
	 * VSR[32..63] and memory
	 */
	vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;
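
	/*
	 * The mmio_* state above describes any in-flight MMIO access and is
	 * consumed on the completion path (e.g. kvmppc_complete_mmio_load())
	 * once userspace has serviced the exit, so it must start out clean
	 * for every newly emulated instruction.
	 */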
	emulated = EMULATE_FAIL;
	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
	vcpu->arch.regs.ccr = vcpu->arch.cr;
	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);
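
		/*
		 * analyse_instr() has decoded the access for us: 'type'
		 * carries the LOAD/STORE/CACHEOP class plus modifier flags
		 * (SIGNEXT, UPDATE, BYTEREV, ...) and 'size' is the access
		 * width in bytes.
		 */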
		switch (type) {
		case LOAD: {
			int instr_byte_swap = op.type & BYTEREV;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(run, vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
		}
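
		/*
		 * Update-form instructions (lwzu, stwu, ...) also write the
		 * effective address back into RA; op.update_reg and op.ea
		 * carry that side effect here and in the cases below.
		 */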
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			/* element index within the 16-byte VMX register */
			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(run,
						vcpu, KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(run, vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}
			break;
#endif
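
		/*
		 * VSX registers 0-31 overlay the FPRs and 32-63 overlay the
		 * Altivec registers; instructions flagged VSX_CHECK_VEC are
		 * therefore gated on MSR_VEC below rather than MSR_VSX.
		 */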
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;

			if (size < op.element_size) {
				/* precision convert case: lxsspx, etc */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX | (op.reg & 0x1f),
					io_size_each, 1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE:
			/* if need byte reverse, op.val has been reversed by
			 * analyse_instr().
			 */
			emulated = kvmppc_handle_store(run, vcpu, op.val,
					size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
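
		/*
		 * Unlike the GPR store above, the FP/VMX/VSX store cases
		 * below must first make the host give up the guest's register
		 * state (giveup_ext) so that the values read from vcpu->arch
		 * are current.
		 */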
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/* The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read actual FP vals
			 * from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);
			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precise conversion case, like stxsspx */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
						size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(run, vcpu,
					op.reg & 0x1f, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}

	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}