/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"

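/*
 * Facility-check helpers: if the guest does not have the relevant
 * facility (FP, VSX or AltiVec) enabled in its MSR, queue the matching
 * "facility unavailable" interrupt for it.  A return value of true
 * tells the caller to abandon the emulation and let the guest take
 * the interrupt first.
 */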
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
        if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
                kvmppc_core_queue_fpunavail(vcpu);
                return true;
        }

        return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
        if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
                kvmppc_core_queue_vsx_unavail(vcpu);
                return true;
        }

        return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
        if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
                kvmppc_core_queue_vec_unavail(vcpu);
                return true;
        }

        return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 inst;
        int ra, rs, rt;
        enum emulation_result emulated = EMULATE_FAIL;
        int advance = 1;
        struct instruction_op op;

        /* this default type might be overwritten by subcategories */
        kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

        emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
        if (emulated != EMULATE_DONE)
                return emulated;

        ra = get_ra(inst);
        rs = get_rs(inst);
        rt = get_rt(inst);

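        /*
         * Reset all of the per-access MMIO emulation state before the
         * instruction is decoded, so that nothing is left over from a
         * previously emulated load or store.
         */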
        /*
         * if mmio_vsx_tx_sx_enabled == 0, copy data between
         * VSR[0..31] and memory
         * if mmio_vsx_tx_sx_enabled == 1, copy data between
         * VSR[32..63] and memory
         */
        vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
        vcpu->arch.mmio_vsx_copy_nums = 0;
        vcpu->arch.mmio_vsx_offset = 0;
        vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
        vcpu->arch.mmio_sp64_extend = 0;
        vcpu->arch.mmio_sign_extend = 0;
        vcpu->arch.mmio_vmx_copy_nums = 0;
        vcpu->arch.mmio_vmx_offset = 0;
        vcpu->arch.mmio_host_swabbed = 0;

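        /*
         * analyse_instr() does the actual decoding: given the current
         * register state it fills in struct instruction_op with the
         * access type, size, target register and effective address, so
         * the switch below only has to dispatch on the result.
         */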
        emulated = EMULATE_FAIL;
        vcpu->arch.regs.msr = vcpu->arch.shared->msr;
        vcpu->arch.regs.ccr = vcpu->arch.cr;
        if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
                int type = op.type & INSTR_TYPE_MASK;
                int size = GETSIZE(op.type);

                switch (type) {
                case LOAD: {
                        int instr_byte_swap = op.type & BYTEREV;

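                        /*
                         * The last argument of the handlers below is
                         * "is_default_endian"; a byte-reversed load
                         * (BYTEREV) is by definition a non-default-endian
                         * access, hence the negation.
                         */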
                        if (op.type & SIGNEXT)
                                emulated = kvmppc_handle_loads(run, vcpu,
                                                op.reg, size, !instr_byte_swap);
                        else
                                emulated = kvmppc_handle_load(run, vcpu,
                                                op.reg, size, !instr_byte_swap);

                        if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

                        break;
                }
#ifdef CONFIG_PPC_FPU
                case LOAD_FP:
                        if (kvmppc_check_fp_disabled(vcpu))
                                return EMULATE_DONE;

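                        /*
                         * FPCONV marks single-precision accesses (lfs
                         * and friends); mmio_sp64_extend tells the MMIO
                         * completion path to expand the 4-byte value to
                         * double-precision format in the FPR.
                         */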
                        if (op.type & FPCONV)
                                vcpu->arch.mmio_sp64_extend = 1;

                        if (op.type & SIGNEXT)
                                emulated = kvmppc_handle_loads(run, vcpu,
                                             KVM_MMIO_REG_FPR|op.reg, size, 1);
                        else
                                emulated = kvmppc_handle_load(run, vcpu,
                                             KVM_MMIO_REG_FPR|op.reg, size, 1);

                        if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

                        break;
#endif
#ifdef CONFIG_ALTIVEC
                case LOAD_VMX:
                        if (kvmppc_check_altivec_disabled(vcpu))
                                return EMULATE_DONE;

                        /* Hardware enforces alignment of VMX accesses */
                        vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
                        vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

                        if (size == 16) { /* lvx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_DWORD;
                        } else if (size == 4) { /* lvewx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_WORD;
                        } else if (size == 2) { /* lvehx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_HWORD;
                        } else if (size == 1) { /* lvebx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_BYTE;
                        } else
                                break;

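                        /*
                         * The offset is the index of the accessed element
                         * within the 16-byte VMX register, derived from
                         * the low bits of the guest address.
                         */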
                        vcpu->arch.mmio_vmx_offset =
                                (vcpu->arch.vaddr_accessed & 0xf)/size;

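                        /*
                         * A 16-byte lvx is wider than a single MMIO
                         * access can handle, so it is split into two
                         * 8-byte doubleword accesses.
                         */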
                        if (size == 16) {
                                vcpu->arch.mmio_vmx_copy_nums = 2;
                                emulated = kvmppc_handle_vmx_load(run,
                                                vcpu, KVM_MMIO_REG_VMX|op.reg,
                                                8, 1);
                        } else {
                                vcpu->arch.mmio_vmx_copy_nums = 1;
                                emulated = kvmppc_handle_vmx_load(run, vcpu,
                                                KVM_MMIO_REG_VMX|op.reg,
                                                size, 1);
                        }
                        break;
#endif
#ifdef CONFIG_VSX
                case LOAD_VSX: {
                        int io_size_each;

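                        /*
                         * VSX instructions flagged with VSX_CHECK_VEC
                         * operate on the upper VSRs, which overlay the
                         * VMX registers, so they depend on MSR_VEC
                         * rather than MSR_VSX being set.
                         */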
                        if (op.vsx_flags & VSX_CHECK_VEC) {
                                if (kvmppc_check_altivec_disabled(vcpu))
                                        return EMULATE_DONE;
                        } else {
                                if (kvmppc_check_vsx_disabled(vcpu))
                                        return EMULATE_DONE;
                        }

                        if (op.vsx_flags & VSX_FPCONV)
                                vcpu->arch.mmio_sp64_extend = 1;

                        if (op.element_size == 8) {
                                if (op.vsx_flags & VSX_SPLAT)
                                        vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
                                else
                                        vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_DWORD;
                        } else if (op.element_size == 4) {
                                if (op.vsx_flags & VSX_SPLAT)
                                        vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
                                else
                                        vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_WORD;
                        } else
                                break;

                        if (size < op.element_size) {
                                /* precision conversion case: lxsspx, etc. */
                                vcpu->arch.mmio_vsx_copy_nums = 1;
                                io_size_each = size;
                        } else { /* lxvw4x, lxvd2x, etc. */
                                vcpu->arch.mmio_vsx_copy_nums =
                                        size/op.element_size;
                                io_size_each = op.element_size;
                        }

                        emulated = kvmppc_handle_vsx_load(run, vcpu,
                                        KVM_MMIO_REG_VSX | (op.reg & 0x1f),
                                        io_size_each, 1, op.type & SIGNEXT);
                        break;
                }
#endif
                case STORE:
                        /* If the store needs byte reversal, op.val has
                         * already been byte-reversed by analyse_instr().
                         */
                        emulated = kvmppc_handle_store(run, vcpu, op.val,
                                        size, 1);

                        if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

                        break;
#ifdef CONFIG_PPC_FPU
                case STORE_FP:
                        if (kvmppc_check_fp_disabled(vcpu))
                                return EMULATE_DONE;

                        /* The FP registers need to be flushed so that
                         * kvmppc_handle_store() can read the actual FP
                         * values from vcpu->arch.
                         */
                        if (vcpu->kvm->arch.kvm_ops->giveup_ext)
                                vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
                                                MSR_FP);

                        if (op.type & FPCONV)
                                vcpu->arch.mmio_sp64_extend = 1;

                        emulated = kvmppc_handle_store(run, vcpu,
                                        VCPU_FPR(vcpu, op.reg), size, 1);

                        if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

                        break;
#endif
#ifdef CONFIG_ALTIVEC
                case STORE_VMX:
                        if (kvmppc_check_altivec_disabled(vcpu))
                                return EMULATE_DONE;

                        /* Hardware enforces alignment of VMX accesses. */
                        vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
                        vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

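                        /*
                         * As in the FP case, flush the live vector state
                         * back to vcpu->arch so the stored value is
                         * current.
                         */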
                        if (vcpu->kvm->arch.kvm_ops->giveup_ext)
                                vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
                                                MSR_VEC);
                        if (size == 16) { /* stvx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_DWORD;
                        } else if (size == 4) { /* stvewx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_WORD;
                        } else if (size == 2) { /* stvehx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_HWORD;
                        } else if (size == 1) { /* stvebx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_BYTE;
                        } else
                                break;

                        vcpu->arch.mmio_vmx_offset =
                                (vcpu->arch.vaddr_accessed & 0xf)/size;

                        if (size == 16) {
                                vcpu->arch.mmio_vmx_copy_nums = 2;
                                emulated = kvmppc_handle_vmx_store(run,
                                                vcpu, op.reg, 8, 1);
                        } else {
                                vcpu->arch.mmio_vmx_copy_nums = 1;
                                emulated = kvmppc_handle_vmx_store(run,
                                                vcpu, op.reg, size, 1);
                        }

                        break;
#endif
#ifdef CONFIG_VSX
                case STORE_VSX: {
                        int io_size_each;

                        if (op.vsx_flags & VSX_CHECK_VEC) {
                                if (kvmppc_check_altivec_disabled(vcpu))
                                        return EMULATE_DONE;
                        } else {
                                if (kvmppc_check_vsx_disabled(vcpu))
                                        return EMULATE_DONE;
                        }

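                        /*
                         * Flush the live VSX state back to vcpu->arch
                         * before reading the value to be stored.
                         */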
                        if (vcpu->kvm->arch.kvm_ops->giveup_ext)
                                vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
                                                MSR_VSX);

                        if (op.vsx_flags & VSX_FPCONV)
                                vcpu->arch.mmio_sp64_extend = 1;

                        if (op.element_size == 8)
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_DWORD;
                        else if (op.element_size == 4)
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_WORD;
                        else
                                break;

                        if (size < op.element_size) {
                                /* precision conversion case, like stxsspx */
                                vcpu->arch.mmio_vsx_copy_nums = 1;
                                io_size_each = size;
                        } else { /* stxvw4x, stxvd2x, etc. */
                                vcpu->arch.mmio_vsx_copy_nums =
                                                size/op.element_size;
                                io_size_each = op.element_size;
                        }

                        emulated = kvmppc_handle_vsx_store(run, vcpu,
                                        op.reg & 0x1f, io_size_each, 1);
                        break;
                }
#endif
                case CACHEOP:
                        /* Do nothing. The guest is performing dcbi because
                         * hardware DMA is not snooped by the dcache, but
                         * emulated DMA either goes through the dcache as
                         * normal writes, or the host kernel has handled
                         * dcache coherence.
                         */
                        emulated = EMULATE_DONE;
                        break;
                default:
                        break;
                }
        }

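        /*
         * If emulation failed, leave the PC on the faulting instruction
         * and raise a program interrupt so the guest can deal with it.
         */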
        if (emulated == EMULATE_FAIL) {
                advance = 0;
                kvmppc_core_queue_program(vcpu, 0);
        }

        trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

        /* Advance past emulated instruction. */
        if (advance)
                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

        return emulated;
}