1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
4  *
5  * Authors:
6  *    Alexander Graf <agraf@suse.de>
7  *    Kevin Wolf <mail@kevin-wolf.de>
8  *
9  * Description:
10  * This file is derived from arch/powerpc/kvm/44x.c,
11  * by Hollis Blanchard <hollisb@us.ibm.com>.
12  */
13
14 #include <linux/kvm_host.h>
15 #include <linux/err.h>
16 #include <linux/export.h>
17 #include <linux/slab.h>
18 #include <linux/module.h>
19 #include <linux/miscdevice.h>
20 #include <linux/gfp.h>
21 #include <linux/sched.h>
22 #include <linux/vmalloc.h>
23 #include <linux/highmem.h>
24
25 #include <asm/reg.h>
26 #include <asm/cputable.h>
27 #include <asm/cacheflush.h>
28 #include <linux/uaccess.h>
29 #include <asm/io.h>
30 #include <asm/kvm_ppc.h>
31 #include <asm/kvm_book3s.h>
32 #include <asm/mmu_context.h>
33 #include <asm/page.h>
34 #include <asm/xive.h>
35
36 #include "book3s.h"
37 #include "trace.h"
38
39 #define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
40 #define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__
41
42 /* #define EXIT_DEBUG */
43
44 struct kvm_stats_debugfs_item debugfs_entries[] = {
45         { "exits",       VCPU_STAT(sum_exits) },
46         { "mmio",        VCPU_STAT(mmio_exits) },
47         { "sig",         VCPU_STAT(signal_exits) },
48         { "sysc",        VCPU_STAT(syscall_exits) },
49         { "inst_emu",    VCPU_STAT(emulated_inst_exits) },
50         { "dec",         VCPU_STAT(dec_exits) },
51         { "ext_intr",    VCPU_STAT(ext_intr_exits) },
52         { "queue_intr",  VCPU_STAT(queue_intr) },
53         { "halt_poll_success_ns",       VCPU_STAT(halt_poll_success_ns) },
54         { "halt_poll_fail_ns",          VCPU_STAT(halt_poll_fail_ns) },
55         { "halt_wait_ns",               VCPU_STAT(halt_wait_ns) },
56         { "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
57         { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
58         { "halt_successful_wait",       VCPU_STAT(halt_successful_wait) },
59         { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
60         { "halt_wakeup", VCPU_STAT(halt_wakeup) },
61         { "pf_storage",  VCPU_STAT(pf_storage) },
62         { "sp_storage",  VCPU_STAT(sp_storage) },
63         { "pf_instruc",  VCPU_STAT(pf_instruc) },
64         { "sp_instruc",  VCPU_STAT(sp_instruc) },
65         { "ld",          VCPU_STAT(ld) },
66         { "ld_slow",     VCPU_STAT(ld_slow) },
67         { "st",          VCPU_STAT(st) },
68         { "st_slow",     VCPU_STAT(st_slow) },
69         { "pthru_all",       VCPU_STAT(pthru_all) },
70         { "pthru_host",      VCPU_STAT(pthru_host) },
71         { "pthru_bad_aff",   VCPU_STAT(pthru_bad_aff) },
72         { "largepages_2M",    VM_STAT(num_2M_pages, .mode = 0444) },
73         { "largepages_1G",    VM_STAT(num_1G_pages, .mode = 0444) },
74         { NULL }
75 };
76
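/*
 * Refresh the vcpu's int_pending indication when the set of pending
 * exceptions changes.  Only used for PR KVM; HV KVM returns early.
 */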
77 static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
78                         unsigned long pending_now, unsigned long old_pending)
79 {
80         if (is_kvmppc_hv_enabled(vcpu->kvm))
81                 return;
82         if (pending_now)
83                 kvmppc_set_int_pending(vcpu, 1);
84         else if (old_pending)
85                 kvmppc_set_int_pending(vcpu, 0);
86 }
87
88 static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
89 {
90         ulong crit_raw;
91         ulong crit_r1;
92         bool crit;
93
94         if (is_kvmppc_hv_enabled(vcpu->kvm))
95                 return false;
96
97         crit_raw = kvmppc_get_critical(vcpu);
98         crit_r1 = kvmppc_get_gpr(vcpu, 1);
99
100         /* Truncate crit indicators in 32 bit mode */
101         if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
102                 crit_raw &= 0xffffffff;
103                 crit_r1 &= 0xffffffff;
104         }
105
106         /* Critical section when crit == r1 */
107         crit = (crit_raw == crit_r1);
108         /* ... and we're in supervisor mode */
109         crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);
110
111         return crit;
112 }
113
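/* Hand interrupt injection off to the HV or PR backend. */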
114 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
115 {
116         vcpu->kvm->arch.kvm_ops->inject_interrupt(vcpu, vec, flags);
117 }
118
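/* Map an interrupt vector offset to its Book3S delivery priority. */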
119 static int kvmppc_book3s_vec2irqprio(unsigned int vec)
120 {
121         unsigned int prio;
122
123         switch (vec) {
124         case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;         break;
125         case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;        break;
126         case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;         break;
127         case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;         break;
128         case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;         break;
129         case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;         break;
130         case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;             break;
131         case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;            break;
132         case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;              break;
133         case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;           break;
134         case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;          break;
135         case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;              break;
136         case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;                break;
137         case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;              break;
138         case 0xf40: prio = BOOK3S_IRQPRIO_VSX;                  break;
139         case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;          break;
140         default:    prio = BOOK3S_IRQPRIO_MAX;                  break;
141         }
142
143         return prio;
144 }
145
146 void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
147                                           unsigned int vec)
148 {
149         unsigned long old_pending = vcpu->arch.pending_exceptions;
150
151         clear_bit(kvmppc_book3s_vec2irqprio(vec),
152                   &vcpu->arch.pending_exceptions);
153
154         kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
155                                   old_pending);
156 }
157
158 void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
159 {
160         vcpu->stat.queue_intr++;
161
162         set_bit(kvmppc_book3s_vec2irqprio(vec),
163                 &vcpu->arch.pending_exceptions);
164 #ifdef EXIT_DEBUG
165         printk(KERN_INFO "Queueing interrupt %x\n", vec);
166 #endif
167 }
168 EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
169
170 void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
171 {
172         /* might as well deliver this straight away */
173         kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
174 }
175 EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);
176
177 void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
178 {
179         /* might as well deliver this straight away */
180         kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
181 }
182 EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);
183
184 void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
185 {
186         /* might as well deliver this straight away */
187         kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
188 }
189
190 void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
191 {
192         /* might as well deliver this straight away */
193         kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
194 }
195
196 void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
197 {
198         /* might as well deliver this straight away */
199         kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
200 }
201
202 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
203 {
204         kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
205 }
206 EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);
207
208 int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
209 {
210         return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
211 }
212 EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);
213
214 void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
215 {
216         kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
217 }
218 EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
219
220 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
221                                 struct kvm_interrupt *irq)
222 {
223         /*
224          * This case (KVM_INTERRUPT_SET) should never actually arise for
225          * a pseries guest (because pseries guests expect their interrupt
226          * controllers to continue asserting an external interrupt request
227          * until it is acknowledged at the interrupt controller), but is
228          * included to avoid ABI breakage and potentially for other
229          * sorts of guest.
230          *
231          * There is a subtlety here: HV KVM does not test the
232          * external_oneshot flag in the code that synthesizes
233          * external interrupts for the guest just before entering
234          * the guest.  That is OK even if userspace did do a
235          * KVM_INTERRUPT_SET on a pseries guest vcpu, because the
236          * caller (kvm_vcpu_ioctl_interrupt) does a kvm_vcpu_kick()
237          * which ends up doing a smp_send_reschedule(), which will
238          * pull the guest all the way out to the host, meaning that
239          * we will call kvmppc_core_prepare_to_enter() before entering
240          * the guest again, and that will handle the external_oneshot
241          * flag correctly.
242          */
243         if (irq->irq == KVM_INTERRUPT_SET)
244                 vcpu->arch.external_oneshot = 1;
245
246         kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
247 }
248
249 void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
250 {
251         kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
252 }
253
254 void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
255                                     ulong flags)
256 {
257         kvmppc_set_dar(vcpu, dar);
258         kvmppc_set_dsisr(vcpu, flags);
259         kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
260 }
261 EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);
262
263 void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
264 {
265         kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
266 }
267 EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);
268
269 static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
270                                          unsigned int priority)
271 {
272         int deliver = 1;
273         int vec = 0;
274         bool crit = kvmppc_critical_section(vcpu);
275
276         switch (priority) {
277         case BOOK3S_IRQPRIO_DECREMENTER:
278                 deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
279                 vec = BOOK3S_INTERRUPT_DECREMENTER;
280                 break;
281         case BOOK3S_IRQPRIO_EXTERNAL:
282                 deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
283                 vec = BOOK3S_INTERRUPT_EXTERNAL;
284                 break;
285         case BOOK3S_IRQPRIO_SYSTEM_RESET:
286                 vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
287                 break;
288         case BOOK3S_IRQPRIO_MACHINE_CHECK:
289                 vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
290                 break;
291         case BOOK3S_IRQPRIO_DATA_STORAGE:
292                 vec = BOOK3S_INTERRUPT_DATA_STORAGE;
293                 break;
294         case BOOK3S_IRQPRIO_INST_STORAGE:
295                 vec = BOOK3S_INTERRUPT_INST_STORAGE;
296                 break;
297         case BOOK3S_IRQPRIO_DATA_SEGMENT:
298                 vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
299                 break;
300         case BOOK3S_IRQPRIO_INST_SEGMENT:
301                 vec = BOOK3S_INTERRUPT_INST_SEGMENT;
302                 break;
303         case BOOK3S_IRQPRIO_ALIGNMENT:
304                 vec = BOOK3S_INTERRUPT_ALIGNMENT;
305                 break;
306         case BOOK3S_IRQPRIO_PROGRAM:
307                 vec = BOOK3S_INTERRUPT_PROGRAM;
308                 break;
309         case BOOK3S_IRQPRIO_VSX:
310                 vec = BOOK3S_INTERRUPT_VSX;
311                 break;
312         case BOOK3S_IRQPRIO_ALTIVEC:
313                 vec = BOOK3S_INTERRUPT_ALTIVEC;
314                 break;
315         case BOOK3S_IRQPRIO_FP_UNAVAIL:
316                 vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
317                 break;
318         case BOOK3S_IRQPRIO_SYSCALL:
319                 vec = BOOK3S_INTERRUPT_SYSCALL;
320                 break;
321         case BOOK3S_IRQPRIO_DEBUG:
322                 vec = BOOK3S_INTERRUPT_TRACE;
323                 break;
324         case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
325                 vec = BOOK3S_INTERRUPT_PERFMON;
326                 break;
327         case BOOK3S_IRQPRIO_FAC_UNAVAIL:
328                 vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
329                 break;
330         default:
331                 deliver = 0;
332                 printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
333                 break;
334         }
335
336 #if 0
337         printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
338 #endif
339
340         if (deliver)
341                 kvmppc_inject_interrupt(vcpu, vec, 0);
342
343         return deliver;
344 }
345
346 /*
347  * This function determines whether an irqprio should be cleared once delivered.
348  */
349 static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
350 {
351         switch (priority) {
352                 case BOOK3S_IRQPRIO_DECREMENTER:
353                         /* DEC interrupts get cleared by mtdec */
354                         return false;
355                 case BOOK3S_IRQPRIO_EXTERNAL:
356                         /*
357                          * External interrupts get cleared by userspace
358                          * except when set by the KVM_INTERRUPT ioctl with
359                          * KVM_INTERRUPT_SET (not KVM_INTERRUPT_SET_LEVEL).
360                          */
361                         if (vcpu->arch.external_oneshot) {
362                                 vcpu->arch.external_oneshot = 0;
363                                 return true;
364                         }
365                         return false;
366         }
367
368         return true;
369 }
370
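/*
 * Called on the way into the guest: deliver whatever pending exceptions
 * the guest can take right now, then update its interrupt-pending status.
 */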
371 int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
372 {
373         unsigned long *pending = &vcpu->arch.pending_exceptions;
374         unsigned long old_pending = vcpu->arch.pending_exceptions;
375         unsigned int priority;
376
377 #ifdef EXIT_DEBUG
378         if (vcpu->arch.pending_exceptions)
379                 printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
380 #endif
381         priority = __ffs(*pending);
382         while (priority < BOOK3S_IRQPRIO_MAX) {
383                 if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
384                     clear_irqprio(vcpu, priority)) {
385                         clear_bit(priority, &vcpu->arch.pending_exceptions);
386                         break;
387                 }
388
389                 priority = find_next_bit(pending,
390                                          BITS_PER_BYTE * sizeof(*pending),
391                                          priority + 1);
392         }
393
394         /* Tell the guest about our interrupt status */
395         kvmppc_update_int_pending(vcpu, *pending, old_pending);
396
397         return 0;
398 }
399 EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
400
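/*
 * Translate a guest physical address to a host pfn, with special handling
 * for the magic (shared) page.
 */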
401 kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
402                         bool *writable)
403 {
404         ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
405         gfn_t gfn = gpa >> PAGE_SHIFT;
406
407         if (!(kvmppc_get_msr(vcpu) & MSR_SF))
408                 mp_pa = (uint32_t)mp_pa;
409
410         /* Magic page override */
411         gpa &= ~0xFFFULL;
412         if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
413                 ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
414                 kvm_pfn_t pfn;
415
416                 pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
417                 get_page(pfn_to_page(pfn));
418                 if (writable)
419                         *writable = true;
420                 return pfn;
421         }
422
423         return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
424 }
425 EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
426
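/*
 * Translate an effective address via the guest MMU when relocation is on,
 * otherwise map it 1:1 as a real-mode access.
 */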
427 int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
428                  enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
429 {
430         bool data = (xlid == XLATE_DATA);
431         bool iswrite = (xlrw == XLATE_WRITE);
432         int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
433         int r;
434
435         if (relocated) {
436                 r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
437         } else {
438                 pte->eaddr = eaddr;
439                 pte->raddr = eaddr & KVM_PAM;
440                 pte->vpage = VSID_REAL | eaddr >> 12;
441                 pte->may_read = true;
442                 pte->may_write = true;
443                 pte->may_execute = true;
444                 r = 0;
445
446                 if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
447                     !data) {
448                         if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
449                             ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
450                                 pte->raddr &= ~SPLIT_HACK_MASK;
451                 }
452         }
453
454         return r;
455 }
456
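/*
 * Fetch the last guest instruction for emulation; for a system call the
 * PC has already advanced, so step back one instruction first.
 */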
457 int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
458                 enum instruction_fetch_type type, u32 *inst)
459 {
460         ulong pc = kvmppc_get_pc(vcpu);
461         int r;
462
463         if (type == INST_SC)
464                 pc -= 4;
465
466         r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
467         if (r == EMULATE_DONE)
468                 return r;
469         else
470                 return EMULATE_AGAIN;
471 }
472 EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
473
474 int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
475 {
476         return 0;
477 }
478
479 void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
480 {
481 }
482
483 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
484                                   struct kvm_sregs *sregs)
485 {
486         int ret;
487
488         vcpu_load(vcpu);
489         ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
490         vcpu_put(vcpu);
491
492         return ret;
493 }
494
495 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
496                                   struct kvm_sregs *sregs)
497 {
498         int ret;
499
500         vcpu_load(vcpu);
501         ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
502         vcpu_put(vcpu);
503
504         return ret;
505 }
506
507 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
508 {
509         int i;
510
511         regs->pc = kvmppc_get_pc(vcpu);
512         regs->cr = kvmppc_get_cr(vcpu);
513         regs->ctr = kvmppc_get_ctr(vcpu);
514         regs->lr = kvmppc_get_lr(vcpu);
515         regs->xer = kvmppc_get_xer(vcpu);
516         regs->msr = kvmppc_get_msr(vcpu);
517         regs->srr0 = kvmppc_get_srr0(vcpu);
518         regs->srr1 = kvmppc_get_srr1(vcpu);
519         regs->pid = vcpu->arch.pid;
520         regs->sprg0 = kvmppc_get_sprg0(vcpu);
521         regs->sprg1 = kvmppc_get_sprg1(vcpu);
522         regs->sprg2 = kvmppc_get_sprg2(vcpu);
523         regs->sprg3 = kvmppc_get_sprg3(vcpu);
524         regs->sprg4 = kvmppc_get_sprg4(vcpu);
525         regs->sprg5 = kvmppc_get_sprg5(vcpu);
526         regs->sprg6 = kvmppc_get_sprg6(vcpu);
527         regs->sprg7 = kvmppc_get_sprg7(vcpu);
528
529         for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
530                 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
531
532         return 0;
533 }
534
535 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
536 {
537         int i;
538
539         kvmppc_set_pc(vcpu, regs->pc);
540         kvmppc_set_cr(vcpu, regs->cr);
541         kvmppc_set_ctr(vcpu, regs->ctr);
542         kvmppc_set_lr(vcpu, regs->lr);
543         kvmppc_set_xer(vcpu, regs->xer);
544         kvmppc_set_msr(vcpu, regs->msr);
545         kvmppc_set_srr0(vcpu, regs->srr0);
546         kvmppc_set_srr1(vcpu, regs->srr1);
547         kvmppc_set_sprg0(vcpu, regs->sprg0);
548         kvmppc_set_sprg1(vcpu, regs->sprg1);
549         kvmppc_set_sprg2(vcpu, regs->sprg2);
550         kvmppc_set_sprg3(vcpu, regs->sprg3);
551         kvmppc_set_sprg4(vcpu, regs->sprg4);
552         kvmppc_set_sprg5(vcpu, regs->sprg5);
553         kvmppc_set_sprg6(vcpu, regs->sprg6);
554         kvmppc_set_sprg7(vcpu, regs->sprg7);
555
556         for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
557                 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
558
559         return 0;
560 }
561
562 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
563 {
564         return -ENOTSUPP;
565 }
566
567 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
568 {
569         return -ENOTSUPP;
570 }
571
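/*
 * ONE_REG read: let the HV/PR backend try first and handle the registers
 * common to all Book3S implementations here.
 */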
572 int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
573                         union kvmppc_one_reg *val)
574 {
575         int r = 0;
576         long int i;
577
578         r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
579         if (r == -EINVAL) {
580                 r = 0;
581                 switch (id) {
582                 case KVM_REG_PPC_DAR:
583                         *val = get_reg_val(id, kvmppc_get_dar(vcpu));
584                         break;
585                 case KVM_REG_PPC_DSISR:
586                         *val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
587                         break;
588                 case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
589                         i = id - KVM_REG_PPC_FPR0;
590                         *val = get_reg_val(id, VCPU_FPR(vcpu, i));
591                         break;
592                 case KVM_REG_PPC_FPSCR:
593                         *val = get_reg_val(id, vcpu->arch.fp.fpscr);
594                         break;
595 #ifdef CONFIG_VSX
596                 case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
597                         if (cpu_has_feature(CPU_FTR_VSX)) {
598                                 i = id - KVM_REG_PPC_VSR0;
599                                 val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
600                                 val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
601                         } else {
602                                 r = -ENXIO;
603                         }
604                         break;
605 #endif /* CONFIG_VSX */
606                 case KVM_REG_PPC_DEBUG_INST:
607                         *val = get_reg_val(id, INS_TW);
608                         break;
609 #ifdef CONFIG_KVM_XICS
610                 case KVM_REG_PPC_ICP_STATE:
611                         if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
612                                 r = -ENXIO;
613                                 break;
614                         }
615                         if (xics_on_xive())
616                                 *val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
617                         else
618                                 *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
619                         break;
620 #endif /* CONFIG_KVM_XICS */
621 #ifdef CONFIG_KVM_XIVE
622                 case KVM_REG_PPC_VP_STATE:
623                         if (!vcpu->arch.xive_vcpu) {
624                                 r = -ENXIO;
625                                 break;
626                         }
627                         if (xive_enabled())
628                                 r = kvmppc_xive_native_get_vp(vcpu, val);
629                         else
630                                 r = -ENXIO;
631                         break;
632 #endif /* CONFIG_KVM_XIVE */
633                 case KVM_REG_PPC_FSCR:
634                         *val = get_reg_val(id, vcpu->arch.fscr);
635                         break;
636                 case KVM_REG_PPC_TAR:
637                         *val = get_reg_val(id, vcpu->arch.tar);
638                         break;
639                 case KVM_REG_PPC_EBBHR:
640                         *val = get_reg_val(id, vcpu->arch.ebbhr);
641                         break;
642                 case KVM_REG_PPC_EBBRR:
643                         *val = get_reg_val(id, vcpu->arch.ebbrr);
644                         break;
645                 case KVM_REG_PPC_BESCR:
646                         *val = get_reg_val(id, vcpu->arch.bescr);
647                         break;
648                 case KVM_REG_PPC_IC:
649                         *val = get_reg_val(id, vcpu->arch.ic);
650                         break;
651                 default:
652                         r = -EINVAL;
653                         break;
654                 }
655         }
656
657         return r;
658 }
659
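/*
 * ONE_REG write: the backend gets first refusal, with the common Book3S
 * registers handled here.
 */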
660 int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
661                         union kvmppc_one_reg *val)
662 {
663         int r = 0;
664         long int i;
665
666         r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
667         if (r == -EINVAL) {
668                 r = 0;
669                 switch (id) {
670                 case KVM_REG_PPC_DAR:
671                         kvmppc_set_dar(vcpu, set_reg_val(id, *val));
672                         break;
673                 case KVM_REG_PPC_DSISR:
674                         kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
675                         break;
676                 case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
677                         i = id - KVM_REG_PPC_FPR0;
678                         VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
679                         break;
680                 case KVM_REG_PPC_FPSCR:
681                         vcpu->arch.fp.fpscr = set_reg_val(id, *val);
682                         break;
683 #ifdef CONFIG_VSX
684                 case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
685                         if (cpu_has_feature(CPU_FTR_VSX)) {
686                                 i = id - KVM_REG_PPC_VSR0;
687                                 vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
688                                 vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
689                         } else {
690                                 r = -ENXIO;
691                         }
692                         break;
693 #endif /* CONFIG_VSX */
694 #ifdef CONFIG_KVM_XICS
695                 case KVM_REG_PPC_ICP_STATE:
696                         if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
697                                 r = -ENXIO;
698                                 break;
699                         }
700                         if (xics_on_xive())
701                                 r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
702                         else
703                                 r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
704                         break;
705 #endif /* CONFIG_KVM_XICS */
706 #ifdef CONFIG_KVM_XIVE
707                 case KVM_REG_PPC_VP_STATE:
708                         if (!vcpu->arch.xive_vcpu) {
709                                 r = -ENXIO;
710                                 break;
711                         }
712                         if (xive_enabled())
713                                 r = kvmppc_xive_native_set_vp(vcpu, val);
714                         else
715                                 r = -ENXIO;
716                         break;
717 #endif /* CONFIG_KVM_XIVE */
718                 case KVM_REG_PPC_FSCR:
719                         vcpu->arch.fscr = set_reg_val(id, *val);
720                         break;
721                 case KVM_REG_PPC_TAR:
722                         vcpu->arch.tar = set_reg_val(id, *val);
723                         break;
724                 case KVM_REG_PPC_EBBHR:
725                         vcpu->arch.ebbhr = set_reg_val(id, *val);
726                         break;
727                 case KVM_REG_PPC_EBBRR:
728                         vcpu->arch.ebbrr = set_reg_val(id, *val);
729                         break;
730                 case KVM_REG_PPC_BESCR:
731                         vcpu->arch.bescr = set_reg_val(id, *val);
732                         break;
733                 case KVM_REG_PPC_IC:
734                         vcpu->arch.ic = set_reg_val(id, *val);
735                         break;
736                 default:
737                         r = -EINVAL;
738                         break;
739                 }
740         }
741
742         return r;
743 }
744
745 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
746 {
747         vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
748 }
749
750 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
751 {
752         vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
753 }
754
755 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
756 {
757         vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
758 }
759 EXPORT_SYMBOL_GPL(kvmppc_set_msr);
760
761 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
762 {
763         return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
764 }
765
766 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
767                                   struct kvm_translation *tr)
768 {
769         return 0;
770 }
771
772 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
773                                         struct kvm_guest_debug *dbg)
774 {
775         vcpu_load(vcpu);
776         vcpu->guest_debug = dbg->control;
777         vcpu_put(vcpu);
778         return 0;
779 }
780
781 void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
782 {
783         kvmppc_core_queue_dec(vcpu);
784         kvm_vcpu_kick(vcpu);
785 }
786
787 int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
788 {
789         return vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
790 }
791
792 void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
793 {
794         vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
795 }
796
797 int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
798 {
799         return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
800 }
801
802 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
803 {
804         return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
805 }
806
807 void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
808                               struct kvm_memory_slot *dont)
809 {
810         kvm->arch.kvm_ops->free_memslot(free, dont);
811 }
812
813 int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
814                                unsigned long npages)
815 {
816         return kvm->arch.kvm_ops->create_memslot(slot, npages);
817 }
818
819 void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
820 {
821         kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
822 }
823
824 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
825                                 struct kvm_memory_slot *memslot,
826                                 const struct kvm_userspace_memory_region *mem)
827 {
828         return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
829 }
830
831 void kvmppc_core_commit_memory_region(struct kvm *kvm,
832                                 const struct kvm_userspace_memory_region *mem,
833                                 const struct kvm_memory_slot *old,
834                                 const struct kvm_memory_slot *new,
835                                 enum kvm_mr_change change)
836 {
837         kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
838 }
839
840 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
841 {
842         return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
843 }
844
845 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
846 {
847         return kvm->arch.kvm_ops->age_hva(kvm, start, end);
848 }
849
850 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
851 {
852         return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
853 }
854
855 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
856 {
857         kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
858         return 0;
859 }
860
861 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
862 {
863         vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
864 }
865
866 int kvmppc_core_init_vm(struct kvm *kvm)
867 {
868
869 #ifdef CONFIG_PPC64
870         INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
871         INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
872         mutex_init(&kvm->arch.rtas_token_lock);
873 #endif
874
875         return kvm->arch.kvm_ops->init_vm(kvm);
876 }
877
878 void kvmppc_core_destroy_vm(struct kvm *kvm)
879 {
880         kvm->arch.kvm_ops->destroy_vm(kvm);
881
882 #ifdef CONFIG_PPC64
883         kvmppc_rtas_tokens_free(kvm);
884         WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
885 #endif
886
887 #ifdef CONFIG_KVM_XICS
888         /*
889          * Free the XIVE devices which are not directly freed by the
890          * device 'release' method
891          */
892         kfree(kvm->arch.xive_devices.native);
893         kvm->arch.xive_devices.native = NULL;
894         kfree(kvm->arch.xive_devices.xics_on_xive);
895         kvm->arch.xive_devices.xics_on_xive = NULL;
896 #endif /* CONFIG_KVM_XICS */
897 }
898
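/*
 * H_LOGICAL_CI_LOAD: perform a cache-inhibited load on the guest's behalf
 * by reading through the in-kernel MMIO bus.
 */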
899 int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
900 {
901         unsigned long size = kvmppc_get_gpr(vcpu, 4);
902         unsigned long addr = kvmppc_get_gpr(vcpu, 5);
903         u64 buf;
904         int srcu_idx;
905         int ret;
906
907         if (!is_power_of_2(size) || (size > sizeof(buf)))
908                 return H_TOO_HARD;
909
910         srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
911         ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
912         srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
913         if (ret != 0)
914                 return H_TOO_HARD;
915
916         switch (size) {
917         case 1:
918                 kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
919                 break;
920
921         case 2:
922                 kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
923                 break;
924
925         case 4:
926                 kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
927                 break;
928
929         case 8:
930                 kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
931                 break;
932
933         default:
934                 BUG();
935         }
936
937         return H_SUCCESS;
938 }
939 EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);
940
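/*
 * H_LOGICAL_CI_STORE: perform a cache-inhibited store on the guest's
 * behalf by writing through the in-kernel MMIO bus.
 */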
941 int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
942 {
943         unsigned long size = kvmppc_get_gpr(vcpu, 4);
944         unsigned long addr = kvmppc_get_gpr(vcpu, 5);
945         unsigned long val = kvmppc_get_gpr(vcpu, 6);
946         u64 buf;
947         int srcu_idx;
948         int ret;
949
950         switch (size) {
951         case 1:
952                 *(u8 *)&buf = val;
953                 break;
954
955         case 2:
956                 *(__be16 *)&buf = cpu_to_be16(val);
957                 break;
958
959         case 4:
960                 *(__be32 *)&buf = cpu_to_be32(val);
961                 break;
962
963         case 8:
964                 *(__be64 *)&buf = cpu_to_be64(val);
965                 break;
966
967         default:
968                 return H_TOO_HARD;
969         }
970
971         srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
972         ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
973         srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
974         if (ret != 0)
975                 return H_TOO_HARD;
976
977         return H_SUCCESS;
978 }
979 EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);
980
981 int kvmppc_core_check_processor_compat(void)
982 {
983         /*
984          * We always return 0 for book3s. We check
985          * for compatibility while loading the HV
986          * or PR module
987          */
988         return 0;
989 }
990
991 int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
992 {
993         return kvm->arch.kvm_ops->hcall_implemented(hcall);
994 }
995
996 #ifdef CONFIG_KVM_XICS
997 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
998                 bool line_status)
999 {
1000         if (xics_on_xive())
1001                 return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
1002                                            line_status);
1003         else
1004                 return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
1005                                            line_status);
1006 }
1007
1008 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
1009                               struct kvm *kvm, int irq_source_id,
1010                               int level, bool line_status)
1011 {
1012         return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
1013                            level, line_status);
1014 }
1015 static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
1016                                  struct kvm *kvm, int irq_source_id, int level,
1017                                  bool line_status)
1018 {
1019         return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
1020 }
1021
1022 int kvm_irq_map_gsi(struct kvm *kvm,
1023                     struct kvm_kernel_irq_routing_entry *entries, int gsi)
1024 {
1025         entries->gsi = gsi;
1026         entries->type = KVM_IRQ_ROUTING_IRQCHIP;
1027         entries->set = kvmppc_book3s_set_irq;
1028         entries->irqchip.irqchip = 0;
1029         entries->irqchip.pin = gsi;
1030         return 1;
1031 }
1032
1033 int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
1034 {
1035         return pin;
1036 }
1037
1038 #endif /* CONFIG_KVM_XICS */
1039
1040 static int kvmppc_book3s_init(void)
1041 {
1042         int r;
1043
1044         r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1045         if (r)
1046                 return r;
1047 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
1048         r = kvmppc_book3s_init_pr();
1049 #endif
1050
1051 #ifdef CONFIG_KVM_XICS
1052 #ifdef CONFIG_KVM_XIVE
1053         if (xics_on_xive()) {
1054                 kvmppc_xive_init_module();
1055                 kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
1056                 if (kvmppc_xive_native_supported()) {
1057                         kvmppc_xive_native_init_module();
1058                         kvm_register_device_ops(&kvm_xive_native_ops,
1059                                                 KVM_DEV_TYPE_XIVE);
1060                 }
1061         } else
1062 #endif
1063                 kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
1064 #endif
1065         return r;
1066 }
1067
1068 static void kvmppc_book3s_exit(void)
1069 {
1070 #ifdef CONFIG_KVM_XICS
1071         if (xics_on_xive()) {
1072                 kvmppc_xive_exit_module();
1073                 kvmppc_xive_native_exit_module();
1074         }
1075 #endif
1076 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
1077         kvmppc_book3s_exit_pr();
1078 #endif
1079         kvm_exit();
1080 }
1081
1082 module_init(kvmppc_book3s_init);
1083 module_exit(kvmppc_book3s_exit);
1084
1085 /* On 32bit this is our one and only kernel module */
1086 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
1087 MODULE_ALIAS_MISCDEV(KVM_MINOR);
1088 MODULE_ALIAS("devname:kvm");
1089 #endif