/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * inclusion, via the kvm_host.h #include.
 */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to implement
 * software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00
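
/*
 * Illustrative sketch (an assumption about the call site, not a verbatim
 * kernel excerpt): an instruction emulator can recognize this opcode and
 * bounce the vcpu out to user space as a debug exit:
 *
 *	if (inst == KVMPPC_INST_SW_BREAKPOINT) {
 *		run->exit_reason = KVM_EXIT_DEBUG;
 *		run->debug.arch.address = kvmppc_get_pc(vcpu);
 *		return EMULATE_EXIT_USER;
 *	}
 */
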
enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_AGAIN,		/* something went wrong; try again */
	EMULATE_EXIT_USER,	/* emulation requires exit to user-space */
};
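
/*
 * Illustrative dispatch on the result (a sketch of a typical exit-handler
 * shape, not a quote of a specific kernel call site):
 *
 *	switch (kvmppc_emulate_instruction(run, vcpu)) {
 *	case EMULATE_DONE:
 *		r = RESUME_GUEST;		// re-enter the guest
 *		break;
 *	case EMULATE_DO_MMIO:
 *		run->exit_reason = KVM_EXIT_MMIO;
 *		r = RESUME_HOST;		// let user space serve the access
 *		break;
 *	default:
 *		kvmppc_core_queue_program(vcpu, 0);	// or give up
 *		r = RESUME_GUEST;
 *		break;
 *	}
 */
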
enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC			/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes,
				  int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes,
				  int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   unsigned int rs, unsigned int bytes,
				   int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   int rs, unsigned int bytes,
				   int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_fetch_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
				      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
						unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags, ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			    struct kvm_memory_slot *memslot,
			    unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
					     struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
					      struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
					  struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
		struct kvm *kvm, unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages)				\
		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset,	\
				(stt)->size, (ioba), (npages)) ?	\
				H_PARAMETER : H_SUCCESS)
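
/*
 * Illustrative usage (a sketch of a TCE hypercall handler, not a
 * verbatim kernel excerpt):
 *
 *	stt = kvmppc_find_table(vcpu->kvm, liobn);
 *	if (!stt)
 *		return H_TOO_HARD;
 *	ret = kvmppc_ioba_validate(stt, ioba, npages);
 *	if (ret != H_SUCCESS)
 *		return ret;
 *	// only now touch the TCE table pages
 */
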
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				      unsigned long liobn, unsigned long ioba,
				      unsigned long tce_list,
				      unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
			       unsigned long liobn, unsigned long ioba,
			       unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *free,
				     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	a;
		u64	b;
	} vpaval;
	u64	xive_timaval[2];
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     const struct kvm_userspace_memory_region *mem);
	void (*commit_memory_region)(struct kvm *kvm,
				     const struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			       unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
	void (*free_memslot)(struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont);
	int (*create_memslot)(struct kvm_memory_slot *slot,
			      unsigned long npages);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			       int size);
	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			      int size);
	int (*svm_off)(struct kvm *kvm);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				       enum instruction_fetch_type type,
				       u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
			       swab32(vcpu->arch.last_inst) :
			       vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Extracts inst bits numbered as in the Power ISA spec, i.e. the leftmost
 * bit is bit zero. Both msb and lsb are included in the extracted field.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

/*
 * Replaces inst bits, using the same spec-ordered bit numbering as
 * kvmppc_get_field() above.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
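
/*
 * Worked example (illustrative arithmetic only): with a 32-bit
 * instruction held in the low word of a u64, the primary opcode
 * occupies doubleword bits 32..37, so
 *
 *	op = kvmppc_get_field(inst, 32, 37);
 *
 * yields (inst >> 26) & 0x3f, the major opcode, and
 *
 *	inst = kvmppc_set_field(inst, 32, 37, 31);
 *
 * rewrites the opcode field (here to 31, the X-form major opcode).
 */
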
#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
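
/*
 * Illustrative usage (a sketch of a one_reg accessor, not a verbatim
 * call site): the access size is encoded in the register id, so a
 * handler can write
 *
 *	case KVM_REG_PPC_DAR:
 *		*val = get_reg_val(id, kvmppc_get_dar(vcpu));
 *		break;
 *
 * on the get side, and kvmppc_set_dar(vcpu, set_reg_val(id, *val)) on
 * the set side.
 */
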
int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

/*
 * To avoid the need to unnecessarily exit fully to the host kernel, an IPI to
 * a CPU thread that's running/napping inside of a guest is by default regarded
 * as a request to wake the CPU (if needed) and continue execution within the
 * guest, potentially to process new state like externally-generated
 * interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI).
 *
 * To force an exit to the host kernel, kvmppc_set_host_ipi() must be called
 * prior to issuing the IPI to set the corresponding 'host_ipi' flag in the
 * target CPU's PACA. To avoid unnecessary exits to the host, this flag should
 * be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on
 * the receiving side prior to processing the IPI work.
 *
 * NOTE:
 *
 * We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi().
 * This is to guard against sequences such as the following:
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *      105: smp_muxed_ipi_set_message():
 *      105:   smp_mb()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    --105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |  42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    |  42: // returns to executing guest
 *    |      // RE-ORDERED STORE COMPLETES
 *    ->105:   message[CALL_FUNCTION] = 1
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 *
 * We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). This is
 * to guard against sequences such as the following (as well as to create
 * a read-side pairing with the barrier in kvmppc_set_host_ipi()):
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    -- 42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    | 105: smp_muxed_ipi_set_message():
 *    | 105:   smp_mb()
 *    | 105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |      // RE-ORDERED STORE COMPLETES
 *    -> 42:   kvmppc_clear_host_ipi(42)
 *       42: // returns to executing guest
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 */
static inline void kvmppc_set_host_ipi(int cpu)
{
	/*
	 * order stores of IPI messages vs. setting of host_ipi flag
	 *
	 * pairs with the barrier in kvmppc_clear_host_ipi()
	 */
	smp_mb();
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
}

static inline void kvmppc_clear_host_ipi(int cpu)
{
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;
	/*
	 * order clearing of host_ipi flag vs. processing of IPI messages
	 *
	 * pairs with the barrier in kvmppc_set_host_ipi()
	 */
	smp_mb();
}
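
/*
 * Illustrative pairing (a sketch of the protocol described above, not a
 * verbatim kernel excerpt):
 *
 *	// sender, before firing the IPI:
 *	kvmppc_set_host_ipi(cpu);
 *	ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, msg);
 *
 *	// receiver, in the IPI handler, before demuxing messages:
 *	kvmppc_clear_host_ipi(smp_processor_id());
 *	smp_ipi_demux_relaxed();
 */
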
static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
					struct kvm_nested_guest *nested);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu)
{}

static inline void kvmppc_clear_host_ipi(int cpu)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)	{ return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) { }
static inline void kvmppc_free_host_rm_ops(void) { }
static inline void kvmppc_free_pimap(struct kvm *kvm) { }
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
/*
 * Below, the first "xive" is the "eXternal Interrupt Virtualization Engine",
 * i.e. the new P9 interrupt controller, while the second "xive" is the
 * legacy "eXternal Interrupt Vector Entry", which is the configuration of
 * an interrupt on the "xics" interrupt controller on P8 and earlier. These
 * two functions consume or produce a legacy "XIVE" state from the new
 * "XIVE" interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
extern void kvmppc_xive_init_module(void);
extern void kvmppc_xive_exit_module(void);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
}

extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_native_init_module(void);
extern void kvmppc_xive_native_exit_module(void);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern bool kvmppc_xive_native_supported(void);

#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
static inline void kvmppc_xive_init_module(void) { }
static inline void kvmppc_xive_exit_module(void) { }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
	{ return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm,
					 unsigned long guest_irq,
					 struct irq_desc *host_desc)
	{ return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm,
					 unsigned long guest_irq,
					 struct irq_desc *host_desc)
	{ return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
	{ return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id,
				      u32 irq, int level, bool line_status)
	{ return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }

static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
						  struct kvm_vcpu *vcpu,
						  u32 cpu)
	{ return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_native_init_module(void) { }
static inline void kvmppc_xive_native_exit_module(void) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
	{ return 0; }
static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
	{ return 0; }

#endif /* CONFIG_KVM_XIVE */

#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
static inline bool xics_on_xive(void)
{
	return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool xics_on_xive(void)
{
	return false;
}
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list,
				  unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			unsigned int yield_count);
long kvmppc_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			   unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);

/*
 * Host-side operations we want to set up while running in real
 * mode in the guest operating on the xics.
 * Currently only VCPU wakeup is supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core	*rm_core;
	void (*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
					       struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness, so expose helpers that work for both.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3S_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)			\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)	\
{								\
	return mfspr(bookehv_spr);				\
}								\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)			\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{								\
	mtspr(bookehv_spr, val);				\
}								\

#define SHARED_WRAPPER_GET(reg, size)				\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)	\
{								\
	if (kvmppc_shared_big_endian(vcpu))			\
		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else							\
		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}								\

#define SHARED_WRAPPER_SET(reg, size)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{								\
	if (kvmppc_shared_big_endian(vcpu))			\
		vcpu->arch.shared->reg = cpu_to_be##size(val);	\
	else							\
		vcpu->arch.shared->reg = cpu_to_le##size(val);	\
}								\

#define SHARED_WRAPPER(reg, size)				\
	SHARED_WRAPPER_GET(reg, size)				\
	SHARED_WRAPPER_SET(reg, size)				\

#define SPRNG_WRAPPER(reg, bookehv_spr)				\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)			\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)			\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)		\
	SPRNG_WRAPPER(reg, bookehv_spr)				\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)		\
	SHARED_WRAPPER(reg, size)				\

#endif
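
/*
 * For reference (illustrative expansion, not additional API):
 * SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0) below generates
 * kvmppc_get_srr0()/kvmppc_set_srr0(). On CONFIG_KVM_BOOKE_HV these
 * access the guest SPR directly via mfspr/mtspr(SPRN_GSRR0); everywhere
 * else they expand to roughly:
 *
 *	static inline u64 kvmppc_get_srr0(struct kvm_vcpu *vcpu)
 *	{
 *		if (kvmppc_shared_big_endian(vcpu))
 *			return be64_to_cpu(vcpu->arch.shared->srr0);
 *		else
 *			return le64_to_cpu(vcpu->arch.shared->srr0);
 *	}
 */
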
SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Please call after prepare_to_enter. This function puts the lazy ee and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}
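
/*
 * Illustrative call sequence (a sketch; the exact return-value
 * convention of kvmppc_prepare_to_enter() is an assumption here, not a
 * quote of a kernel call site):
 *
 *	hard_irq_disable();
 *	r = kvmppc_prepare_to_enter(vcpu);
 *	if (r > 0) {
 *		kvmppc_fix_ee_before_entry();
 *		... low-level guest entry ...
 *	}
 */
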
static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
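
/*
 * Worked example (illustrative): for an indexed load such as
 * "lwzx rD,rA,rB" the effective address is (rA|0) + (rB), so an
 * emulator can compute
 *
 *	ea = kvmppc_get_ea_indexed(vcpu, get_ra(inst), get_rb(inst));
 *
 * where get_ra()/get_rb() are the decode helpers from
 * asm/disassemble.h; the result is truncated to 32 bits when the
 * guest is not in 64-bit mode (MSR_SF/MSR_CM clear).
 */
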
extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */