1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
6 #define pr_fmt(fmt) "xive-kvm: " fmt
8 #include <linux/kernel.h>
9 #include <linux/kvm_host.h>
10 #include <linux/err.h>
11 #include <linux/gfp.h>
12 #include <linux/spinlock.h>
13 #include <linux/delay.h>
14 #include <linux/percpu.h>
15 #include <linux/cpumask.h>
16 #include <linux/uaccess.h>
17 #include <asm/kvm_book3s.h>
18 #include <asm/kvm_ppc.h>
19 #include <asm/hvcall.h>
22 #include <asm/xive-regs.h>
23 #include <asm/debug.h>
24 #include <asm/debugfs.h>
28 #include <linux/debugfs.h>
29 #include <linux/seq_file.h>
31 #include "book3s_xive.h"
35 * Virtual mode variants of the hcalls for use on radix/radix
36 * with AIL. They require the VCPU's VP to be "pushed"
38 * We still instantiate them here because we use some of the
39 * generated utility functions as well in this file.
41 #define XIVE_RUNTIME_CHECKS
42 #define X_PFX xive_vm_
43 #define X_STATIC static
44 #define X_STAT_PFX stat_vm_
45 #define __x_tima xive_tima
46 #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
47 #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
48 #define __x_writeb __raw_writeb
49 #define __x_readw __raw_readw
50 #define __x_readq __raw_readq
51 #define __x_writeq __raw_writeq
53 #include "book3s_xive_template.c"
56 * We leave a gap of a couple of interrupts in the queue to
57 * account for the IPI and an additional safety guard.
62 * Push a vcpu's context to the XIVE on guest entry.
63 * This assumes we are in virtual mode (MMU on)
65 void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
67 void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
71 * Nothing to do if the platform doesn't have a XIVE
72 * or this vCPU doesn't have its own XIVE context
73 * (e.g. because it's not using an in-kernel interrupt controller).
75 if (!tima || !vcpu->arch.xive_cam_word)
79 __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
80 __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
81 vcpu->arch.xive_pushed = 1;
85 * We clear the irq_pending flag. There is a small chance of a
86 * race vs. the escalation interrupt happening on another
87 * processor setting it again, but the only consequence is to
88 * cause a spurious wakeup on the next H_CEDE, which is not an issue.
91 vcpu->arch.irq_pending = 0;
94 * In single escalation mode, if the escalation interrupt is on, we mask it.
97 if (vcpu->arch.xive_esc_on) {
98 pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
103 * We have a possible subtle race here: The escalation
104 * interrupt might have fired and be on its way to the
105 * host queue while we mask it, and if we unmask it
106 * early enough (re-cede right away), there is a
107 * theoretical possibility that it fires again, thus
108 * landing in the target queue more than once, which is not what we want.
111 * Fortunately, solving this is rather easy. If the
112 * above load setting PQ to 01 returns a previous
113 * value where P is set, then we know the escalation
114 * interrupt is somewhere on its way to the host. In
115 * that case we simply don't clear the xive_esc_on
116 * flag below. It will be eventually cleared by the
117 * handler for the escalation interrupt.
119 * Then, when doing a cede, we check that flag again
120 * before re-enabling the escalation interrupt, and if
121 * set, we abort the cede.
123 if (!(pq & XIVE_ESB_VAL_P))
124 /* Now P is 0, we can clear the flag */
125 vcpu->arch.xive_esc_on = 0;
128 EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);
131 * This is a simple trigger for a generic XIVE IRQ. This must
132 * only be called for interrupts that support a trigger page
134 static bool xive_irq_trigger(struct xive_irq_data *xd)
136 /* This should be only for MSIs */
137 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
140 /* Those interrupts should always have a trigger page */
141 if (WARN_ON(!xd->trig_mmio))
144 out_be64(xd->trig_mmio, 0);
149 static irqreturn_t xive_esc_irq(int irq, void *data)
151 struct kvm_vcpu *vcpu = data;
153 vcpu->arch.irq_pending = 1;
155 if (vcpu->arch.ceded)
156 kvmppc_fast_vcpu_kick(vcpu);
158 /* Since we have the no-EOI flag, the interrupt is effectively
159 * disabled now. Clearing xive_esc_on means we won't bother
160 * doing so on the next entry.
162 * This also allows the entry code to know that if a PQ combination
163 * of 10 is observed while xive_esc_on is true, it means the queue
164 * contains an unprocessed escalation interrupt. We don't make use of
165 * that knowledge today but might (see comment in book3s_hv_rmhandlers.S)
167 vcpu->arch.xive_esc_on = false;
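/*
 * Escalation interrupts are what the XIVE uses to notify the host
 * when an event lands in a queue of a VP that isn't currently
 * dispatched. The handler above just records that something is
 * pending and kicks the vCPU out of its cede; the push/cede paths
 * then consult xive_esc_on to decide whether the escalation still
 * needs to be dealt with (see kvmppc_xive_push_vcpu() above).
 */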
172 int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
173 bool single_escalation)
175 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
176 struct xive_q *q = &xc->queues[prio];
180 /* Already there ? */
181 if (xc->esc_virq[prio])
184 /* Hook up the escalation interrupt */
185 xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
186 if (!xc->esc_virq[prio]) {
187 pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
188 prio, xc->server_num);
192 if (single_escalation)
193 name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
194 vcpu->kvm->arch.lpid, xc->server_num);
196 name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
197 vcpu->kvm->arch.lpid, xc->server_num, prio);
199 pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
200 prio, xc->server_num);
205 pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);
207 rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
208 IRQF_NO_THREAD, name, vcpu);
210 pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
211 prio, xc->server_num);
214 xc->esc_virq_names[prio] = name;
216 /* In single escalation mode, we grab the ESB MMIO of the
217 * interrupt and mask it. Also populate the VCPU v/raddr
218 * of the ESB page for use by asm entry/exit code. Finally
219 * set the XIVE_IRQ_NO_EOI flag which will prevent the
220 * core code from performing an EOI on the escalation
221 * interrupt, thus leaving it effectively masked after it fires once.
224 if (single_escalation) {
225 struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
226 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
228 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
229 vcpu->arch.xive_esc_raddr = xd->eoi_page;
230 vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
231 xd->flags |= XIVE_IRQ_NO_EOI;
236 irq_dispose_mapping(xc->esc_virq[prio]);
237 xc->esc_virq[prio] = 0;
242 static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
244 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
245 struct kvmppc_xive *xive = xc->xive;
246 struct xive_q *q = &xc->queues[prio];
250 if (WARN_ON(q->qpage))
253 /* Allocate the queue; for now we allocate it on the current node */
254 qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
256 pr_err("Failed to allocate queue %d for VCPU %d\n",
257 prio, xc->server_num);
260 memset(qpage, 0, 1 << xive->q_order);
263 * Reconfigure the queue. This will set q->qpage only once the
264 * queue is fully configured. This is a requirement for prio 0
265 * as we will stop doing EOIs for every IPI as soon as we observe
266 * qpage being non-NULL, and instead will only EOI when we receive
267 * corresponding queue 0 entries
269 rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
270 xive->q_order, true);
272 pr_err("Failed to configure queue %d for VCPU %d\n",
273 prio, xc->server_num);
277 /* Called with xive->lock held */
278 static int xive_check_provisioning(struct kvm *kvm, u8 prio)
280 struct kvmppc_xive *xive = kvm->arch.xive;
281 struct kvm_vcpu *vcpu;
284 lockdep_assert_held(&xive->lock);
286 /* Already provisioned ? */
287 if (xive->qmap & (1 << prio))
290 pr_devel("Provisioning prio... %d\n", prio);
292 /* Provision each VCPU and enable escalations if needed */
293 kvm_for_each_vcpu(i, vcpu, kvm) {
294 if (!vcpu->arch.xive_vcpu)
296 rc = xive_provision_queue(vcpu, prio);
297 if (rc == 0 && !xive->single_escalation)
298 kvmppc_xive_attach_escalation(vcpu, prio,
299 xive->single_escalation);
304 /* Order previous stores and mark it as provisioned */
306 xive->qmap |= (1 << prio);
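/*
 * xive->qmap is a bitmap with one bit per (host) priority: a set bit
 * means every vCPU already has a queue provisioned for it, so
 * subsequent calls for that priority bail out early above. For
 * example, a guest that has used host priorities 0 and 5 would end
 * up with qmap == 0x21. kvmppc_xive_connect_vcpu() also consults it
 * when provisioning queues for a hot-plugged vCPU.
 */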
310 static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
312 struct kvm_vcpu *vcpu;
313 struct kvmppc_xive_vcpu *xc;
316 /* Locate target server */
317 vcpu = kvmppc_xive_find_server(kvm, server);
319 pr_warn("%s: Can't find server %d\n", __func__, server);
322 xc = vcpu->arch.xive_vcpu;
326 q = &xc->queues[prio];
327 atomic_inc(&q->pending_count);
330 static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
332 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
341 q = &xc->queues[prio];
342 if (WARN_ON(!q->qpage))
345 /* Calculate max number of interrupts in that queue. */
346 max = (q->msk + 1) - XIVE_Q_GAP;
347 return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
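/*
 * A rough illustration of the capacity check above: q->msk + 1 is
 * the number of 4-byte entries in the event queue (the index
 * arithmetic elsewhere uses "idx & q->msk"), so for a 64kB queue
 * that is 16384 entries and max = 16384 - XIVE_Q_GAP. The gap keeps
 * a couple of entries free for the IPI and as a safety margin, per
 * the XIVE_Q_GAP comment near the top of this file.
 */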
350 int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
352 struct kvm_vcpu *vcpu;
355 /* Locate target server */
356 vcpu = kvmppc_xive_find_server(kvm, *server);
358 pr_devel("Can't find server %d\n", *server);
362 pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);
365 rc = xive_try_pick_queue(vcpu, prio);
369 pr_devel(" .. failed, looking up candidate...\n");
371 /* Failed, pick another VCPU */
372 kvm_for_each_vcpu(i, vcpu, kvm) {
373 if (!vcpu->arch.xive_vcpu)
375 rc = xive_try_pick_queue(vcpu, prio);
377 *server = vcpu->arch.xive_vcpu->server_num;
378 pr_devel(" found on 0x%x/%d\n", *server, prio);
382 pr_devel(" no available target !\n");
384 /* No available target ! */
388 static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
389 struct kvmppc_xive_src_block *sb,
390 struct kvmppc_xive_irq_state *state)
392 struct xive_irq_data *xd;
398 * Take the lock, set masked, try again if racing with H_EOI
402 arch_spin_lock(&sb->lock);
403 old_prio = state->guest_priority;
404 state->guest_priority = MASKED;
408 state->guest_priority = old_prio;
409 arch_spin_unlock(&sb->lock);
412 /* No change ? Bail */
413 if (old_prio == MASKED)
416 /* Get the right irq */
417 kvmppc_xive_select_irq(state, &hw_num, &xd);
420 * If the interrupt is marked as needing masking via
421 * firmware, we do it here. Firmware masking however
422 * is "lossy", it won't return the old p and q bits
423 * and won't set the interrupt to a state where it will
424 * record queued ones. If this is an issue we should do
425 * lazy masking instead.
427 * For now, we work around this in unmask by forcing
428 * an interrupt whenever we unmask a non-LSI via FW
431 if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
432 xive_native_configure_irq(hw_num,
433 kvmppc_xive_vp(xive, state->act_server),
434 MASKED, state->number);
435 /* set old_p so we can track if an H_EOI was done */
437 state->old_q = false;
439 /* Set PQ to 10, return old P and old Q and remember them */
440 val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
441 state->old_p = !!(val & 2);
442 state->old_q = !!(val & 1);
445 * Synchronize hardware to ensure the queues are updated when masking
448 xive_native_sync_source(hw_num);
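/*
 * Note that xive_lock_and_mask() returns the previous guest priority
 * and, like xive_lock_for_unmask() below, leaves sb->lock held: the
 * callers (set_xive, int_off, the mapped/unmapped paths, ...) finish
 * their retargetting/unmasking and then drop the lock themselves.
 */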
454 static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
455 struct kvmppc_xive_irq_state *state)
458 * Take the lock, try again if racing with H_EOI
461 arch_spin_lock(&sb->lock);
464 arch_spin_unlock(&sb->lock);
468 static void xive_finish_unmask(struct kvmppc_xive *xive,
469 struct kvmppc_xive_src_block *sb,
470 struct kvmppc_xive_irq_state *state,
473 struct xive_irq_data *xd;
476 /* If we aren't changing a thing, move on */
477 if (state->guest_priority != MASKED)
480 /* Get the right irq */
481 kvmppc_xive_select_irq(state, &hw_num, &xd);
484 * See comment in xive_lock_and_mask() concerning masking via firmware.
487 if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
488 xive_native_configure_irq(hw_num,
489 kvmppc_xive_vp(xive, state->act_server),
490 state->act_priority, state->number);
491 /* If an EOI is needed, do it here */
493 xive_vm_source_eoi(hw_num, xd);
494 /* If this is not an LSI, force a trigger */
495 if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
496 xive_irq_trigger(xd);
500 /* Old Q set, set PQ to 11 */
502 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
505 * If not old P, then perform an "effective" EOI on the
506 * source. This will handle the cases where a FW EOI is needed.
510 xive_vm_source_eoi(hw_num, xd);
512 /* Synchronize ordering and mark unmasked */
515 state->guest_priority = prio;
519 * Target an interrupt to a given server/prio; this will fall back
520 * to another server if necessary and perform the HW targetting
523 * NOTE: Must be called with the state lock held
525 static int xive_target_interrupt(struct kvm *kvm,
526 struct kvmppc_xive_irq_state *state,
529 struct kvmppc_xive *xive = kvm->arch.xive;
534 * This will return a tentative server and actual
535 * priority. The count for that new target will have
536 * already been incremented.
538 rc = kvmppc_xive_select_target(kvm, &server, prio);
541 * We failed to find a target ? Not much we can do
542 * at least until we support the GIQ.
548 * Increment the old queue pending count if there
549 * was one so that the old queue count gets adjusted later
550 * when observed to be empty.
552 if (state->act_priority != MASKED)
553 xive_inc_q_pending(kvm,
555 state->act_priority);
557 * Update state and HW
559 state->act_priority = prio;
560 state->act_server = server;
562 /* Get the right irq */
563 kvmppc_xive_select_irq(state, &hw_num, NULL);
565 return xive_native_configure_irq(hw_num,
566 kvmppc_xive_vp(xive, server),
567 prio, state->number);
571 * Targetting rules: In order to avoid losing track of
572 * pending interrupts across mask and unmask, which would
573 * allow queue overflows, we implement the following rules:
575 * - Unless it was never enabled (or we run out of capacity)
576 * an interrupt is always targetted at a valid server/queue
577 * pair even when "masked" by the guest. This pair tends to
578 * be the last one used but it can be changed under some
579 * circumstances. That allows us to separate targetting
580 * from masking, we only handle accounting during (re)targetting,
581 * this also allows us to let an interrupt drain into its target
582 * queue after masking, avoiding complex schemes to remove
583 * interrupts out of remote processor queues.
585 * - When masking, we set PQ to 10 and save the previous value of P and Q.
588 * - When unmasking, if saved Q was set, we set PQ to 11
589 * otherwise we leave PQ to the HW state which will be either
590 * 10 if nothing happened or 11 if the interrupt fired while
591 * masked. Effectively we are OR'ing the previous Q into the HW Q.
594 * Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
595 * which will unmask the interrupt and shoot a new one if Q was set.
598 * Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
599 * effectively meaning an H_EOI from the guest is still expected
600 * for that interrupt).
602 * - If H_EOI occurs while masked, we clear the saved P.
604 * - When changing target, we account on the new target and
605 * increment a separate "pending" counter on the old one.
606 * This pending counter will be used to decrement the old
607 * target's count when its queue has been observed empty.
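/*
 * As a quick reference for the ESB games played above and below: a
 * load from one of the XIVE_ESB_SET_PQ_* offsets atomically sets the
 * two PQ bits and returns their previous value (P is bit 1 and Q is
 * bit 0, as decoded in xive_lock_and_mask()). In the scheme described
 * above, PQ=00 is the fully enabled state, PQ=10 is the "soft masked"
 * state that still records a new occurrence in Q, PQ=11 means the
 * interrupt fired while masked, and PQ=01 is used as the "hard off"
 * state for IPIs and cleanup.
 */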
610 int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
613 struct kvmppc_xive *xive = kvm->arch.xive;
614 struct kvmppc_xive_src_block *sb;
615 struct kvmppc_xive_irq_state *state;
623 pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
624 irq, server, priority);
626 /* First, check provisioning of queues */
627 if (priority != MASKED) {
628 mutex_lock(&xive->lock);
629 rc = xive_check_provisioning(xive->kvm,
630 xive_prio_from_guest(priority));
631 mutex_unlock(&xive->lock);
634 pr_devel(" provisioning failure %d !\n", rc);
638 sb = kvmppc_xive_find_source(xive, irq, &idx);
641 state = &sb->irq_state[idx];
644 * We first handle masking/unmasking since the locking
645 * might need to be retried due to EOIs; we'll handle
646 * targetting changes later. These functions will return
647 * with the SB lock held.
649 * xive_lock_and_mask() will also set state->guest_priority
650 * but won't otherwise change other fields of the state.
652 * xive_lock_for_unmask() will not actually unmask; this will
653 * be done later by xive_finish_unmask() once the targetting
654 * has been done, so we don't try to unmask an interrupt
655 * that hasn't yet been targetted.
657 if (priority == MASKED)
658 xive_lock_and_mask(xive, sb, state);
660 xive_lock_for_unmask(sb, state);
664 * Then we handle targetting.
666 * First calculate a new "actual priority"
668 new_act_prio = state->act_priority;
669 if (priority != MASKED)
670 new_act_prio = xive_prio_from_guest(priority);
672 pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
673 new_act_prio, state->act_server, state->act_priority);
676 * Then check if we actually need to change anything,
678 * The condition for re-targetting the interrupt is that
679 * we have a valid new priority (new_act_prio is not 0xff)
680 * and either the server or the priority changed.
682 * Note: If act_priority was ff and the new priority is
683 * also ff, we don't do anything and leave the interrupt
684 * untargetted. An attempt to do an int_on on an
685 * untargetted interrupt will fail. If that is a problem
686 * we could initialize interrupts with valid default priorities.
689 if (new_act_prio != MASKED &&
690 (state->act_server != server ||
691 state->act_priority != new_act_prio))
692 rc = xive_target_interrupt(kvm, state, server, new_act_prio);
695 * Perform the final unmasking of the interrupt source
698 if (priority != MASKED)
699 xive_finish_unmask(xive, sb, state, priority);
702 * Finally, update saved_priority to match. Only int_on/off
703 * set this field to a different value.
705 state->saved_priority = priority;
707 arch_spin_unlock(&sb->lock);
711 int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
714 struct kvmppc_xive *xive = kvm->arch.xive;
715 struct kvmppc_xive_src_block *sb;
716 struct kvmppc_xive_irq_state *state;
722 sb = kvmppc_xive_find_source(xive, irq, &idx);
725 state = &sb->irq_state[idx];
726 arch_spin_lock(&sb->lock);
727 *server = state->act_server;
728 *priority = state->guest_priority;
729 arch_spin_unlock(&sb->lock);
734 int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
736 struct kvmppc_xive *xive = kvm->arch.xive;
737 struct kvmppc_xive_src_block *sb;
738 struct kvmppc_xive_irq_state *state;
744 sb = kvmppc_xive_find_source(xive, irq, &idx);
747 state = &sb->irq_state[idx];
749 pr_devel("int_on(irq=0x%x)\n", irq);
752 * Check if interrupt was not targetted
754 if (state->act_priority == MASKED) {
755 pr_devel("int_on on untargetted interrupt\n");
759 /* If saved_priority is 0xff, do nothing */
760 if (state->saved_priority == MASKED)
764 * Lock and unmask it.
766 xive_lock_for_unmask(sb, state);
767 xive_finish_unmask(xive, sb, state, state->saved_priority);
768 arch_spin_unlock(&sb->lock);
773 int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
775 struct kvmppc_xive *xive = kvm->arch.xive;
776 struct kvmppc_xive_src_block *sb;
777 struct kvmppc_xive_irq_state *state;
783 sb = kvmppc_xive_find_source(xive, irq, &idx);
786 state = &sb->irq_state[idx];
788 pr_devel("int_off(irq=0x%x)\n", irq);
793 state->saved_priority = xive_lock_and_mask(xive, sb, state);
794 arch_spin_unlock(&sb->lock);
799 static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
801 struct kvmppc_xive_src_block *sb;
802 struct kvmppc_xive_irq_state *state;
805 sb = kvmppc_xive_find_source(xive, irq, &idx);
808 state = &sb->irq_state[idx];
813 * Trigger the IPI. This assumes we never restore a pass-through
814 * interrupt which should be safe enough
816 xive_irq_trigger(&state->ipi_data);
821 u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
823 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
828 /* Return the per-cpu state for state saving/migration */
829 return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
830 (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
831 (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
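/*
 * The value built above follows the legacy XICS ICP one_reg layout:
 * CPPR and MFRR in their usual fields and a pending priority of 0xff
 * (i.e. nothing pending), which is all we need to expose for
 * save/restore of this register on top of XIVE.
 */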
834 int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
836 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
837 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
844 /* Grab individual state fields. We don't use pending_pri */
845 cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
846 xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
847 KVM_REG_PPC_ICP_XISR_MASK;
848 mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
850 pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
851 xc->server_num, cppr, mfrr, xisr);
854 * We can't update the state of a "pushed" VCPU, but that
855 * shouldn't happen because the vcpu->mutex makes running a
856 * vcpu mutually exclusive with doing one_reg get/set on it.
858 if (WARN_ON(vcpu->arch.xive_pushed))
861 /* Update VCPU HW saved state */
862 vcpu->arch.xive_saved_state.cppr = cppr;
863 xc->hw_cppr = xc->cppr = cppr;
866 * Update MFRR state. If it's not 0xff, we mark the VCPU as
867 * having a pending MFRR change, which will re-evaluate the
868 * target. The VCPU will thus potentially get a spurious
869 * interrupt but that's not a big deal.
873 xive_irq_trigger(&xc->vp_ipi_data);
876 * Now saved XIRR is "interesting". It means there's something in
877 * the legacy "1 element" queue... for an IPI we simply ignore it,
878 * as the MFRR restore will handle that. For anything else we need
879 * to force a resend of the source.
880 * However the source may not have been setup yet. If that's the
881 * case, we keep that info and increment a counter in the xive to
882 * tell subsequent xive_set_source() to go look.
884 if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
885 xc->delayed_irq = xisr;
886 xive->delayed_irqs++;
887 pr_devel(" xisr restore delayed\n");
893 int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
894 struct irq_desc *host_desc)
896 struct kvmppc_xive *xive = kvm->arch.xive;
897 struct kvmppc_xive_src_block *sb;
898 struct kvmppc_xive_irq_state *state;
899 struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
900 unsigned int host_irq = irq_desc_get_irq(host_desc);
901 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
909 pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n",guest_irq, hw_irq);
911 sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
914 state = &sb->irq_state[idx];
917 * Mark the passed-through interrupt as going to a VCPU,
918 * this will prevent further EOIs and similar operations
919 * from the XIVE code. It will also mask the interrupt
920 * to either PQ=10 or 11 state, the latter if the interrupt
921 * is pending. This will allow us to unmask or retrigger it
922 * after routing it to the guest with a simple EOI.
924 * The "state" argument is a "token", all it needs is to be
925 * non-NULL to switch to passed-through or NULL for the
926 * other way around. We may not yet have an actual VCPU
927 * target here and we don't really care.
929 rc = irq_set_vcpu_affinity(host_irq, state);
931 pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
936 * Mask and read state of IPI. We need to know if its P bit
937 * is set as that means it's potentially already using a
938 * queue entry in the target
940 prio = xive_lock_and_mask(xive, sb, state);
941 pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
942 state->old_p, state->old_q);
944 /* Turn the IPI hard off */
945 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
948 * Reset ESB guest mapping. Needed when ESB pages are exposed
949 * to the guest in XIVE native mode
951 if (xive->ops && xive->ops->reset_mapped)
952 xive->ops->reset_mapped(kvm, guest_irq);
954 /* Grab info about irq */
955 state->pt_number = hw_irq;
956 state->pt_data = irq_data_get_irq_handler_data(host_data);
959 * Configure the IRQ to match the existing configuration of
960 * the IPI if it was already targetted. Otherwise this will
961 * mask the interrupt in a lossy way (act_priority is 0xff)
962 * which is fine for a never started interrupt.
964 xive_native_configure_irq(hw_irq,
965 kvmppc_xive_vp(xive, state->act_server),
966 state->act_priority, state->number);
969 * We do an EOI to enable the interrupt (and retrigger if needed)
970 * if the guest has the interrupt unmasked and the P bit was *not*
971 * set in the IPI. If it was set, we know a slot may still be in
972 * use in the target queue, thus we have to wait for a guest EOI.
975 if (prio != MASKED && !state->old_p)
976 xive_vm_source_eoi(hw_irq, state->pt_data);
978 /* Clear old_p/old_q as they are no longer relevant */
979 state->old_p = state->old_q = false;
981 /* Restore guest prio (unlocks EOI) */
983 state->guest_priority = prio;
984 arch_spin_unlock(&sb->lock);
988 EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
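/*
 * A guest source can thus be backed by two different HW interrupts:
 * the IPI allocated in xive_set_source() (ipi_number/ipi_data) and,
 * while a device interrupt is passed through, the host HW interrupt
 * recorded above (pt_number/pt_data). kvmppc_xive_select_irq() is
 * what the rest of this file uses to pick whichever of the two is
 * currently active for a given source.
 */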
990 int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
991 struct irq_desc *host_desc)
993 struct kvmppc_xive *xive = kvm->arch.xive;
994 struct kvmppc_xive_src_block *sb;
995 struct kvmppc_xive_irq_state *state;
996 unsigned int host_irq = irq_desc_get_irq(host_desc);
1004 pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);
1006 sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
1009 state = &sb->irq_state[idx];
1012 * Mask and read state of IRQ. We need to know if its P bit
1013 * is set as that means it's potentially already using a
1014 * queue entry in the target
1016 prio = xive_lock_and_mask(xive, sb, state);
1017 pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
1018 state->old_p, state->old_q);
1021 * If old_p is set, the interrupt is pending; we switch it to
1022 * PQ=11. This will force a resend in the host so the interrupt
1023 * isn't lost to whatever host driver may pick it up
1026 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
1028 /* Release the passed-through interrupt to the host */
1029 rc = irq_set_vcpu_affinity(host_irq, NULL);
1031 pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
1035 /* Forget about the IRQ */
1036 state->pt_number = 0;
1037 state->pt_data = NULL;
1040 * Reset ESB guest mapping. Needed when ESB pages are exposed
1041 * to the guest in XIVE native mode
1043 if (xive->ops && xive->ops->reset_mapped) {
1044 xive->ops->reset_mapped(kvm, guest_irq);
1047 /* Reconfigure the IPI */
1048 xive_native_configure_irq(state->ipi_number,
1049 kvmppc_xive_vp(xive, state->act_server),
1050 state->act_priority, state->number);
1053 * If old_p is set (we have a queue entry potentially
1054 * occupied) or the interrupt is masked, we set the IPI
1055 * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
1057 if (prio == MASKED || state->old_p)
1058 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
1060 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);
1062 /* Restore guest prio (unlocks EOI) */
1064 state->guest_priority = prio;
1065 arch_spin_unlock(&sb->lock);
1069 EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
1071 void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
1073 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1074 struct kvm *kvm = vcpu->kvm;
1075 struct kvmppc_xive *xive = kvm->arch.xive;
1078 for (i = 0; i <= xive->max_sbid; i++) {
1079 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1083 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
1084 struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
1088 if (state->act_priority == MASKED)
1090 if (state->act_server != xc->server_num)
1094 arch_spin_lock(&sb->lock);
1095 state->act_priority = MASKED;
1096 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
1097 xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
1098 if (state->pt_number) {
1099 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
1100 xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
1102 arch_spin_unlock(&sb->lock);
1106 /* Disable vcpu's escalation interrupt */
1107 if (vcpu->arch.xive_esc_on) {
1108 __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
1109 XIVE_ESB_SET_PQ_01));
1110 vcpu->arch.xive_esc_on = false;
1114 * Clear pointers to escalation interrupt ESB.
1115 * This is safe because the vcpu->mutex is held, preventing
1116 * any other CPU from concurrently executing a KVM_RUN ioctl.
1118 vcpu->arch.xive_esc_vaddr = 0;
1119 vcpu->arch.xive_esc_raddr = 0;
1122 void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
1124 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1125 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
1128 if (!kvmppc_xics_enabled(vcpu))
1134 pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);
1136 /* Ensure no interrupt is still routed to that VP */
1138 kvmppc_xive_disable_vcpu_interrupts(vcpu);
1140 /* Mask the VP IPI */
1141 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
1143 /* Free escalations */
1144 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1145 if (xc->esc_virq[i]) {
1146 free_irq(xc->esc_virq[i], vcpu);
1147 irq_dispose_mapping(xc->esc_virq[i]);
1148 kfree(xc->esc_virq_names[i]);
1152 /* Disable the VP */
1153 xive_native_disable_vp(xc->vp_id);
1155 /* Clear the cam word so guest entry won't try to push context */
1156 vcpu->arch.xive_cam_word = 0;
1158 /* Free the queues */
1159 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1160 struct xive_q *q = &xc->queues[i];
1162 xive_native_disable_queue(xc->vp_id, q, i);
1164 free_pages((unsigned long)q->qpage,
1165 xive->q_page_order);
1172 xive_cleanup_irq_data(&xc->vp_ipi_data);
1173 xive_native_free_irq(xc->vp_ipi);
1178 /* Cleanup the vcpu */
1179 vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
1180 vcpu->arch.xive_vcpu = NULL;
1183 int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
1184 struct kvm_vcpu *vcpu, u32 cpu)
1186 struct kvmppc_xive *xive = dev->private;
1187 struct kvmppc_xive_vcpu *xc;
1190 pr_devel("connect_vcpu(cpu=%d)\n", cpu);
1192 if (dev->ops != &kvm_xive_ops) {
1193 pr_devel("Wrong ops !\n");
1196 if (xive->kvm != vcpu->kvm)
1198 if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
1200 if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
1201 pr_devel("Duplicate !\n");
1204 if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
1205 pr_devel("Out of bounds !\n");
1208 xc = kzalloc(sizeof(*xc), GFP_KERNEL);
1212 /* We need to synchronize with queue provisioning */
1213 mutex_lock(&xive->lock);
1214 vcpu->arch.xive_vcpu = xc;
1217 xc->server_num = cpu;
1218 xc->vp_id = kvmppc_xive_vp(xive, cpu);
1222 r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
1226 /* Configure VCPU fields for use by assembly push/pull */
1227 vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
1228 vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
1231 xc->vp_ipi = xive_native_alloc_irq();
1233 pr_err("Failed to allocate xive irq for VCPU IPI\n");
1237 pr_devel(" IPI=0x%x\n", xc->vp_ipi);
1239 r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
1244 * Enable the VP first as the single escalation mode will
1245 * affect escalation interrupt numbering
1247 r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
1249 pr_err("Failed to enable VP in OPAL, err %d\n", r);
1254 * Initialize queues. Initially we set them all for no queueing
1255 * and we enable escalation for queue 0 only which we'll use for
1256 * our mfrr change notifications. If the VCPU is hot-plugged, we
1257 * do, however, handle provisioning based on the existing "map"
1258 * of enabled queues.
1260 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1261 struct xive_q *q = &xc->queues[i];
1263 /* Single escalation, no queue 7 */
1264 if (i == 7 && xive->single_escalation)
1267 /* Is queue already enabled ? Provision it */
1268 if (xive->qmap & (1 << i)) {
1269 r = xive_provision_queue(vcpu, i);
1270 if (r == 0 && !xive->single_escalation)
1271 kvmppc_xive_attach_escalation(
1272 vcpu, i, xive->single_escalation);
1276 r = xive_native_configure_queue(xc->vp_id,
1277 q, i, NULL, 0, true);
1279 pr_err("Failed to configure queue %d for VCPU %d\n",
1286 /* If not done above, attach priority 0 escalation */
1287 r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation);
1292 r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
1294 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
1297 mutex_unlock(&xive->lock);
1299 kvmppc_xive_cleanup_vcpu(vcpu);
1303 vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
1308 * Scanning of queues before/after migration save
1310 static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
1312 struct kvmppc_xive_src_block *sb;
1313 struct kvmppc_xive_irq_state *state;
1316 sb = kvmppc_xive_find_source(xive, irq, &idx);
1320 state = &sb->irq_state[idx];
1322 /* Some sanity checking */
1323 if (!state->valid) {
1324 pr_err("invalid irq 0x%x in cpu queue!\n", irq);
1329 * If the interrupt is in a queue it should have P set.
1330 * We warn so that it gets reported. A backtrace isn't useful
1331 * so no need to use a WARN_ON.
1333 if (!state->saved_p)
1334 pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);
1337 state->in_queue = true;
1340 static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
1341 struct kvmppc_xive_src_block *sb,
1344 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
1349 /* Mask and save state, this will also sync HW queues */
1350 state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);
1352 /* Transfer P and Q */
1353 state->saved_p = state->old_p;
1354 state->saved_q = state->old_q;
1357 arch_spin_unlock(&sb->lock);
1360 static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
1361 struct kvmppc_xive_src_block *sb,
1364 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
1370 * Lock / exclude EOI (not technically necessary if the
1371 * guest isn't running concurrently). If this becomes a
1372 * performance issue we can probably remove the lock.
1374 xive_lock_for_unmask(sb, state);
1376 /* Restore mask/prio if it wasn't masked */
1377 if (state->saved_scan_prio != MASKED)
1378 xive_finish_unmask(xive, sb, state, state->saved_scan_prio);
1381 arch_spin_unlock(&sb->lock);
1384 static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
1387 u32 toggle = q->toggle;
1391 irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
1393 xive_pre_save_set_queued(xive, irq);
1397 static void xive_pre_save_scan(struct kvmppc_xive *xive)
1399 struct kvm_vcpu *vcpu = NULL;
1403 * See comment in xive_get_source() about how this
1404 * works. Collect a stable state for all interrupts
1406 for (i = 0; i <= xive->max_sbid; i++) {
1407 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1410 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1411 xive_pre_save_mask_irq(xive, sb, j);
1414 /* Then scan the queues and update the "in_queue" flag */
1415 kvm_for_each_vcpu(i, vcpu, xive->kvm) {
1416 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1419 for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
1420 if (xc->queues[j].qpage)
1421 xive_pre_save_queue(xive, &xc->queues[j]);
1425 /* Finally restore interrupt states */
1426 for (i = 0; i <= xive->max_sbid; i++) {
1427 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1430 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1431 xive_pre_save_unmask_irq(xive, sb, j);
1435 static void xive_post_save_scan(struct kvmppc_xive *xive)
1439 /* Clear all the in_queue flags */
1440 for (i = 0; i <= xive->max_sbid; i++) {
1441 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1444 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1445 sb->irq_state[j].in_queue = false;
1448 /* Next get_source() will do a new scan */
1449 xive->saved_src_count = 0;
1453 * This returns the source configuration and state to user space.
1455 static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
1457 struct kvmppc_xive_src_block *sb;
1458 struct kvmppc_xive_irq_state *state;
1459 u64 __user *ubufp = (u64 __user *) addr;
1463 sb = kvmppc_xive_find_source(xive, irq, &idx);
1467 state = &sb->irq_state[idx];
1472 pr_devel("get_source(%ld)...\n", irq);
1475 * So to properly save the state into something that looks like a
1476 * XICS migration stream we cannot treat interrupts individually.
1478 * We need, instead, mask them all (& save their previous PQ state)
1479 * to get a stable state in the HW, then sync them to ensure that
1480 * any interrupt that had already fired hits its queue, and finally
1481 * scan all the queues to collect which interrupts are still present
1482 * in the queues, so we can set the "pending" flag on them and
1483 * they can be resent on restore.
1485 * So we do it all when the "first" interrupt gets saved, all the
1486 * state is collected at that point, the rest of xive_get_source()
1487 * will merely collect and convert that state to the expected
1488 * userspace bit mask.
1490 if (xive->saved_src_count == 0)
1491 xive_pre_save_scan(xive);
1492 xive->saved_src_count++;
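/*
 * The 64-bit value we build below mimics the XICS source format
 * expected by userspace: the destination server in the low bits, the
 * priority at KVM_XICS_PRIORITY_SHIFT, and the KVM_XICS_MASKED,
 * LEVEL_SENSITIVE, PENDING, PRESENTED and QUEUED flags on top, so an
 * XICS-only userspace can migrate the state unchanged.
 */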
1494 /* Convert saved state into something compatible with xics */
1495 val = state->act_server;
1496 prio = state->saved_scan_prio;
1498 if (prio == MASKED) {
1499 val |= KVM_XICS_MASKED;
1500 prio = state->saved_priority;
1502 val |= prio << KVM_XICS_PRIORITY_SHIFT;
1504 val |= KVM_XICS_LEVEL_SENSITIVE;
1506 val |= KVM_XICS_PENDING;
1509 val |= KVM_XICS_PRESENTED;
1512 val |= KVM_XICS_QUEUED;
1515 * We mark it pending (which will attempt a re-delivery)
1516 * if we are in a queue *or* we were masked and had
1517 * Q set, which is equivalent to the XICS "masked pending" state.
1520 if (state->in_queue || (prio == MASKED && state->saved_q))
1521 val |= KVM_XICS_PENDING;
1525 * If that was the last interrupt saved, reset the in_queue flags.
1528 if (xive->saved_src_count == xive->src_count)
1529 xive_post_save_scan(xive);
1531 /* Copy the result to userspace */
1532 if (put_user(val, ubufp))
1538 struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
1539 struct kvmppc_xive *xive, int irq)
1541 struct kvmppc_xive_src_block *sb;
1544 bid = irq >> KVMPPC_XICS_ICS_SHIFT;
1546 mutex_lock(&xive->lock);
1548 /* block already exists - somebody else got here first */
1549 if (xive->src_blocks[bid])
1552 /* Create the ICS */
1553 sb = kzalloc(sizeof(*sb), GFP_KERNEL);
1559 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1560 sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
1561 sb->irq_state[i].eisn = 0;
1562 sb->irq_state[i].guest_priority = MASKED;
1563 sb->irq_state[i].saved_priority = MASKED;
1564 sb->irq_state[i].act_priority = MASKED;
1567 xive->src_blocks[bid] = sb;
1569 if (bid > xive->max_sbid)
1570 xive->max_sbid = bid;
1573 mutex_unlock(&xive->lock);
1574 return xive->src_blocks[bid];
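/*
 * Source blocks mirror the XICS ICS layout: the block id is the upper
 * part of the guest interrupt number (irq >> KVMPPC_XICS_ICS_SHIFT)
 * and each block holds KVMPPC_XICS_IRQ_PER_ICS sources. For example,
 * assuming the usual 12-bit ICS shift, interrupt 0x1003 would live at
 * index 3 of block 1.
 */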
1577 static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
1579 struct kvm *kvm = xive->kvm;
1580 struct kvm_vcpu *vcpu = NULL;
1583 kvm_for_each_vcpu(i, vcpu, kvm) {
1584 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1589 if (xc->delayed_irq == irq) {
1590 xc->delayed_irq = 0;
1591 xive->delayed_irqs--;
1598 static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
1600 struct kvmppc_xive_src_block *sb;
1601 struct kvmppc_xive_irq_state *state;
1602 u64 __user *ubufp = (u64 __user *) addr;
1605 u8 act_prio, guest_prio;
1609 if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
1612 pr_devel("set_source(irq=0x%lx)\n", irq);
1614 /* Find the source */
1615 sb = kvmppc_xive_find_source(xive, irq, &idx);
1617 pr_devel("No source, creating source block...\n");
1618 sb = kvmppc_xive_create_src_block(xive, irq);
1620 pr_devel("Failed to create block...\n");
1624 state = &sb->irq_state[idx];
1626 /* Read user passed data */
1627 if (get_user(val, ubufp)) {
1628 pr_devel("fault getting user info !\n");
1632 server = val & KVM_XICS_DESTINATION_MASK;
1633 guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;
1635 pr_devel(" val=0x016%llx (server=0x%x, guest_prio=%d)\n",
1636 val, server, guest_prio);
1639 * If the source doesn't already have an IPI, allocate
1640 * one and get the corresponding data
1642 if (!state->ipi_number) {
1643 state->ipi_number = xive_native_alloc_irq();
1644 if (state->ipi_number == 0) {
1645 pr_devel("Failed to allocate IPI !\n");
1648 xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
1649 pr_devel(" src_ipi=0x%x\n", state->ipi_number);
1653 * We use lock_and_mask() to set us in the right masked
1654 * state. We will override that state from the saved state
1655 * further down, but this will handle the cases of interrupts
1656 * that need FW masking. We set the initial guest_priority to
1657 * 0 before calling it to ensure it actually performs the masking.
1659 state->guest_priority = 0;
1660 xive_lock_and_mask(xive, sb, state);
1663 * Now, we select a target if we have one. If we don't we
1664 * leave the interrupt untargetted. It means that an interrupt
1665 * can become "untargetted" across migration if it was masked
1666 * by set_xive() but there is little we can do about it.
1669 /* First convert prio and mark interrupt as untargetted */
1670 act_prio = xive_prio_from_guest(guest_prio);
1671 state->act_priority = MASKED;
1674 * We need to drop the lock due to the mutex below. Hopefully
1675 * nothing is touching that interrupt yet since it hasn't been
1676 * advertised to a running guest yet
1678 arch_spin_unlock(&sb->lock);
1680 /* If we have a priority, target the interrupt */
1681 if (act_prio != MASKED) {
1682 /* First, check provisioning of queues */
1683 mutex_lock(&xive->lock);
1684 rc = xive_check_provisioning(xive->kvm, act_prio);
1685 mutex_unlock(&xive->lock);
1687 /* Target interrupt */
1689 rc = xive_target_interrupt(xive->kvm, state,
1692 * If provisioning or targetting failed, leave it
1693 * alone and masked. It will remain disabled until
1694 * the guest re-targets it.
1699 * Find out if this was a delayed irq stashed in an ICP,
1700 * in which case, treat it as pending
1702 if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
1703 val |= KVM_XICS_PENDING;
1704 pr_devel(" Found delayed ! forcing PENDING !\n");
1707 /* Cleanup the SW state */
1708 state->old_p = false;
1709 state->old_q = false;
1711 state->asserted = false;
1713 /* Restore LSI state */
1714 if (val & KVM_XICS_LEVEL_SENSITIVE) {
1716 if (val & KVM_XICS_PENDING)
1717 state->asserted = true;
1718 pr_devel(" LSI ! Asserted=%d\n", state->asserted);
1722 * Restore P and Q. If the interrupt was pending, we
1723 * force Q and !P, which will trigger a resend.
1725 * That means that a guest that had both an interrupt
1726 * pending (queued) and Q set will restore with only
1727 * one instance of that interrupt instead of 2, but that
1728 * is perfectly fine as coalescing interrupts that haven't
1729 * been presented yet is always allowed.
1731 if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
1732 state->old_p = true;
1733 if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
1734 state->old_q = true;
1736 pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);
1739 * If the interrupt was unmasked, update guest priority and
1740 * perform the appropriate state transition and do a
1741 * re-trigger if necessary.
1743 if (val & KVM_XICS_MASKED) {
1744 pr_devel(" masked, saving prio\n");
1745 state->guest_priority = MASKED;
1746 state->saved_priority = guest_prio;
1748 pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
1749 xive_finish_unmask(xive, sb, state, guest_prio);
1750 state->saved_priority = guest_prio;
1753 /* Increment the number of valid sources and mark this one valid */
1756 state->valid = true;
1761 int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1764 struct kvmppc_xive *xive = kvm->arch.xive;
1765 struct kvmppc_xive_src_block *sb;
1766 struct kvmppc_xive_irq_state *state;
1772 sb = kvmppc_xive_find_source(xive, irq, &idx);
1776 /* Perform locklessly .... (we need to do some RCUisms here...) */
1777 state = &sb->irq_state[idx];
1781 /* We don't allow a trigger on a passed-through interrupt */
1782 if (state->pt_number)
1785 if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
1786 state->asserted = 1;
1787 else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
1788 state->asserted = 0;
1792 /* Trigger the IPI */
1793 xive_irq_trigger(&state->ipi_data);
1798 static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1800 struct kvmppc_xive *xive = dev->private;
1802 /* We honor the existing XICS ioctl */
1803 switch (attr->group) {
1804 case KVM_DEV_XICS_GRP_SOURCES:
1805 return xive_set_source(xive, attr->attr, attr->addr);
1810 static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1812 struct kvmppc_xive *xive = dev->private;
1814 /* We honor the existing XICS ioctl */
1815 switch (attr->group) {
1816 case KVM_DEV_XICS_GRP_SOURCES:
1817 return xive_get_source(xive, attr->attr, attr->addr);
1822 static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1824 /* We honor the same limits as XICS, at least for now */
1825 switch (attr->group) {
1826 case KVM_DEV_XICS_GRP_SOURCES:
1827 if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
1828 attr->attr < KVMPPC_XICS_NR_IRQS)
1835 static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
1837 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
1838 xive_native_configure_irq(hw_num, 0, MASKED, 0);
1841 void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
1845 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1846 struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
1851 kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
1852 xive_cleanup_irq_data(&state->ipi_data);
1853 xive_native_free_irq(state->ipi_number);
1855 /* Pass-through, cleanup too but keep IRQ hw data */
1856 if (state->pt_number)
1857 kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);
1859 state->valid = false;
1864 * Called when device fd is closed. kvm->lock is held.
1866 static void kvmppc_xive_release(struct kvm_device *dev)
1868 struct kvmppc_xive *xive = dev->private;
1869 struct kvm *kvm = xive->kvm;
1870 struct kvm_vcpu *vcpu;
1873 pr_devel("Releasing xive device\n");
1876 * Since this is the device release function, we know that
1877 * userspace does not have any open fd referring to the
1878 * device. Therefore there can not be any of the device
1879 * attribute set/get functions being executed concurrently,
1880 * and similarly, the connect_vcpu and set/clr_mapped
1881 * functions also cannot be being executed.
1884 debugfs_remove(xive->dentry);
1887 * We should clean up the vCPU interrupt presenters first.
1889 kvm_for_each_vcpu(i, vcpu, kvm) {
1891 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
1892 * (i.e. kvmppc_xive_[gs]et_icp) can be done concurrently.
1893 * Holding the vcpu->mutex also means that the vcpu cannot
1894 * be executing the KVM_RUN ioctl, and therefore it cannot
1895 * be executing the XIVE push or pull code or accessing
1896 * the XIVE MMIO regions.
1898 mutex_lock(&vcpu->mutex);
1899 kvmppc_xive_cleanup_vcpu(vcpu);
1900 mutex_unlock(&vcpu->mutex);
1904 * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
1905 * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
1906 * against xive code getting called during vcpu execution or
1907 * set/get one_reg operations.
1909 kvm->arch.xive = NULL;
1911 /* Mask and free interrupts */
1912 for (i = 0; i <= xive->max_sbid; i++) {
1913 if (xive->src_blocks[i])
1914 kvmppc_xive_free_sources(xive->src_blocks[i]);
1915 kfree(xive->src_blocks[i]);
1916 xive->src_blocks[i] = NULL;
1919 if (xive->vp_base != XIVE_INVALID_VP)
1920 xive_native_free_vp_block(xive->vp_base);
1923 * A reference to the kvmppc_xive pointer is now kept under
1924 * the xive_devices struct of the machine for reuse. It is
1925 * freed when the VM is destroyed, for now, until we fix all the execution paths.
1933 * When the guest chooses the interrupt mode (XICS legacy or XIVE
1934 * native), the VM will switch KVM devices. The previous device will
1935 * be "released" before the new one is created.
1937 * Until we are sure all execution paths are well protected, provide a
1938 * fail safe (transitional) method for device destruction, in which
1939 * the XIVE device pointer is recycled and not directly freed.
1941 struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type)
1943 struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ?
1944 &kvm->arch.xive_devices.native :
1945 &kvm->arch.xive_devices.xics_on_xive;
1946 struct kvmppc_xive *xive = *kvm_xive_device;
1949 xive = kzalloc(sizeof(*xive), GFP_KERNEL);
1950 *kvm_xive_device = xive;
1952 memset(xive, 0, sizeof(*xive));
1959 * Create a XICS device with XIVE backend. kvm->lock is held.
1961 static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
1963 struct kvmppc_xive *xive;
1964 struct kvm *kvm = dev->kvm;
1967 pr_devel("Creating xive for partition\n");
1969 xive = kvmppc_xive_get_device(kvm, type);
1973 dev->private = xive;
1976 mutex_init(&xive->lock);
1978 /* Already there ? */
1982 kvm->arch.xive = xive;
1984 /* We use the default queue size set by the host */
1985 xive->q_order = xive_native_default_eq_shift();
1986 if (xive->q_order < PAGE_SHIFT)
1987 xive->q_page_order = 0;
1989 xive->q_page_order = xive->q_order - PAGE_SHIFT;
1991 /* Allocate a bunch of VPs */
1992 xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
1993 pr_devel("VP_Base=%x\n", xive->vp_base);
1995 if (xive->vp_base == XIVE_INVALID_VP)
1998 xive->single_escalation = xive_native_has_single_escalation();
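/*
 * Single escalation is a XIVE facility that lets a VP use one
 * escalation interrupt for all of its queues instead of one per
 * queue. When it is available we only attach an escalation to queue
 * 0, skip queue 7 entirely (see kvmppc_xive_connect_vcpu()), and the
 * per-vCPU escalation gets the shorter "kvm-<lpid>-<server>" name.
 */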
2006 int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
2008 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2011 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
2012 struct xive_q *q = &xc->queues[i];
2015 if (!q->qpage && !xc->esc_virq[i])
2018 seq_printf(m, " [q%d]: ", i);
2022 i0 = be32_to_cpup(q->qpage + idx);
2023 idx = (idx + 1) & q->msk;
2024 i1 = be32_to_cpup(q->qpage + idx);
2025 seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
2028 if (xc->esc_virq[i]) {
2029 struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
2030 struct xive_irq_data *xd =
2031 irq_data_get_irq_handler_data(d);
2032 u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
2034 seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
2035 (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
2036 (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
2037 xc->esc_virq[i], pq, xd->eoi_page);
2044 static int xive_debug_show(struct seq_file *m, void *private)
2046 struct kvmppc_xive *xive = m->private;
2047 struct kvm *kvm = xive->kvm;
2048 struct kvm_vcpu *vcpu;
2049 u64 t_rm_h_xirr = 0;
2050 u64 t_rm_h_ipoll = 0;
2051 u64 t_rm_h_cppr = 0;
2054 u64 t_vm_h_xirr = 0;
2055 u64 t_vm_h_ipoll = 0;
2056 u64 t_vm_h_cppr = 0;
2064 seq_printf(m, "=========\nVCPU state\n=========\n");
2066 kvm_for_each_vcpu(i, vcpu, kvm) {
2067 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2072 seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
2073 " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
2074 xc->server_num, xc->cppr, xc->hw_cppr,
2075 xc->mfrr, xc->pending,
2076 xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
2078 kvmppc_xive_debug_show_queues(m, vcpu);
2080 t_rm_h_xirr += xc->stat_rm_h_xirr;
2081 t_rm_h_ipoll += xc->stat_rm_h_ipoll;
2082 t_rm_h_cppr += xc->stat_rm_h_cppr;
2083 t_rm_h_eoi += xc->stat_rm_h_eoi;
2084 t_rm_h_ipi += xc->stat_rm_h_ipi;
2085 t_vm_h_xirr += xc->stat_vm_h_xirr;
2086 t_vm_h_ipoll += xc->stat_vm_h_ipoll;
2087 t_vm_h_cppr += xc->stat_vm_h_cppr;
2088 t_vm_h_eoi += xc->stat_vm_h_eoi;
2089 t_vm_h_ipi += xc->stat_vm_h_ipi;
2092 seq_printf(m, "Hcalls totals\n");
2093 seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
2094 seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
2095 seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
2096 seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
2097 seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);
2102 DEFINE_SHOW_ATTRIBUTE(xive_debug);
2104 static void xive_debugfs_init(struct kvmppc_xive *xive)
2108 name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
2110 pr_err("%s: no memory for name\n", __func__);
2114 xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
2115 xive, &xive_debug_fops);
2117 pr_debug("%s: created %s\n", __func__, name);
2121 static void kvmppc_xive_init(struct kvm_device *dev)
2123 struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;
2125 /* Register some debug interfaces */
2126 xive_debugfs_init(xive);
2129 struct kvm_device_ops kvm_xive_ops = {
2131 .create = kvmppc_xive_create,
2132 .init = kvmppc_xive_init,
2133 .release = kvmppc_xive_release,
2134 .set_attr = xive_set_attr,
2135 .get_attr = xive_get_attr,
2136 .has_attr = xive_has_attr,
2139 void kvmppc_xive_init_module(void)
2141 __xive_vm_h_xirr = xive_vm_h_xirr;
2142 __xive_vm_h_ipoll = xive_vm_h_ipoll;
2143 __xive_vm_h_ipi = xive_vm_h_ipi;
2144 __xive_vm_h_cppr = xive_vm_h_cppr;
2145 __xive_vm_h_eoi = xive_vm_h_eoi;
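/*
 * These function pointers are how the rest of the KVM XICS code gets
 * at the virtual mode hcall implementations generated from
 * book3s_xive_template.c above; kvmppc_xive_exit_module() clears them
 * again when the module goes away.
 */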
2148 void kvmppc_xive_exit_module(void)
2150 __xive_vm_h_xirr = NULL;
2151 __xive_vm_h_ipoll = NULL;
2152 __xive_vm_h_ipi = NULL;
2153 __xive_vm_h_cppr = NULL;
2154 __xive_vm_h_eoi = NULL;