/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

/* File to be #included by other .c files */
#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)
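/*
 * Example (illustrative): when the including file defines X_PFX as
 * xive_vm_, GLUE(X_PFX,h_xirr) expands, via XGLUE, to xive_vm_h_xirr.
 */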
/* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */
#define XICS_DUMMY	1
static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
{
	u8 cppr;
	u16 ack;
	/*
	 * Ensure any previous store to CPPR is ordered vs.
	 * the subsequent loads from PIPR or ACK.
	 */
	eieio();
	/*
	 * DD1 bug workaround: If PIPR is less favored than CPPR
	 * ignore the interrupt or we might incorrectly lose an IPB
	 * bit.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		__be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
		u8 pipr = be64_to_cpu(qw1) & 0xff;

		if (pipr >= xc->hw_cppr)
			return;
	}
	/* Perform the acknowledge OS to register cycle. */
	ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
	/* Synchronize subsequent queue accesses */
	mb();
	/* XXX Check grouping level */

	/* Anything ? */
	if (!((ack >> 8) & TM_QW1_NSR_EO))
		return;
	/* Grab CPPR of the most favored pending interrupt */
	cppr = ack & 0xff;
	if (cppr < 8)
		xc->pending |= 1 << cppr;
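	/*
	 * Example (illustrative): an ACK returning CPPR 3 sets bit 3,
	 * i.e. xc->pending |= 0x08; one bit per pending priority 0-7.
	 */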
#ifdef XIVE_RUNTIME_CHECKS
	/* Check consistency */
	if (cppr >= xc->hw_cppr)
		pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
			smp_processor_id(), cppr, xc->hw_cppr);
#endif
	/*
	 * Update our image of the HW CPPR. We don't yet modify
	 * xc->cppr, this will be done as we scan for interrupts
	 * in the queues.
	 */
	xc->hw_cppr = cppr;
}
static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;
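	/*
	 * Example (illustrative, assuming the usual ESB offsets): a
	 * load from XIVE_ESB_SET_PQ_00 (0xc00) is turned into 0xcc00
	 * so that the intended bits still reach the page on DD1 parts.
	 */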
	val = __x_readq(__x_eoi_page(xd) + offset);
#ifdef __LITTLE_ENDIAN__
	val >>= 64-8;
#endif
	return (u8)val;
}
static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
{
	/* If the XIVE supports the new "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
		opal_int_eoi(hw_irq);
	} else {
		uint64_t eoi_val;
		/*
		 * Otherwise for EOI, we use the special MMIO that does
		 * a clear of both P and Q and returns the old Q,
		 * except for LSIs where we use the "EOI cycle" special
		 * load.
		 *
		 * This allows us to then do a re-trigger if Q was set
		 * rather than synthesizing an interrupt in software.
		 *
		 * For LSIs, using the HW EOI cycle works around a problem
		 * on P9 DD1 PHBs where the other ESB accesses don't work
		 * properly.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			__x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
		else {
			eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
			/* Re-trigger if needed */
			if ((eoi_val & 1) && __x_trig_page(xd))
				__x_writeq(0, __x_trig_page(xd));
		}
	}
}
static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
				       u8 pending, int scan_type)
{
	u32 hirq = 0;
	u8 prio = 0xff;

	/* Find highest pending priority */
	while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
		struct xive_q *q;
		u32 idx, toggle;
		__be32 *qpage;
		/*
		 * If pending is 0 this will return 0xff which is what
		 * we want.
		 */
		prio = ffs(pending) - 1;
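		/*
		 * Example (illustrative): pending = 0x28 (priorities 3
		 * and 5 pending) gives ffs() = 4, hence prio = 3, the
		 * most favored one; pending = 0 gives prio = 0xff.
		 */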
		/*
		 * If the most favoured prio we found pending is equally
		 * or less favored than a pending IPI, we return the IPI
		 * instead.
		 *
		 * Note: If pending was 0 and mfrr is 0xff, we will
		 * not spuriously take an IPI because mfrr cannot
		 * then be smaller than cppr.
		 */
		if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
			prio = xc->mfrr;
			hirq = XICS_IPI;
			break;
		}
		/* Don't scan past the guest cppr */
		if (prio >= xc->cppr || prio > 7)
			break;

		/* Grab queue and pointers */
		q = &xc->queues[prio];
		idx = q->idx;
		toggle = q->toggle;
		/*
		 * Snapshot the queue page. The test further down for EOI
		 * must use the same "copy" that was used by __xive_read_eq
		 * since qpage can be set concurrently and we don't want
		 * to miss an EOI.
		 */
		qpage = READ_ONCE(q->qpage);
skip_ipi:
		/*
		 * Try to fetch from the queue. Will return 0 for a
		 * non-queueing priority (i.e., qpage = 0).
		 */
		hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);
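		/*
		 * (Note) Each EQ entry is a big-endian word whose top
		 * bit is the generation/toggle bit and whose low 31
		 * bits are the IRQ number, as also decoded by the queue
		 * walk in scan_for_rerouted_irqs() below.
		 */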
		/*
		 * If this was a signal for an MFRR change done by
		 * H_IPI we skip it. Additionally, if we were fetching
		 * we EOI it now, thus re-enabling reception of a new
		 * such signal.
		 *
		 * We also need to do that if prio is 0 and we had no
		 * page for the queue. In this case, we have a non-queued
		 * IPI that needs to be EOId.
		 *
		 * This is safe because if we have another pending MFRR
		 * change that wasn't observed above, the Q bit will have
		 * been set and another occurrence of the IPI will trigger.
		 */
		if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
			if (scan_type == scan_fetch)
				GLUE(X_PFX,source_eoi)(xc->vp_ipi,
						       &xc->vp_ipi_data);
			/* Loop back on same queue with updated idx/toggle */
#ifdef XIVE_RUNTIME_CHECKS
			WARN_ON(hirq && hirq != XICS_IPI);
#endif
			if (hirq)
				goto skip_ipi;
		}
		/* If it's the dummy interrupt, continue searching */
		if (hirq == XICS_DUMMY)
			goto skip_ipi;
		/* If fetching, update queue pointers */
		if (scan_type == scan_fetch) {
			q->idx = idx;
			q->toggle = toggle;
		}

		/* Something found, stop searching */
		if (hirq)
			break;
		/* Clear the pending bit on the now empty queue */
		pending &= ~(1 << prio);
		/*
		 * Check if the queue count needs adjusting due to
		 * interrupts being moved away.
		 */
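		/*
		 * (Illustration) q->count tracks how many interrupts
		 * target this queue; when the guest reroutes one away,
		 * the common code accumulates the change in
		 * q->pending_count instead of touching q->count from
		 * another CPU, and we fold it back in here.
		 */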
		if (atomic_read(&q->pending_count)) {
			int p = atomic_xchg(&q->pending_count, 0);

			if (p) {
#ifdef XIVE_RUNTIME_CHECKS
				WARN_ON(p > atomic_read(&q->count));
#endif
				atomic_sub(p, &q->count);
			}
		}
	}
	/* If we are just taking a "peek", do nothing else */
	if (scan_type == scan_poll)
		return hirq;

	/* Update the pending bits */
	xc->pending = pending;
	/*
	 * If this is an EOI that's it, no CPPR adjustment done here,
	 * all we needed was to clean up the stale pending bits and
	 * check if there's anything left.
	 */
	if (scan_type == scan_eoi)
		return hirq;
	/*
	 * If we found an interrupt, adjust what the guest CPPR should
	 * be as if we had just fetched that interrupt from HW.
	 *
	 * Note: This can only make xc->cppr smaller as the previous
	 * loop will only exit with hirq != 0 if prio is lower than
	 * the current xc->cppr. Thus we don't need to re-check xc->mfrr
	 * for pending IPIs.
	 */
	if (hirq)
		xc->cppr = prio;
	/*
	 * If it was an IPI the HW CPPR might have been lowered too much
	 * as the HW interrupt we use for IPIs is routed to priority 0.
	 *
	 * We re-sync it here.
	 */
	if (xc->cppr != xc->hw_cppr) {
		xc->hw_cppr = xc->cppr;
		__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
	}

	return hirq;
}
X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u8 old_cppr;
	u32 hirq;

	pr_devel("H_XIRR\n");

	xc->GLUE(X_STAT_PFX,h_xirr)++;
	/* First collect pending bits from HW */
	GLUE(X_PFX,ack_pending)(xc);
	/*
	 * Cleanup the old-style bits if needed (they may have been
	 * set by a pull or by escalation interrupts).
	 */
	if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions))
		clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
			  &vcpu->arch.pending_exceptions);
	pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
		 xc->pending, xc->hw_cppr, xc->cppr);
	/* Grab previous CPPR and reverse map it */
	old_cppr = xive_prio_to_guest(xc->cppr);
	/* Scan for actual interrupts */
	hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch);

	pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
		 hirq, xc->hw_cppr, xc->cppr);
#ifdef XIVE_RUNTIME_CHECKS
	/* That should never hit */
	if (hirq & 0xff000000)
		pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
#endif
	/*
	 * XXX We could check if the interrupt is masked here and
	 * filter it. If we chose to do so, we would need to do:
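	 *
	 * (sketch only, not implemented)
	 *
	 *    if (masked) {
	 *        lock();
	 *        if (masked) {
	 *            old_Q = true;
	 *            hirq = 0;
	 *        }
	 *        unlock();
	 *    }
	 */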
	/* Return interrupt and old CPPR in GPR4 */
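	/*
	 * Example (illustrative): old_cppr 0x05 and hirq 0x00000010
	 * yield GPR4 = 0x05000010, the XIRR layout the guest expects:
	 * CPPR in the top byte, source number in the low 24 bits.
	 */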
	vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);

	return H_SUCCESS;
}
X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u8 pending = xc->pending;
	u32 hirq;

	pr_devel("H_IPOLL(server=%ld)\n", server);

	xc->GLUE(X_STAT_PFX,h_ipoll)++;

	/* Grab the target VCPU if not the current one */
	if (xc->server_num != server) {
		vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
		if (!vcpu)
			return H_PARAMETER;
		xc = vcpu->arch.xive_vcpu;
		/* Scan all priorities */
		pending = 0xff;
	} else {
		/* Grab pending interrupt if any */
		__be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
		u8 pipr = be64_to_cpu(qw1) & 0xff;

		if (pipr < 8)
			pending |= 1 << pipr;
	}
	hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);

	/* Return interrupt and old CPPR in GPR4 */
	vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);

	return H_SUCCESS;
}
static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
{
	u8 pending, prio;

	pending = xc->pending;
	if (xc->mfrr != 0xff) {
		if (xc->mfrr < 8)
			pending |= 1 << xc->mfrr;
		else
			pending |= 0x80;
	}
	if (!pending)
		return;
	prio = ffs(pending) - 1;
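	/*
	 * (Note) Writing the priority to the "set OS pending" TIMA
	 * special area sets the corresponding IPB bit and lets the HW
	 * re-evaluate whether to signal the guest.
	 */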
	__x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
}
static void GLUE(X_PFX,scan_for_rerouted_irqs)(struct kvmppc_xive *xive,
					       struct kvmppc_xive_vcpu *xc)
{
	unsigned int prio;

	/* For each priority that is now masked */
	for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
		struct xive_q *q = &xc->queues[prio];
		struct kvmppc_xive_irq_state *state;
		struct kvmppc_xive_src_block *sb;
		u32 idx, toggle, entry, irq, hw_num;
		struct xive_irq_data *xd;
		__be32 *qpage;
		u16 src;
		idx = q->idx;
		toggle = q->toggle;
		qpage = READ_ONCE(q->qpage);
		if (!qpage)
			continue;
		/* For each interrupt in the queue */
		for (;;) {
			entry = be32_to_cpup(qpage + idx);

			/* No more ? */
			if ((entry >> 31) == toggle)
				break;
			irq = entry & 0x7fffffff;
			/* Skip dummies and IPIs */
			if (irq == XICS_DUMMY || irq == XICS_IPI)
				goto next;
			sb = kvmppc_xive_find_source(xive, irq, &src);
			if (!sb)
				goto next;
			state = &sb->irq_state[src];
			/* Has it been rerouted ? */
			if (xc->server_num == state->act_server)
				goto next;

			/*
			 * All right, it *has* been re-routed, kill it from
			 * the queue.
			 */
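			/*
			 * (Note) Masking with 0x80000000 preserves the
			 * entry's generation bit; only the payload is
			 * replaced with XICS_DUMMY so queue walkers
			 * still see a valid slot.
			 */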
			qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY);
			/* Find the HW interrupt */
			kvmppc_xive_select_irq(state, &hw_num, &xd);
			/* If it's not an LSI, set PQ to 11: the EOI will force a resend */
			if (!(xd->flags & XIVE_IRQ_FLAG_LSI))
				GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_11);

			/* EOI the source */
			GLUE(X_PFX,source_eoi)(hw_num, xd);
next:
			idx = (idx + 1) & q->msk;
		}
	}
}
X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	u8 old_cppr;

	pr_devel("H_CPPR(cppr=%ld)\n", cppr);

	xc->GLUE(X_STAT_PFX,h_cppr)++;
	/* Map CPPR */
	cppr = xive_prio_from_guest(cppr);

	/* Remember old and update SW state */
	old_cppr = xc->cppr;
	xc->cppr = cppr;
	/*
	 * Order the above update of xc->cppr with the subsequent
	 * read of xc->mfrr inside push_pending_to_hw()
	 */
	smp_mb();
	if (cppr > old_cppr) {
		/*
		 * We are masking less, we need to look for pending things
		 * to deliver and set VP pending bits accordingly to trigger
		 * a new interrupt otherwise we might miss MFRR changes for
		 * which we have optimized out sending an IPI signal.
		 */
		GLUE(X_PFX,push_pending_to_hw)(xc);
	} else {
		/*
		 * We are masking more, we need to check the queue for any
		 * interrupt that has been routed to another CPU, take
		 * it out (replace it with the dummy) and retrigger it.
		 *
		 * This is necessary since those interrupts may otherwise
		 * never be processed, at least not until this CPU restores
		 * a lower CPPR.
		 *
		 * This is in theory racy vs. HW adding new interrupts to
		 * the queue. In practice this works because the interesting
		 * cases are when the guest has done a set_xive() to move the
		 * interrupt away, which flushes the xive, followed by the
		 * target CPU doing a H_CPPR. So any new interrupt coming into
		 * the queue must still be routed to us and isn't a source
		 * of concern.
		 */
		GLUE(X_PFX,scan_for_rerouted_irqs)(xive, xc);
	}
	/* Apply new CPPR */
	xc->hw_cppr = cppr;
	__x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR);

	return H_SUCCESS;
}
X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_irq_data *xd;
	u8 new_cppr = xirr >> 24;
	u32 irq = xirr & 0x00ffffff, hw_num;
	u16 src;
	int rc = 0;
	pr_devel("H_EOI(xirr=%08lx)\n", xirr);

	xc->GLUE(X_STAT_PFX,h_eoi)++;

	xc->cppr = xive_prio_from_guest(new_cppr);
	/*
	 * IPIs are synthesized from MFRR and thus don't need
	 * any special EOI handling. The underlying interrupt
	 * used to signal MFRR changes is EOId when fetched from
	 * the queue.
	 */
	if (irq == XICS_IPI || irq == 0) {
		/*
		 * This barrier orders the setting of xc->cppr vs.
		 * subsequent test of xc->mfrr done inside
		 * scan_interrupts and push_pending_to_hw
		 */
		smp_mb();
		goto bail;
	}
	/* Find interrupt source */
	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb) {
		pr_devel(" source not found !\n");
		rc = H_PARAMETER;
		smp_mb();
		goto bail;
	}
	state = &sb->irq_state[src];
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	state->in_eoi = true;
	/*
	 * This barrier orders both setting of in_eoi above vs.
	 * subsequent test of guest_priority, and the setting
	 * of xc->cppr vs. subsequent test of xc->mfrr done inside
	 * scan_interrupts and push_pending_to_hw
	 */
	smp_mb();
again:
	if (state->guest_priority == MASKED) {
		arch_spin_lock(&sb->lock);
		if (state->guest_priority != MASKED) {
			arch_spin_unlock(&sb->lock);
			goto again;
		}
		pr_devel(" EOI on saved P...\n");
		/* Clear old_p, that will cause unmask to perform an EOI */
		state->old_p = false;

		arch_spin_unlock(&sb->lock);
	} else {
		pr_devel(" EOI on source...\n");
		/* Perform EOI on the source */
		GLUE(X_PFX,source_eoi)(hw_num, xd);

		/* If it's an emulated LSI, check level and resend */
		if (state->lsi && state->asserted)
			__x_writeq(0, __x_trig_page(xd));
	}
	/*
	 * This barrier orders the above guest_priority check
	 * and spin_lock/unlock with clearing in_eoi below.
	 *
	 * It also has to be a full mb() as it must ensure
	 * the MMIOs done in source_eoi() are completed before
	 * state->in_eoi is visible.
	 */
	mb();
	state->in_eoi = false;
bail:

	/* Re-evaluate pending IRQs and update HW */
	GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi);
	GLUE(X_PFX,push_pending_to_hw)(xc);
	pr_devel(" after scan pending=%02x\n", xc->pending);
	/* Apply new CPPR */
	xc->hw_cppr = xc->cppr;
	__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);

	return rc;
}
X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
			       unsigned long mfrr)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);

	xc->GLUE(X_STAT_PFX,h_ipi)++;
	/* Find target */
	vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
	if (!vcpu)
		return H_PARAMETER;
	xc = vcpu->arch.xive_vcpu;
	/* Locklessly write over MFRR */
	xc->mfrr = mfrr;
	/*
	 * The load of xc->cppr below and the subsequent MMIO store
	 * to the IPI must happen after the above mfrr update is
	 * globally visible so that:
	 *
	 * - Synchronize with another CPU doing an H_EOI or a H_CPPR
	 *   updating xc->cppr then reading xc->mfrr.
	 *
	 * - The target of the IPI sees the xc->mfrr update
	 */
	mb();
	/* Shoot the IPI if more favored than the target CPPR */
	if (mfrr < xc->cppr)
		__x_writeq(0, __x_trig_page(&xc->vp_ipi_data));

	return H_SUCCESS;
}