/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

/* File to be included by other .c files */

#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)
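
/*
 * GLUE() pastes its arguments after macro expansion, so every function in
 * this template is instantiated under a prefix chosen by the including
 * file. For example, assuming the includer defines X_PFX as xive_vm_
 * (an illustrative value, see the usage note at the end of this file):
 *
 *	GLUE(X_PFX,ack_pending) -> XGLUE(xive_vm_,ack_pending)
 *	                        -> xive_vm_ack_pending
 *
 * The XGLUE() indirection makes sure X_PFX is expanded before ## pastes
 * the tokens together.
 */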

static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
{
	u8 cppr;
	u16 ack;

	/*
	 * Ensure any previous store to CPPR is ordered vs.
	 * the subsequent loads from PIPR or ACK.
	 */
	eieio();

	/*
	 * DD1 bug workaround: If PIPR is less favored than CPPR
	 * ignore the interrupt or we might incorrectly lose an IPB
	 * bit.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		u8 pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
		if (pipr >= xc->hw_cppr)
			return;
	}

	/* Perform the acknowledge OS to register cycle. */
	ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
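
	/*
	 * Note: the 16-bit value returned by the ACK cycle packs the NSR
	 * byte above the CPPR byte, hence the (ack >> 8) and (ack & 0xff)
	 * extractions below.
	 */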

	/* Synchronize subsequent queue accesses */
	mb();

	/* XXX Check grouping level */

	/* Anything pending ? */
	if (!((ack >> 8) & TM_QW1_NSR_EO))
		return;

	/* Grab CPPR of the most favored pending interrupt */
	cppr = ack & 0xff;
	if (cppr < 8)
		xc->pending |= 1 << cppr;

#ifdef XIVE_RUNTIME_CHECKS
	/* Check consistency */
	if (cppr >= xc->hw_cppr)
		pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
			smp_processor_id(), cppr, xc->hw_cppr);
#endif

	/*
	 * Update our image of the HW CPPR. We don't yet modify
	 * xc->cppr, this will be done as we scan for interrupts
	 * in the queues.
	 */
	xc->hw_cppr = cppr;
}

static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	val = __x_readq(__x_eoi_page(xd) + offset);
#ifdef __LITTLE_ENDIAN__
	val >>= 64-8;
#endif
	return (u8)val;
}
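
/*
 * Note on ESB semantics: each source exposes two state bits, P (an event
 * has been presented) and Q (a further trigger arrived while P was set).
 * A load from a given ESB offset atomically sets PQ to the value encoded
 * in the offset (e.g. XIVE_ESB_SET_PQ_00 clears both bits) and returns
 * the previous state, which is what esb_load() hands back to its caller.
 */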

static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
{
	/* If the XIVE supports the new "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
		opal_int_eoi(hw_irq);
	} else {
		uint64_t eoi_val;

		/*
		 * Otherwise for EOI, we use the special MMIO that does
		 * a clear of both P and Q and returns the old Q,
		 * except for LSIs where we use the "EOI cycle" special
		 * load.
		 *
		 * This allows us to then do a re-trigger if Q was set
		 * rather than synthesizing an interrupt in software.
		 *
		 * For LSIs, using the HW EOI cycle works around a problem
		 * on P9 DD1 PHBs where the other ESB accesses don't work
		 * properly.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			__x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
		else {
			eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);

			/* Re-trigger if needed */
			if ((eoi_val & 1) && __x_trig_page(xd))
				__x_writeq(0, __x_trig_page(xd));
		}
	}
}

enum {
	scan_fetch,
	scan_poll,
	scan_eoi,
};
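
/*
 * Scan the pending priority bitmap for something to present to the guest.
 * The scan_type argument selects the behaviour: scan_fetch consumes the
 * interrupt (EOIs IPI signals, updates queue pointers and the CPPR),
 * scan_poll only peeks, and scan_eoi merely prunes stale pending bits.
 * See the call sites in h_xirr, h_ipoll and h_eoi below.
 */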

static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
				       u8 pending, int scan_type)
{
	u32 hirq = 0;
	u8 prio = 0xff;

	/* Find highest pending priority */
	while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
		struct xive_q *q;
		u32 idx, toggle;
		__be32 *qpage;

		/*
		 * If pending is 0 this will return 0xff which is what
		 * we want.
		 */
		prio = ffs(pending) - 1;
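
		/*
		 * Note: in XICS/XIVE numbering a smaller value is a more
		 * favored priority, so the lowest set bit found by ffs()
		 * above is the most favored pending priority.
		 */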

		/*
		 * If the most favored prio we found pending is less
		 * favored than (or equal to) a pending IPI, we return
		 * the IPI instead.
		 *
		 * Note: If pending was 0 and mfrr is 0xff, we will
		 * not spuriously take an IPI because mfrr cannot
		 * then be smaller than cppr.
		 */
		if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
			prio = xc->mfrr;
			hirq = XICS_IPI;
			break;
		}

		/* Don't scan past the guest cppr */
		if (prio >= xc->cppr || prio > 7)
			break;

		/* Grab queue and pointers */
		q = &xc->queues[prio];
		idx = q->idx;
		toggle = q->toggle;

		/*
		 * Snapshot the queue page. The test further down for EOI
		 * must use the same "copy" that was used by __xive_read_eq
		 * since qpage can be set concurrently and we don't want
		 * to miss an EOI.
		 */
		qpage = READ_ONCE(q->qpage);

skip_ipi:
		/*
		 * Try to fetch from the queue. Will return 0 for a
		 * non-queueing priority (ie, qpage = 0).
		 */
		hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);

		/*
		 * If this was a signal for an MFRR change done by
		 * H_IPI we skip it. Additionally, if we were fetching
		 * we EOI it now, thus re-enabling reception of a new
		 * such signal.
		 *
		 * We also need to do that if prio is 0 and we had no
		 * page for the queue. In this case, we have a non-queued
		 * IPI that needs to be EOId.
		 *
		 * This is safe because if we have another pending MFRR
		 * change that wasn't observed above, the Q bit will have
		 * been set and another occurrence of the IPI will trigger.
		 */
		if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
			if (scan_type == scan_fetch)
				GLUE(X_PFX,source_eoi)(xc->vp_ipi,
						       &xc->vp_ipi_data);
			/* Loop back on same queue with updated idx/toggle */
#ifdef XIVE_RUNTIME_CHECKS
			WARN_ON(hirq && hirq != XICS_IPI);
#endif
			if (hirq)
				goto skip_ipi;
		}

		/* If fetching, update queue pointers */
		if (scan_type == scan_fetch) {
			q->idx = idx;
			q->toggle = toggle;
		}

		/* Something found, stop searching */
		if (hirq)
			break;

		/* Clear the pending bit on the now empty queue */
		pending &= ~(1 << prio);

		/*
		 * Check if the queue count needs adjusting due to
		 * interrupts being moved away.
		 */
		if (atomic_read(&q->pending_count)) {
			int p = atomic_xchg(&q->pending_count, 0);
			if (p) {
#ifdef XIVE_RUNTIME_CHECKS
				WARN_ON(p > atomic_read(&q->count));
#endif
				atomic_sub(p, &q->count);
			}
		}
	}

	/* If we are just taking a "peek", do nothing else */
	if (scan_type == scan_poll)
		return hirq;

	/* Update the pending bits */
	xc->pending = pending;

	/*
	 * If this is an EOI that's it, no CPPR adjustment done here,
	 * all we needed was to clean up the stale pending bits and
	 * check if there's anything left.
	 */
	if (scan_type == scan_eoi)
		return hirq;

	/*
	 * If we found an interrupt, adjust what the guest CPPR should
	 * be as if we had just fetched that interrupt from HW.
	 *
	 * Note: This can only make xc->cppr smaller as the previous
	 * loop will only exit with hirq != 0 if prio is lower than
	 * the current xc->cppr. Thus we don't need to re-check xc->mfrr
	 * for pending IPIs.
	 */
	if (hirq)
		xc->cppr = prio;

	/*
	 * If it was an IPI the HW CPPR might have been lowered too much
	 * as the HW interrupt we use for IPIs is routed to priority 0.
	 *
	 * We re-sync it here.
	 */
	if (xc->cppr != xc->hw_cppr) {
		xc->hw_cppr = xc->cppr;
		__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
	}

	return hirq;
}

X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u8 old_cppr;
	u32 hirq;

	pr_devel("H_XIRR\n");

	xc->GLUE(X_STAT_PFX,h_xirr)++;

	/* First collect pending bits from HW */
	GLUE(X_PFX,ack_pending)(xc);

	/*
	 * Cleanup the old-style bits if needed (they may have been
	 * set by a pull or an escalation interrupt).
	 */
	if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions))
		clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
			  &vcpu->arch.pending_exceptions);

	pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
		 xc->pending, xc->hw_cppr, xc->cppr);

	/* Grab previous CPPR and reverse map it */
	old_cppr = xive_prio_to_guest(xc->cppr);

	/* Scan for actual interrupts */
	hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch);

	pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
		 hirq, xc->hw_cppr, xc->cppr);

#ifdef XIVE_RUNTIME_CHECKS
	/* That should never hit */
	if (hirq & 0xff000000)
		pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
#endif

	/*
	 * XXX We could check if the interrupt is masked here and
	 * filter it. If we chose to do so, we would need to do:
	 *
	 *	if (masked) {
	 *		lock();
	 *		if (masked) {
	 *			old_Q = true;
	 *			hirq = 0;
	 *		}
	 *		unlock();
	 *	}
	 */

	/* Return interrupt and old CPPR in GPR4 */
	vcpu->arch.gpr[4] = hirq | (old_cppr << 24);
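
	/*
	 * Note: this follows the XICS XIRR layout, i.e. the previous CPPR
	 * in the top byte and the interrupt number in the low 24 bits.
	 */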

	return H_SUCCESS;
}

X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u8 pending = xc->pending;
	u32 hirq;
	u8 pipr;

	pr_devel("H_IPOLL(server=%ld)\n", server);

	xc->GLUE(X_STAT_PFX,h_ipoll)++;

	/* Grab the target VCPU if not the current one */
	if (xc->server_num != server) {
		vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
		if (!vcpu)
			return H_PARAMETER;
		xc = vcpu->arch.xive_vcpu;

		/* Scan all priorities */
		pending = 0xff;
	} else {
		/* Grab pending interrupt if any */
		pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
		if (pipr < 8)
			pending |= 1 << pipr;
	}

	hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);

	/* Return interrupt and old CPPR in GPR4 */
	vcpu->arch.gpr[4] = hirq | (xc->cppr << 24);

	return H_SUCCESS;
}

static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
{
	u8 pending, prio;

	pending = xc->pending;
	if (xc->mfrr != 0xff) {
		if (xc->mfrr < 8)
			pending |= 1 << xc->mfrr;
		else
			pending |= 0x80;
	}
	if (!pending)
		return;
	prio = ffs(pending) - 1;

	__x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
}

X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u8 old_cppr;

	pr_devel("H_CPPR(cppr=%ld)\n", cppr);

	xc->GLUE(X_STAT_PFX,h_cppr)++;

	/* Map the guest priority to a HW priority */
	cppr = xive_prio_from_guest(cppr);

	/* Remember old and update SW state */
	old_cppr = xc->cppr;
	xc->cppr = cppr;

	/*
	 * Order the above update of xc->cppr with the subsequent
	 * read of xc->mfrr inside push_pending_to_hw()
	 */
	smp_mb();

	if (cppr > old_cppr) {
		/*
		 * We are masking less, we need to look for pending things
		 * to deliver and set VP pending bits accordingly to trigger
		 * a new interrupt otherwise we might miss MFRR changes for
		 * which we have optimized out sending an IPI signal.
		 */
		GLUE(X_PFX,push_pending_to_hw)(xc);
	}

	/* Apply the new CPPR to HW */
	xc->hw_cppr = cppr;
	__x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR);

	return H_SUCCESS;
}

X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_irq_data *xd;
	u8 new_cppr = xirr >> 24;
	u32 irq = xirr & 0x00ffffff, hw_num;
	u16 src;
	int rc = 0;

	pr_devel("H_EOI(xirr=%08lx)\n", xirr);

	xc->GLUE(X_STAT_PFX,h_eoi)++;

	xc->cppr = xive_prio_from_guest(new_cppr);

	/*
	 * IPIs are synthesized from MFRR and thus don't need
	 * any special EOI handling. The underlying interrupt
	 * used to signal MFRR changes is EOId when fetched from
	 * the queue.
	 */
	if (irq == XICS_IPI || irq == 0) {
		/*
		 * This barrier orders the setting of xc->cppr vs.
		 * the subsequent test of xc->mfrr done inside
		 * scan_interrupts and push_pending_to_hw
		 */
		smp_mb();
		goto bail;
	}

	/* Find interrupt source */
	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb) {
		pr_devel(" source not found !\n");
		rc = H_PARAMETER;
		/* Same barrier as above */
		smp_mb();
		goto bail;
	}
	state = &sb->irq_state[src];
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	state->in_eoi = true;

	/*
	 * This barrier orders both the setting of in_eoi above vs.
	 * the subsequent test of guest_priority, and the setting
	 * of xc->cppr vs. the subsequent test of xc->mfrr done inside
	 * scan_interrupts and push_pending_to_hw
	 */
	smp_mb();

again:
	if (state->guest_priority == MASKED) {
		arch_spin_lock(&sb->lock);
		if (state->guest_priority != MASKED) {
			arch_spin_unlock(&sb->lock);
			goto again;
		}
		pr_devel(" EOI on saved P...\n");

		/* Clear old_p, that will cause unmask to perform an EOI */
		state->old_p = false;

		arch_spin_unlock(&sb->lock);
	} else {
		pr_devel(" EOI on source...\n");

		/* Perform EOI on the source */
		GLUE(X_PFX,source_eoi)(hw_num, xd);

		/* If it's an emulated LSI, check level and resend */
		if (state->lsi && state->asserted)
			__x_writeq(0, __x_trig_page(xd));
	}

	/*
	 * This barrier orders the above guest_priority check
	 * and spin_lock/unlock with clearing in_eoi below.
	 *
	 * It also has to be a full mb() as it must ensure
	 * the MMIOs done in source_eoi() are completed before
	 * state->in_eoi is visible.
	 */
	mb();
	state->in_eoi = false;
bail:
	/* Re-evaluate pending IRQs and update HW */
	GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi);
	GLUE(X_PFX,push_pending_to_hw)(xc);
	pr_devel(" after scan pending=%02x\n", xc->pending);

	/* Apply the new CPPR to HW */
	xc->hw_cppr = xc->cppr;
	__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);

	return rc;
}

X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
			       unsigned long mfrr)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);

	xc->GLUE(X_STAT_PFX,h_ipi)++;

	/* Find the target VCPU */
	vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
	if (!vcpu)
		return H_PARAMETER;
	xc = vcpu->arch.xive_vcpu;

	/* Locklessly write over MFRR */
	xc->mfrr = mfrr;

	/*
	 * The load of xc->cppr below and the subsequent MMIO store
	 * to the IPI must happen after the above mfrr update is
	 * globally visible so that:
	 *
	 * - we synchronize with another CPU doing an H_EOI or a H_CPPR
	 *   updating xc->cppr then reading xc->mfrr;
	 *
	 * - the target of the IPI sees the xc->mfrr update.
	 */
	mb();

	/* Shoot the IPI if more favored than the target CPPR */
	if (mfrr < xc->cppr)
		__x_writeq(0, __x_trig_page(&xc->vp_ipi_data));

	return H_SUCCESS;
}
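
/*
 * Usage note (illustrative, not part of the template itself): a file
 * that includes this template is expected to first define the X_PFX,
 * X_STATIC and X_STAT_PFX macros plus the __x_* accessors. A hedged
 * sketch of what an instantiation may look like, assuming a
 * virtual-mode user with raw MMIO accessors (names are illustrative):
 *
 *	#define X_PFX		xive_vm_
 *	#define X_STATIC	static
 *	#define X_STAT_PFX	stat_vm_
 *	#define __x_tima	xive_tima
 *	#define __x_eoi_page(xd)	((void __iomem *)((xd)->eoi_mmio))
 *	#define __x_trig_page(xd)	((void __iomem *)((xd)->trig_mmio))
 *	#define __x_readb	__raw_readb
 *	#define __x_writeb	__raw_writeb
 *	#define __x_readw	__raw_readw
 *	#define __x_readq	__raw_readq
 *	#define __x_writeq	__raw_writeq
 *	#include "book3s_xive_template.c"
 *
 * This would instantiate xive_vm_h_xirr(), xive_vm_h_ipoll(), etc.
 */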