2 * Copyright 2016,2017 IBM Corporation.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
10 #define pr_fmt(fmt) "xive: " fmt
12 #include <linux/types.h>
13 #include <linux/threads.h>
14 #include <linux/kernel.h>
15 #include <linux/irq.h>
16 #include <linux/debugfs.h>
17 #include <linux/smp.h>
18 #include <linux/interrupt.h>
19 #include <linux/seq_file.h>
20 #include <linux/init.h>
21 #include <linux/cpu.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 #include <linux/msi.h>
30 #include <asm/machdep.h>
32 #include <asm/errno.h>
34 #include <asm/xive-regs.h>
37 #include "xive-internal.h"
/* Verbose debug, normally compiled out */
#ifdef DEBUG_ALL
#define DBG_VERBOSE(fmt...) pr_devel(fmt)
#else
#define DBG_VERBOSE(fmt...) do { } while(0)
#endif
49 bool xive_cmdline_disabled;
51 /* We use only one priority for now */
52 static u8 xive_irq_priority;
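/*
 * Mapped Thread Interrupt Management Area (TIMA). This is set up by the
 * platform backend and handed to xive_core_init(); per-CPU registers are
 * accessed at xive_tima + xive_tima_offset.
 */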
55 void __iomem *xive_tima;
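/* Backend specific operations (native or PAPR), selected at init time */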
59 static const struct xive_ops *xive_ops;
61 /* Our global interrupt domain */
62 static struct irq_domain *xive_irq_domain;
65 /* The IPIs all use the same logical irq number */
66 static u32 xive_ipi_irq;
69 /* Xive state for each CPU */
70 static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);
73 * A "disabled" interrupt should never fire, to catch problems
74 * we set its logical number to this
76 #define XIVE_BAD_IRQ 0x7fffffff
77 #define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1)
79 /* An invalid CPU target */
80 #define XIVE_INVALID_TARGET (-1)
83 * Read the next entry in a queue, return its content if it's valid
84 * or 0 if there is no new entry.
86 * The queue pointer is moved forward unless "just_peek" is set
88 static u32 xive_read_eq(struct xive_q *q, bool just_peek)
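/*
 * Each queue entry is a big-endian 32-bit word. Bit 31 is a generation
 * ("valid") bit whose expected polarity flips each time the queue wraps
 * (tracked in q->toggle); the low 31 bits hold the interrupt number.
 */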
94 cur = be32_to_cpup(q->qpage + q->idx);
96 /* Check valid bit (31) vs current toggle polarity */
97 if ((cur >> 31) == q->toggle)
100 /* If consuming from the queue ... */
103 q->idx = (q->idx + 1) & q->msk;
105 /* Wrap around: flip valid toggle */
109 /* Mask out the valid bit (31) */
110 return cur & 0x7fffffff;
* Scans all the queues that may have interrupts in them
* (based on "pending_prio") in priority order until an
* interrupt is found or all the queues are empty.
118 * Then updates the CPPR (Current Processor Priority
119 * Register) based on the most favored interrupt found
120 * (0xff if none) and return what was found (0 if none).
122 * If just_peek is set, return the most favored pending
123 * interrupt if any but don't update the queue pointers.
125 * Note: This function can operate generically on any number
126 * of queues (up to 8). The current implementation of the XIVE
127 * driver only uses a single queue however.
* Note2: This will also "flush" the "pending_count" of a queue
* into the "count" when that queue is observed to be empty.
* This is used to keep track of the number of interrupts
* targeting a queue. When an interrupt is moved away from
133 * a queue, we only decrement that queue count once the queue
134 * has been observed empty to avoid races.
136 static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
141 /* Find highest pending priority */
142 while (xc->pending_prio != 0) {
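/*
 * ffs() is 1-based, so this picks the lowest numbered
 * (i.e. most favored) priority that has its pending bit set.
 */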
145 prio = ffs(xc->pending_prio) - 1;
146 DBG_VERBOSE("scan_irq: trying prio %d\n", prio);
149 irq = xive_read_eq(&xc->queue[prio], just_peek);
151 /* Found something ? That's it */
155 /* Clear pending bits */
156 xc->pending_prio &= ~(1 << prio);
159 * Check if the queue count needs adjusting due to
160 * interrupts being moved away. See description of
161 * xive_dec_target_count()
163 q = &xc->queue[prio];
164 if (atomic_read(&q->pending_count)) {
165 int p = atomic_xchg(&q->pending_count, 0);
167 WARN_ON(p > atomic_read(&q->count));
168 atomic_sub(p, &q->count);
/* If nothing was found, set CPPR to 0xff */
if (irq == 0)
prio = 0xff;
177 /* Update HW CPPR to match if necessary */
178 if (prio != xc->cppr) {
DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
xc->cppr = prio;
out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
188 * This is used to perform the magic loads from an ESB
189 * described in xive.h
191 static u8 xive_poke_esb(struct xive_irq_data *xd, u32 offset)
195 /* Handle HW errata */
196 if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
197 offset |= offset << 4;
199 val = in_be64(xd->eoi_mmio + offset);
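/* The low byte of the value returned by the load holds the PQ state of the source */

/* Dump the first couple of entries of a queue (used by the xmon state dump below) */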
205 static void xive_dump_eq(const char *name, struct xive_q *q)
212 i0 = be32_to_cpup(q->qpage + idx);
213 idx = (idx + 1) & q->msk;
214 i1 = be32_to_cpup(q->qpage + idx);
215 xmon_printf(" %s Q T=%d %08x %08x ...\n", name,
219 void xmon_xive_do_dump(int cpu)
221 struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
223 xmon_printf("XIVE state for CPU %d:\n", cpu);
224 xmon_printf(" pp=%02x cppr=%02x\n", xc->pending_prio, xc->cppr);
225 xive_dump_eq("IRQ", &xc->queue[xive_irq_priority]);
228 u64 val = xive_poke_esb(&xc->ipi_data, XIVE_ESB_GET);
229 xmon_printf(" IPI state: %x:%c%c\n", xc->hw_ipi,
230 val & XIVE_ESB_VAL_P ? 'P' : 'p',
231 val & XIVE_ESB_VAL_P ? 'Q' : 'q');
235 #endif /* CONFIG_XMON */
237 static unsigned int xive_get_irq(void)
239 struct xive_cpu *xc = __this_cpu_read(xive_cpu);
243 * This can be called either as a result of a HW interrupt or
244 * as a "replay" because EOI decided there was still something
245 * in one of the queues.
247 * First we perform an ACK cycle in order to update our mask
248 * of pending priorities. This will also have the effect of
249 * updating the CPPR to the most favored pending interrupts.
* In the future, if we have a way to differentiate a first
* entry (on HW interrupt) from a replay triggered by EOI,
* we could skip this on replays unless the soft-mask tells us
* that a new HW interrupt occurred.
256 xive_ops->update_pending(xc);
258 DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);
260 /* Scan our queue(s) for interrupts */
261 irq = xive_scan_interrupts(xc, false);
263 DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
264 irq, xc->pending_prio);
266 /* Return pending interrupt if any */
if (irq == XIVE_BAD_IRQ)
return 0;
return irq;
273 * After EOI'ing an interrupt, we need to re-check the queue
274 * to see if another interrupt is pending since multiple
* interrupts can coalesce into a single notification to the
* processor.
278 * If we find that there is indeed more in there, we call
* force_external_irq_replay() to make Linux synthesize an
280 * external interrupt on the next call to local_irq_restore().
282 static void xive_do_queue_eoi(struct xive_cpu *xc)
284 if (xive_scan_interrupts(xc, true) != 0) {
285 DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
286 force_external_irq_replay();
291 * EOI an interrupt at the source. There are several methods
292 * to do this depending on the HW version and source type
294 void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
/* If the XIVE supports the new "store EOI" facility, use it */
297 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
298 out_be64(xd->eoi_mmio, 0);
299 else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
301 * The FW told us to call it. This happens for some
302 * interrupt sources that need additional HW whacking
303 * beyond the ESB manipulation. For example LPC interrupts
* on P9 DD1.0 need a latch to be cleared in the LPC bridge
305 * itself. The Firmware will take care of it.
307 if (WARN_ON_ONCE(!xive_ops->eoi))
309 xive_ops->eoi(hw_irq);
314 * Otherwise for EOI, we use the special MMIO that does
315 * a clear of both P and Q and returns the old Q,
* except for LSIs where we use the "EOI cycle" special load.
319 * This allows us to then do a re-trigger if Q was set
320 * rather than synthesizing an interrupt in software
322 * For LSIs, using the HW EOI cycle works around a problem
* on P9 DD1 PHBs where the other ESB accesses don't work properly.
326 if (xd->flags & XIVE_IRQ_FLAG_LSI)
327 in_be64(xd->eoi_mmio);
329 eoi_val = xive_poke_esb(xd, XIVE_ESB_SET_PQ_00);
DBG_VERBOSE("eoi_val=%x\n", eoi_val);
332 /* Re-trigger if needed */
333 if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
334 out_be64(xd->trig_mmio, 0);
339 /* irq_chip eoi callback */
340 static void xive_irq_eoi(struct irq_data *d)
342 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
343 struct xive_cpu *xc = __this_cpu_read(xive_cpu);
345 DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
346 d->irq, irqd_to_hwirq(d), xc->pending_prio);
348 /* EOI the source if it hasn't been disabled */
349 if (!irqd_irq_disabled(d))
350 xive_do_source_eoi(irqd_to_hwirq(d), xd);
* Clear saved_p to indicate that it's no longer occupying
* a queue slot on the target queue
*/
xd->saved_p = false;
358 /* Check for more work in the queue */
359 xive_do_queue_eoi(xc);
363 * Helper used to mask and unmask an interrupt source. This
364 * is only called for normal interrupts that do not require
365 * masking/unmasking via firmware.
367 static void xive_do_source_set_mask(struct xive_irq_data *xd,
373 * If the interrupt had P set, it may be in a queue.
375 * We need to make sure we don't re-enable it until it
376 * has been fetched from that queue and EOId. We keep
377 * a copy of that P state and use it to restore the
378 * ESB accordingly on unmask.
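*
* Concretely: mask = PQ 01 (recording whether P was set in saved_p),
* unmask = PQ 10 if saved_p, or PQ 00 (idle) otherwise.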
381 val = xive_poke_esb(xd, XIVE_ESB_SET_PQ_01);
382 xd->saved_p = !!(val & XIVE_ESB_VAL_P);
383 } else if (xd->saved_p)
384 xive_poke_esb(xd, XIVE_ESB_SET_PQ_10);
386 xive_poke_esb(xd, XIVE_ESB_SET_PQ_00);
* Try to choose "cpu" as a new interrupt target. Increments
* the queue accounting for that target if it's not already full.
394 static bool xive_try_pick_target(int cpu)
396 struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
397 struct xive_q *q = &xc->queue[xive_irq_priority];
401 * Calculate max number of interrupts in that queue.
403 * We leave a gap of 1 just in case...
405 max = (q->msk + 1) - 1;
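/* atomic_add_unless() returns 0 (and does not increment) when count has already reached max, i.e. the queue is accounted full */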
406 return !!atomic_add_unless(&q->count, 1, max);
* Un-account an interrupt for a target CPU. We don't directly
* decrement q->count since the interrupt might still be present
* in the queue.
*
* Instead increment a separate counter "pending_count" which
* will be subtracted from "count" later when that CPU observes
* the queue to be empty.
418 static void xive_dec_target_count(int cpu)
420 struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
421 struct xive_q *q = &xc->queue[xive_irq_priority];
423 if (unlikely(WARN_ON(cpu < 0 || !xc))) {
424 pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
429 * We increment the "pending count" which will be used
430 * to decrement the target queue count whenever it's next
* processed and found empty. This ensures that we don't
* decrement while we still have the interrupt there.
435 atomic_inc(&q->pending_count);
438 /* Find a tentative CPU target in a CPU mask */
439 static int xive_find_target_in_mask(const struct cpumask *mask,
442 int cpu, first, num, i;
444 /* Pick up a starting point CPU in the mask based on fuzz */
445 num = cpumask_weight(mask);
449 cpu = cpumask_first(mask);
450 for (i = 0; i < first && cpu < nr_cpu_ids; i++)
451 cpu = cpumask_next(cpu, mask);
454 if (WARN_ON(cpu >= nr_cpu_ids))
455 cpu = cpumask_first(cpu_online_mask);
457 /* Remember first one to handle wrap-around */
* Now go through the entire mask until we find a valid target.
466 * We re-check online as the fallback case passes us
467 * an untested affinity mask
469 if (cpu_online(cpu) && xive_try_pick_target(cpu))
471 cpu = cpumask_next(cpu, mask);
475 if (cpu >= nr_cpu_ids)
476 cpu = cpumask_first(mask);
482 * Pick a target CPU for an interrupt. This is done at
483 * startup or if the affinity is changed in a way that
484 * invalidates the current target.
486 static int xive_pick_irq_target(struct irq_data *d,
487 const struct cpumask *affinity)
489 static unsigned int fuzz;
490 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
* If we have chip IDs, first we try to build a mask of
* CPUs matching the source chip and find a target in there
498 if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
499 zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
500 /* Build a mask of matching chip IDs */
501 for_each_cpu_and(cpu, affinity, cpu_online_mask) {
502 struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
503 if (xc->chip_id == xd->src_chip)
504 cpumask_set_cpu(cpu, mask);
506 /* Try to find a target */
507 if (cpumask_empty(mask))
510 cpu = xive_find_target_in_mask(mask, fuzz++);
511 free_cpumask_var(mask);
517 /* No chip IDs, fallback to using the affinity mask */
518 return xive_find_target_in_mask(affinity, fuzz++);
521 static unsigned int xive_irq_startup(struct irq_data *d)
523 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
524 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
527 pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
530 #ifdef CONFIG_PCI_MSI
532 * The generic MSI code returns with the interrupt disabled on the
533 * card, using the MSI mask bits. Firmware doesn't appear to unmask
534 * at that level, so we do it here by hand.
536 if (irq_data_get_msi_desc(d))
537 pci_msi_unmask_irq(d);
541 target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
542 if (target == XIVE_INVALID_TARGET) {
543 /* Try again breaking affinity */
544 target = xive_pick_irq_target(d, cpu_online_mask);
545 if (target == XIVE_INVALID_TARGET)
547 pr_warn("irq %d started with broken affinity\n", d->irq);
551 if (WARN_ON(target == XIVE_INVALID_TARGET ||
552 target >= nr_cpu_ids))
553 target = smp_processor_id();
558 * Configure the logical number to be the Linux IRQ number
559 * and set the target queue
561 rc = xive_ops->configure_irq(hw_irq,
562 get_hard_smp_processor_id(target),
563 xive_irq_priority, d->irq);
568 xive_do_source_set_mask(xd, false);
573 static void xive_irq_shutdown(struct irq_data *d)
575 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
576 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
578 pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
581 if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
584 /* Mask the interrupt at the source */
585 xive_do_source_set_mask(xd, true);
* The above may have set saved_p. We clear it, otherwise it
* will prevent re-enabling later on. It is ok to forget the
590 * fact that the interrupt might be in a queue because we are
591 * accounting that already in xive_dec_target_count() and will
592 * be re-routing it to a new queue with proper accounting when
593 * it's started up again
598 * Mask the interrupt in HW in the IVT/EAS and set the number
599 * to be the "bad" IRQ number
601 xive_ops->configure_irq(hw_irq,
get_hard_smp_processor_id(xd->target),
0xff, XIVE_BAD_IRQ);
605 xive_dec_target_count(xd->target);
606 xd->target = XIVE_INVALID_TARGET;
609 static void xive_irq_unmask(struct irq_data *d)
611 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
613 pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);
* This is a workaround for PCI LSI problems on P9; for
* these, we call FW to set the mask. The problems might
* be fixed by P9 DD2.0, in which case firmware
* will no longer set that flag.
621 if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
622 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
623 xive_ops->configure_irq(hw_irq,
624 get_hard_smp_processor_id(xd->target),
625 xive_irq_priority, d->irq);
629 xive_do_source_set_mask(xd, false);
632 static void xive_irq_mask(struct irq_data *d)
634 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
636 pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);
* This is a workaround for PCI LSI problems on P9; for
* these, we call OPAL to set the mask. The problems might
* be fixed by P9 DD2.0, in which case firmware
* will no longer set that flag.
644 if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
645 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
646 xive_ops->configure_irq(hw_irq,
get_hard_smp_processor_id(xd->target),
0xff, d->irq);
652 xive_do_source_set_mask(xd, true);
655 static int xive_irq_set_affinity(struct irq_data *d,
656 const struct cpumask *cpumask,
659 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
660 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
661 u32 target, old_target;
664 pr_devel("xive_irq_set_affinity: irq %d\n", d->irq);
666 /* Is this valid ? */
667 if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
671 * If existing target is already in the new mask, and is
672 * online then do nothing.
674 if (xd->target != XIVE_INVALID_TARGET &&
675 cpu_online(xd->target) &&
676 cpumask_test_cpu(xd->target, cpumask))
677 return IRQ_SET_MASK_OK;
679 /* Pick a new target */
680 target = xive_pick_irq_target(d, cpumask);
682 /* No target found */
683 if (target == XIVE_INVALID_TARGET)
687 if (WARN_ON(target >= nr_cpu_ids))
688 target = smp_processor_id();
690 old_target = xd->target;
692 rc = xive_ops->configure_irq(hw_irq,
693 get_hard_smp_processor_id(target),
694 xive_irq_priority, d->irq);
696 pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
700 pr_devel(" target: 0x%x\n", target);
703 /* Give up previous target */
704 if (old_target != XIVE_INVALID_TARGET)
705 xive_dec_target_count(old_target);
707 return IRQ_SET_MASK_OK;
710 static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
712 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
* We only support these. This really has no effect other than setting
* the corresponding descriptor bits, but those will in turn
* affect the resend function when re-enabling an edge interrupt.
*
* We set the default to edge as explained in map().
721 if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
722 flow_type = IRQ_TYPE_EDGE_RISING;
724 if (flow_type != IRQ_TYPE_EDGE_RISING &&
725 flow_type != IRQ_TYPE_LEVEL_LOW)
728 irqd_set_trigger_type(d, flow_type);
731 * Double check it matches what the FW thinks
733 * NOTE: We don't know yet if the PAPR interface will provide
734 * the LSI vs MSI information apart from the device-tree so
735 * this check might have to move into an optional backend call
736 * that is specific to the native backend
738 if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
739 !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
740 pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
741 d->irq, (u32)irqd_to_hwirq(d),
742 (flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
743 (xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
746 return IRQ_SET_MASK_OK_NOCOPY;
749 static int xive_irq_retrigger(struct irq_data *d)
751 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
753 /* This should be only for MSIs */
754 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
758 * To perform a retrigger, we first set the PQ bits to
759 * 11, then perform an EOI.
761 xive_poke_esb(xd, XIVE_ESB_SET_PQ_11);
764 * Note: We pass "0" to the hw_irq argument in order to
765 * avoid calling into the backend EOI code which we don't
766 * want to do in the case of a re-trigger. Backends typically
767 * only do EOI for LSIs anyway.
769 xive_do_source_eoi(0, xd);
774 static struct irq_chip xive_irq_chip = {
776 .irq_startup = xive_irq_startup,
777 .irq_shutdown = xive_irq_shutdown,
778 .irq_eoi = xive_irq_eoi,
779 .irq_mask = xive_irq_mask,
780 .irq_unmask = xive_irq_unmask,
781 .irq_set_affinity = xive_irq_set_affinity,
782 .irq_set_type = xive_irq_set_type,
783 .irq_retrigger = xive_irq_retrigger,
786 bool is_xive_irq(struct irq_chip *chip)
788 return chip == &xive_irq_chip;
791 void xive_cleanup_irq_data(struct xive_irq_data *xd)
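/*
 * eoi_mmio and trig_mmio may share the same mapping; trig_mmio is
 * cleared in that case so we don't iounmap() the region twice.
 */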
794 iounmap(xd->eoi_mmio);
795 if (xd->eoi_mmio == xd->trig_mmio)
796 xd->trig_mmio = NULL;
800 iounmap(xd->trig_mmio);
801 xd->trig_mmio = NULL;
805 static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
807 struct xive_irq_data *xd;
810 xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
813 rc = xive_ops->populate_irq_data(hw, xd);
818 xd->target = XIVE_INVALID_TARGET;
819 irq_set_handler_data(virq, xd);
824 static void xive_irq_free_data(unsigned int virq)
826 struct xive_irq_data *xd = irq_get_handler_data(virq);
830 irq_set_handler_data(virq, NULL);
831 xive_cleanup_irq_data(xd);
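/* Trigger an IPI to "cpu" by writing to the trigger page of that CPU's IPI source */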
837 static void xive_cause_ipi(int cpu)
840 struct xive_irq_data *xd;
842 xc = per_cpu(xive_cpu, cpu);
844 DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
845 smp_processor_id(), cpu, xc->hw_ipi);
848 if (WARN_ON(!xd->trig_mmio))
850 out_be64(xd->trig_mmio, 0);
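/* All SMP IPI message types are multiplexed over the single per-CPU HW IPI and demultiplexed here */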
853 static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
855 return smp_ipi_demux();
858 static void xive_ipi_eoi(struct irq_data *d)
860 struct xive_cpu *xc = __this_cpu_read(xive_cpu);
862 /* Handle possible race with unplug and drop stale IPIs */
865 xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
866 xive_do_queue_eoi(xc);
869 static void xive_ipi_do_nothing(struct irq_data *d)
872 * Nothing to do, we never mask/unmask IPIs, but the callback
873 * has to exist for the struct irq_chip.
877 static struct irq_chip xive_ipi_chip = {
879 .irq_eoi = xive_ipi_eoi,
880 .irq_mask = xive_ipi_do_nothing,
881 .irq_unmask = xive_ipi_do_nothing,
884 static void __init xive_request_ipi(void)
* Initialization failed; move on, we might manage to
* reach the point where we display our errors before
* the system falls apart.
893 if (!xive_irq_domain)
897 virq = irq_create_mapping(xive_irq_domain, 0);
900 WARN_ON(request_irq(virq, xive_muxed_ipi_action,
901 IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
904 static int xive_setup_cpu_ipi(unsigned int cpu)
909 pr_debug("Setting up IPI for CPU %d\n", cpu);
911 xc = per_cpu(xive_cpu, cpu);
913 /* Check if we are already setup */
917 /* Grab an IPI from the backend, this will populate xc->hw_ipi */
918 if (xive_ops->get_ipi(cpu, xc))
922 * Populate the IRQ data in the xive_cpu structure and
923 * configure the HW / enable the IPIs.
925 rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
927 pr_err("Failed to populate IPI data on CPU %d\n", cpu);
930 rc = xive_ops->configure_irq(xc->hw_ipi,
931 get_hard_smp_processor_id(cpu),
932 xive_irq_priority, xive_ipi_irq);
934 pr_err("Failed to map IPI CPU %d\n", cpu);
937 pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
938 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);
941 xive_do_source_set_mask(&xc->ipi_data, false);
946 static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
948 /* Disable the IPI and free the IRQ data */
950 /* Already cleaned up ? */
955 xive_do_source_set_mask(&xc->ipi_data, true);
958 * Note: We don't call xive_cleanup_irq_data() to free
959 * the mappings as this is called from an IPI on kexec
960 * which is not a safe environment to call iounmap()
963 /* Deconfigure/mask in the backend */
964 xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
967 /* Free the IPIs in the backend */
968 xive_ops->put_ipi(cpu, xc);
971 void __init xive_smp_probe(void)
973 smp_ops->cause_ipi = xive_cause_ipi;
975 /* Register the IPI */
978 /* Allocate and setup IPI for the boot CPU */
979 xive_setup_cpu_ipi(smp_processor_id());
982 #endif /* CONFIG_SMP */
984 static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
990 * Mark interrupts as edge sensitive by default so that resend
991 * actually works. Will fix that up below if needed.
993 irq_clear_status_flags(virq, IRQ_LEVEL);
996 /* IPIs are special and come up with HW number 0 */
999 * IPIs are marked per-cpu. We use separate HW interrupts under
1000 * the hood but associated with the same "linux" interrupt
1002 irq_set_chip_and_handler(virq, &xive_ipi_chip,
1008 rc = xive_irq_alloc_data(virq, hw);
1012 irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);
1017 static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
1019 struct irq_data *data = irq_get_irq_data(virq);
1020 unsigned int hw_irq;
1022 /* XXX Assign BAD number */
1025 hw_irq = (unsigned int)irqd_to_hwirq(data);
1027 xive_irq_free_data(virq);
1030 static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
1031 const u32 *intspec, unsigned int intsize,
1032 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
1035 *out_hwirq = intspec[0];
* If intsize is at least 2, we look for the type in the second cell;
* we assume the LSB indicates a level interrupt.
1043 *out_flags = IRQ_TYPE_LEVEL_LOW;
1045 *out_flags = IRQ_TYPE_EDGE_RISING;
1047 *out_flags = IRQ_TYPE_LEVEL_LOW;
1052 static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
1053 enum irq_domain_bus_token bus_token)
1055 return xive_ops->match(node);
1058 static const struct irq_domain_ops xive_irq_domain_ops = {
1059 .match = xive_irq_domain_match,
1060 .map = xive_irq_domain_map,
1061 .unmap = xive_irq_domain_unmap,
1062 .xlate = xive_irq_domain_xlate,
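/*
 * We use a "nomap" domain: there is no static hwirq array; virtual
 * interrupt numbers are allocated on demand via irq_create_mapping().
 */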
1065 static void __init xive_init_host(void)
1067 xive_irq_domain = irq_domain_add_nomap(NULL, XIVE_MAX_IRQ,
1068 &xive_irq_domain_ops, NULL);
1069 if (WARN_ON(xive_irq_domain == NULL))
1071 irq_set_default_host(xive_irq_domain);
1074 static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
1076 if (xc->queue[xive_irq_priority].qpage)
1077 xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
1080 static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
/* We set up a single queue for now, with a 64k page */
1085 if (!xc->queue[xive_irq_priority].qpage)
1086 rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);
1091 static int xive_prepare_cpu(unsigned int cpu)
1093 struct xive_cpu *xc;
1095 xc = per_cpu(xive_cpu, cpu);
1097 struct device_node *np;
1099 xc = kzalloc_node(sizeof(struct xive_cpu),
1100 GFP_KERNEL, cpu_to_node(cpu));
1103 np = of_get_cpu_node(cpu, NULL);
1105 xc->chip_id = of_get_ibm_chip_id(np);
1108 per_cpu(xive_cpu, cpu) = xc;
1111 /* Setup EQs if not already */
1112 return xive_setup_cpu_queues(cpu, xc);
1115 static void xive_setup_cpu(void)
1117 struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1119 /* Debug: Dump the TM state */
1120 pr_devel("CPU %d [HW 0x%02x] VT=%02x\n",
1121 smp_processor_id(), hard_smp_processor_id(),
1122 in_8(xive_tima + xive_tima_offset + TM_WORD2));
1124 /* The backend might have additional things to do */
1125 if (xive_ops->setup_cpu)
1126 xive_ops->setup_cpu(smp_processor_id(), xc);
1128 /* Set CPPR to 0xff to enable flow of interrupts */
1130 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
1134 void xive_smp_setup_cpu(void)
1136 pr_devel("SMP setup CPU %d\n", smp_processor_id());
1138 /* This will have already been done on the boot CPU */
1139 if (smp_processor_id() != boot_cpuid)
1144 int xive_smp_prepare_cpu(unsigned int cpu)
1148 /* Allocate per-CPU data and queues */
1149 rc = xive_prepare_cpu(cpu);
1153 /* Allocate and setup IPI for the new CPU */
1154 return xive_setup_cpu_ipi(cpu);
1157 #ifdef CONFIG_HOTPLUG_CPU
1158 static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
1162 /* We assume local irqs are disabled */
1163 WARN_ON(!irqs_disabled());
1165 /* Check what's already in the CPU queue */
1166 while ((irq = xive_scan_interrupts(xc, false)) != 0) {
1168 * We need to re-route that interrupt to its new destination.
1169 * First get and lock the descriptor
1171 struct irq_desc *desc = irq_to_desc(irq);
1172 struct irq_data *d = irq_desc_get_irq_data(desc);
1173 struct xive_irq_data *xd;
1174 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
* Ignore anything that isn't a XIVE irq and ignore the IPIs;
* those can just be dropped.
1180 if (d->domain != xive_irq_domain || hw_irq == 0)
* The IRQ should have already been re-routed; it's just a stale
* entry in the old queue, so re-trigger it in order to make
* it reach its new destination.
1189 pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
1192 raw_spin_lock(&desc->lock);
1193 xd = irq_desc_get_handler_data(desc);
* For LSIs, we EOI; this will cause a resend if it's
* still asserted. Otherwise do an MSI retrigger.
1199 if (xd->flags & XIVE_IRQ_FLAG_LSI)
1200 xive_do_source_eoi(irqd_to_hwirq(d), xd);
1202 xive_irq_retrigger(d);
1204 raw_spin_unlock(&desc->lock);
1208 void xive_smp_disable_cpu(void)
1210 struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1211 unsigned int cpu = smp_processor_id();
1213 /* Migrate interrupts away from the CPU */
1214 irq_migrate_all_off_this_cpu();
1216 /* Set CPPR to 0 to disable flow of interrupts */
1218 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
1220 /* Flush everything still in the queue */
1221 xive_flush_cpu_queue(cpu, xc);
1223 /* Re-enable CPPR */
1225 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
1228 void xive_flush_interrupt(void)
1230 struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1231 unsigned int cpu = smp_processor_id();
1233 /* Called if an interrupt occurs while the CPU is hot unplugged */
1234 xive_flush_cpu_queue(cpu, xc);
1237 #endif /* CONFIG_HOTPLUG_CPU */
1239 #endif /* CONFIG_SMP */
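/* Tear down this CPU's XIVE state (CPPR, backend state, IPI, queues) before a kexec */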
1241 void xive_kexec_teardown_cpu(int secondary)
1243 struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1244 unsigned int cpu = smp_processor_id();
1246 /* Set CPPR to 0 to disable flow of interrupts */
1248 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
1250 /* Backend cleanup if any */
1251 if (xive_ops->teardown_cpu)
1252 xive_ops->teardown_cpu(cpu, xc);
1255 /* Get rid of IPI */
1256 xive_cleanup_cpu_ipi(cpu, xc);
1259 /* Disable and free the queues */
1260 xive_cleanup_cpu_queues(cpu, xc);
1263 void xive_shutdown(void)
1265 xive_ops->shutdown();
1268 bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
1272 xive_tima_offset = offset;
1274 xive_irq_priority = max_prio;
1276 ppc_md.get_irq = xive_get_irq;
1277 __xive_enabled = true;
1279 pr_devel("Initializing host..\n");
1282 pr_devel("Initializing boot CPU..\n");
1284 /* Allocate per-CPU data and queues */
1285 xive_prepare_cpu(smp_processor_id());
1287 /* Get ready for interrupts */
pr_info("Interrupt handling initialized with %s backend\n",
1292 pr_info("Using priority %d for all interrupts\n", max_prio);
1297 static int __init xive_off(char *arg)
1299 xive_cmdline_disabled = true;
1302 __setup("xive=off", xive_off);