4 * Copyright (c) 2004 Jocelyn Mayer
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 #include <linux/slab.h>
27 #include <linux/mutex.h>
28 #include <linux/kvm_host.h>
29 #include <linux/errno.h>
31 #include <linux/anon_inodes.h>
32 #include <asm/uaccess.h>
34 #include <asm/kvm_para.h>
35 #include <asm/kvm_host.h>
36 #include <asm/kvm_ppc.h>
44 #define MAX_IRQ (MAX_SRC + MAX_IPI + MAX_TMR)
45 #define VID 0x03 /* MPIC version ID */
47 /* OpenPIC capability flags */
48 #define OPENPIC_FLAG_IDR_CRIT (1 << 0)
49 #define OPENPIC_FLAG_ILR (1 << 1)
51 /* OpenPIC address map */
52 #define OPENPIC_REG_SIZE 0x40000
53 #define OPENPIC_GLB_REG_START 0x0
54 #define OPENPIC_GLB_REG_SIZE 0x10F0
55 #define OPENPIC_TMR_REG_START 0x10F0
56 #define OPENPIC_TMR_REG_SIZE 0x220
57 #define OPENPIC_MSI_REG_START 0x1600
58 #define OPENPIC_MSI_REG_SIZE 0x200
59 #define OPENPIC_SUMMARY_REG_START 0x3800
60 #define OPENPIC_SUMMARY_REG_SIZE 0x800
61 #define OPENPIC_SRC_REG_START 0x10000
62 #define OPENPIC_SRC_REG_SIZE (MAX_SRC * 0x20)
63 #define OPENPIC_CPU_REG_START 0x20000
64 #define OPENPIC_CPU_REG_SIZE (0x100 + ((MAX_CPU - 1) * 0x1000))
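/* Per-CPU register banks start at OPENPIC_CPU_REG_START and are spaced
 * 0x1000 bytes apart, with the registers themselves in the first 0x100
 * bytes of each bank (hence the size formula above).  The CPU handlers
 * below recover the bank index with (addr & 0x1f000) >> 12.
 */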
66 struct fsl_mpic_info {
70 static struct fsl_mpic_info fsl_mpic_20 = {
74 static struct fsl_mpic_info fsl_mpic_42 = {
78 #define FRR_NIRQ_SHIFT 16
79 #define FRR_NCPU_SHIFT 8
80 #define FRR_VID_SHIFT 0
82 #define VID_REVISION_1_2 2
83 #define VID_REVISION_1_3 3
85 #define VIR_GENERIC 0x00000000 /* Generic Vendor ID */
87 #define GCR_RESET 0x80000000
88 #define GCR_MODE_PASS 0x00000000
89 #define GCR_MODE_MIXED 0x20000000
90 #define GCR_MODE_PROXY 0x60000000
92 #define TBCR_CI 0x80000000 /* count inhibit */
93 #define TCCR_TOG 0x80000000 /* toggles when the count decrements to zero */
95 #define IDR_EP_SHIFT 31
96 #define IDR_EP_MASK (1 << IDR_EP_SHIFT)
97 #define IDR_CI0_SHIFT 30
98 #define IDR_CI1_SHIFT 29
99 #define IDR_P1_SHIFT 1
100 #define IDR_P0_SHIFT 0
102 #define ILR_INTTGT_MASK 0x000000ff
103 #define ILR_INTTGT_INT 0x00
104 #define ILR_INTTGT_CINT 0x01 /* critical */
105 #define ILR_INTTGT_MCP 0x02 /* machine check */
106 #define NUM_OUTPUTS 3
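/* outputs_active[] in struct irq_dest keeps one count per ILR_INTTGT_*
 * target listed above.
 */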
108 #define MSIIR_OFFSET 0x140
109 #define MSIIR_SRS_SHIFT 29
110 #define MSIIR_SRS_MASK (0x7 << MSIIR_SRS_SHIFT)
111 #define MSIIR_IBS_SHIFT 24
112 #define MSIIR_IBS_MASK (0x1f << MSIIR_IBS_SHIFT)
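/* Example decode using the masks above: an MSIIR write of 0x23000000 has
 * SRS = 1 (bits 31:29) and IBS = 3 (bits 28:24), i.e. openpic_msi_write()
 * sets bit 3 in MSIR 1.
 */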
114 static int get_current_cpu(void)
116 #if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
117 struct kvm_vcpu *vcpu = current->thread.kvm_vcpu;
118 return vcpu ? vcpu->arch.irq_cpu_id : -1;
125 static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
127 static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
132 IRQ_TYPE_FSLINT, /* FSL internal interrupt -- level only */
133 IRQ_TYPE_FSLSPECIAL, /* FSL timer/IPI interrupt, edge, no polarity */
137 /* Round up to the nearest 64 IRQs so that the queue length
138 * won't change when moving between 32 and 64 bit hosts.
140 unsigned long queue[BITS_TO_LONGS((MAX_IRQ + 63) & ~63)];
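/* (MAX_IRQ + 63) & ~63 rounds the bit count up to a multiple of 64, so
 * BITS_TO_LONGS() yields the same number of bytes whether a long is 32 or
 * 64 bits wide.
 */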
146 uint32_t ivpr; /* IRQ vector/priority register */
147 uint32_t idr; /* IRQ destination register */
148 uint32_t destmask; /* bitmap of CPU destinations */
150 int output; /* IRQ level, e.g. ILR_INTTGT_INT */
151 int pending; /* TRUE if IRQ is pending */
153 bool level:1; /* level-triggered */
154 bool nomask:1; /* critical interrupts ignore mask on some FSL MPICs */
157 #define IVPR_MASK_SHIFT 31
158 #define IVPR_MASK_MASK (1 << IVPR_MASK_SHIFT)
159 #define IVPR_ACTIVITY_SHIFT 30
160 #define IVPR_ACTIVITY_MASK (1 << IVPR_ACTIVITY_SHIFT)
161 #define IVPR_MODE_SHIFT 29
162 #define IVPR_MODE_MASK (1 << IVPR_MODE_SHIFT)
163 #define IVPR_POLARITY_SHIFT 23
164 #define IVPR_POLARITY_MASK (1 << IVPR_POLARITY_SHIFT)
165 #define IVPR_SENSE_SHIFT 22
166 #define IVPR_SENSE_MASK (1 << IVPR_SENSE_SHIFT)
168 #define IVPR_PRIORITY_MASK (0xF << 16)
169 #define IVPR_PRIORITY(_ivprr_) ((int)(((_ivprr_) & IVPR_PRIORITY_MASK) >> 16))
170 #define IVPR_VECTOR(opp, _ivprr_) ((_ivprr_) & (opp)->vector_mask)
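/* Example decode using the masks above: with vector_mask = 0xFFFF (FSL),
 * an IVPR of 0x000A0042 is unmasked, priority 10, vector 0x42; bit 31
 * masks the source and bit 30 is the read-only ACTIVITY flag.
 */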
172 /* IDR[EP/CI] are only for FSL MPIC prior to v4.0 */
173 #define IDR_EP 0x80000000 /* external pin */
174 #define IDR_CI 0x40000000 /* critical interrupt */
177 struct kvm_vcpu *vcpu;
179 int32_t ctpr; /* CPU current task priority */
180 struct irq_queue raised;
181 struct irq_queue servicing;
183 /* Count of IRQ sources asserting on non-INT outputs */
184 uint32_t outputs_active[NUM_OUTPUTS];
189 struct kvm_device *dev;
190 struct kvm_io_device mmio;
191 struct list_head mmio_regions;
198 /* Behavior control */
199 struct fsl_mpic_info *fsl;
204 uint32_t vir; /* Vendor identification register */
205 uint32_t vector_mask;
210 uint32_t mpic_mode_mask;
212 /* Global registers */
213 uint32_t frr; /* Feature reporting register */
214 uint32_t gcr; /* Global configuration register */
215 uint32_t pir; /* Processor initialization register */
216 uint32_t spve; /* Spurious vector register */
217 uint32_t tfrr; /* Timer frequency reporting register */
218 /* Source registers */
219 struct irq_source src[MAX_IRQ];
220 /* Local registers per output pin */
221 struct irq_dest dst[MAX_CPU];
223 /* Timer registers */
225 uint32_t tccr; /* Global timer current count register */
226 uint32_t tbcr; /* Global timer base count register */
228 /* Shared MSI registers */
230 uint32_t msir; /* Shared Message Signaled Interrupt Register */
239 static void mpic_irq_raise(struct openpic *opp, struct irq_dest *dst,
242 struct kvm_interrupt irq = {
243 .irq = KVM_INTERRUPT_SET_LEVEL,
247 pr_debug("%s: destination cpu %d does not exist\n",
248 __func__, (int)(dst - &opp->dst[0]));
252 pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
255 if (output != ILR_INTTGT_INT) /* TODO */
258 kvm_vcpu_ioctl_interrupt(dst->vcpu, &irq);
261 static void mpic_irq_lower(struct openpic *opp, struct irq_dest *dst,
265 pr_debug("%s: destination cpu %d does not exist\n",
266 __func__, (int)(dst - &opp->dst[0]));
270 pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
273 if (output != ILR_INTTGT_INT) /* TODO */
276 kvmppc_core_dequeue_external(dst->vcpu);
279 static inline void IRQ_setbit(struct irq_queue *q, int n_IRQ)
281 set_bit(n_IRQ, q->queue);
284 static inline void IRQ_resetbit(struct irq_queue *q, int n_IRQ)
286 clear_bit(n_IRQ, q->queue);
289 static inline int IRQ_testbit(struct irq_queue *q, int n_IRQ)
291 return test_bit(n_IRQ, q->queue);
294 static void IRQ_check(struct openpic *opp, struct irq_queue *q)
301 irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
302 if (irq == opp->max_irq)
305 pr_debug("IRQ_check: irq %d set ivpr_pr=%d pr=%d\n",
306 irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority);
308 if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) {
310 priority = IVPR_PRIORITY(opp->src[irq].ivpr);
315 q->priority = priority;
318 static int IRQ_get_next(struct openpic *opp, struct irq_queue *q)
326 static void IRQ_local_pipe(struct openpic *opp, int n_CPU, int n_IRQ,
327 bool active, bool was_active)
329 struct irq_dest *dst;
330 struct irq_source *src;
333 dst = &opp->dst[n_CPU];
334 src = &opp->src[n_IRQ];
336 pr_debug("%s: IRQ %d active %d was %d\n",
337 __func__, n_IRQ, active, was_active);
339 if (src->output != ILR_INTTGT_INT) {
340 pr_debug("%s: output %d irq %d active %d was %d count %d\n",
341 __func__, src->output, n_IRQ, active, was_active,
342 dst->outputs_active[src->output]);
344 /* On Freescale MPIC, critical interrupts ignore priority,
345 * IACK, EOI, etc. Before MPIC v4.1 they also ignore
350 dst->outputs_active[src->output]++ == 0) {
351 pr_debug("%s: Raise OpenPIC output %d cpu %d irq %d\n",
352 __func__, src->output, n_CPU, n_IRQ);
353 mpic_irq_raise(opp, dst, src->output);
357 --dst->outputs_active[src->output] == 0) {
358 pr_debug("%s: Lower OpenPIC output %d cpu %d irq %d\n",
359 __func__, src->output, n_CPU, n_IRQ);
360 mpic_irq_lower(opp, dst, src->output);
367 priority = IVPR_PRIORITY(src->ivpr);
369 /* Even if the interrupt doesn't have enough priority,
370 * it is still raised, in case ctpr is lowered later.
373 IRQ_setbit(&dst->raised, n_IRQ);
375 IRQ_resetbit(&dst->raised, n_IRQ);
377 IRQ_check(opp, &dst->raised);
379 if (active && priority <= dst->ctpr) {
380 pr_debug("%s: IRQ %d priority %d too low for ctpr %d on CPU %d\n",
381 __func__, n_IRQ, priority, dst->ctpr, n_CPU);
386 if (IRQ_get_next(opp, &dst->servicing) >= 0 &&
387 priority <= dst->servicing.priority) {
388 pr_debug("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d\n",
389 __func__, n_IRQ, dst->servicing.next, n_CPU);
391 pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d/%d\n",
392 __func__, n_CPU, n_IRQ, dst->raised.next);
393 mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
396 IRQ_get_next(opp, &dst->servicing);
397 if (dst->raised.priority > dst->ctpr &&
398 dst->raised.priority > dst->servicing.priority) {
399 pr_debug("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d\n",
400 __func__, n_IRQ, dst->raised.next,
401 dst->raised.priority, dst->ctpr,
402 dst->servicing.priority, n_CPU);
403 /* IRQ line stays asserted */
405 pr_debug("%s: IRQ %d inactive, current prio %d/%d, CPU %d\n",
406 __func__, n_IRQ, dst->ctpr,
407 dst->servicing.priority, n_CPU);
408 mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
413 /* update pic state because registers for n_IRQ have changed value */
414 static void openpic_update_irq(struct openpic *opp, int n_IRQ)
416 struct irq_source *src;
417 bool active, was_active;
420 src = &opp->src[n_IRQ];
421 active = src->pending;
423 if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) {
424 /* Interrupt source is disabled */
425 pr_debug("%s: IRQ %d is disabled\n", __func__, n_IRQ);
429 was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK);
432 * We don't have a similar check for already-active because
433 * ctpr may have changed and we need to withdraw the interrupt.
435 if (!active && !was_active) {
436 pr_debug("%s: IRQ %d is already inactive\n", __func__, n_IRQ);
441 src->ivpr |= IVPR_ACTIVITY_MASK;
443 src->ivpr &= ~IVPR_ACTIVITY_MASK;
445 if (src->destmask == 0) {
447 pr_debug("%s: IRQ %d has no target\n", __func__, n_IRQ);
451 if (src->destmask == (1 << src->last_cpu)) {
452 /* Only one CPU is allowed to receive this IRQ */
453 IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active);
454 } else if (!(src->ivpr & IVPR_MODE_MASK)) {
455 /* Directed delivery mode */
456 for (i = 0; i < opp->nb_cpus; i++) {
457 if (src->destmask & (1 << i)) {
458 IRQ_local_pipe(opp, i, n_IRQ, active,
463 /* Distributed delivery mode */
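/* last_cpu makes this a round-robin scan: start just after the CPU
 * that most recently took this source and wrap around at nb_cpus.
 */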
464 for (i = src->last_cpu + 1; i != src->last_cpu; i++) {
465 if (i == opp->nb_cpus)
468 if (src->destmask & (1 << i)) {
469 IRQ_local_pipe(opp, i, n_IRQ, active,
478 static void openpic_set_irq(void *opaque, int n_IRQ, int level)
480 struct openpic *opp = opaque;
481 struct irq_source *src;
483 if (n_IRQ >= MAX_IRQ) {
484 WARN_ONCE(1, "%s: IRQ %d out of range\n", __func__, n_IRQ);
488 src = &opp->src[n_IRQ];
489 pr_debug("openpic: set irq %d = %d ivpr=0x%08x\n",
490 n_IRQ, level, src->ivpr);
492 /* level-sensitive irq */
493 src->pending = level;
494 openpic_update_irq(opp, n_IRQ);
496 /* edge-sensitive irq */
499 openpic_update_irq(opp, n_IRQ);
502 if (src->output != ILR_INTTGT_INT) {
503 /* Edge-triggered interrupts shouldn't be used
504 * with non-INT delivery, but just in case,
505 * try to make it do something sane rather than
506 * cause an interrupt storm. This is close to
507 * what you'd probably see happen in real hardware.
510 openpic_update_irq(opp, n_IRQ);
515 static void openpic_reset(struct openpic *opp)
519 opp->gcr = GCR_RESET;
520 /* Initialise controller registers */
521 opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) |
522 (opp->vid << FRR_VID_SHIFT);
525 opp->spve = -1 & opp->vector_mask;
526 opp->tfrr = opp->tfrr_reset;
527 /* Initialise IRQ sources */
528 for (i = 0; i < opp->max_irq; i++) {
529 opp->src[i].ivpr = opp->ivpr_reset;
530 opp->src[i].idr = opp->idr_reset;
532 switch (opp->src[i].type) {
533 case IRQ_TYPE_NORMAL:
535 !!(opp->ivpr_reset & IVPR_SENSE_MASK);
538 case IRQ_TYPE_FSLINT:
539 opp->src[i].ivpr |= IVPR_POLARITY_MASK;
542 case IRQ_TYPE_FSLSPECIAL:
546 /* Initialise IRQ destinations */
547 for (i = 0; i < MAX_CPU; i++) {
548 opp->dst[i].ctpr = 15;
549 memset(&opp->dst[i].raised, 0, sizeof(struct irq_queue));
550 opp->dst[i].raised.next = -1;
551 memset(&opp->dst[i].servicing, 0, sizeof(struct irq_queue));
552 opp->dst[i].servicing.next = -1;
554 /* Initialise timers */
555 for (i = 0; i < MAX_TMR; i++) {
556 opp->timers[i].tccr = 0;
557 opp->timers[i].tbcr = TBCR_CI;
559 /* Go out of RESET state */
563 static inline uint32_t read_IRQreg_idr(struct openpic *opp, int n_IRQ)
565 return opp->src[n_IRQ].idr;
568 static inline uint32_t read_IRQreg_ilr(struct openpic *opp, int n_IRQ)
570 if (opp->flags & OPENPIC_FLAG_ILR)
571 return opp->src[n_IRQ].output;
576 static inline uint32_t read_IRQreg_ivpr(struct openpic *opp, int n_IRQ)
578 return opp->src[n_IRQ].ivpr;
581 static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ,
584 struct irq_source *src = &opp->src[n_IRQ];
585 uint32_t normal_mask = (1UL << opp->nb_cpus) - 1;
586 uint32_t crit_mask = 0;
587 uint32_t mask = normal_mask;
588 int crit_shift = IDR_EP_SHIFT - opp->nb_cpus;
591 if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
592 crit_mask = mask << crit_shift;
593 mask |= crit_mask | IDR_EP;
596 src->idr = val & mask;
597 pr_debug("Set IDR %d to 0x%08x\n", n_IRQ, src->idr);
599 if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
600 if (src->idr & crit_mask) {
601 if (src->idr & normal_mask) {
602 pr_debug("%s: IRQ configured for multiple output types, using critical\n",
606 src->output = ILR_INTTGT_CINT;
610 for (i = 0; i < opp->nb_cpus; i++) {
611 int n_ci = IDR_CI0_SHIFT - i;
613 if (src->idr & (1UL << n_ci))
614 src->destmask |= 1UL << i;
617 src->output = ILR_INTTGT_INT;
619 src->destmask = src->idr & normal_mask;
622 src->destmask = src->idr;
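/* Example with OPENPIC_FLAG_IDR_CRIT and nb_cpus = 2: normal_mask is 0x3
 * (P1|P0) and crit_mask is 0x60000000 (CI0|CI1), so writing 0x40000000
 * routes the source to CPU 0's critical output while 0x00000001 routes it
 * to CPU 0's normal INT output.
 */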
626 static inline void write_IRQreg_ilr(struct openpic *opp, int n_IRQ,
629 if (opp->flags & OPENPIC_FLAG_ILR) {
630 struct irq_source *src = &opp->src[n_IRQ];
632 src->output = val & ILR_INTTGT_MASK;
633 pr_debug("Set ILR %d to 0x%08x, output %d\n", n_IRQ, src->idr,
636 /* TODO: on MPIC v4.0 only, set nomask for non-INT */
640 static inline void write_IRQreg_ivpr(struct openpic *opp, int n_IRQ,
645 /* NOTE when implementing newer FSL MPIC models: starting with v4.0,
646 * the polarity bit is read-only on internal interrupts.
648 mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK |
649 IVPR_POLARITY_MASK | opp->vector_mask;
651 /* ACTIVITY bit is read-only */
652 opp->src[n_IRQ].ivpr =
653 (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask);
655 /* For FSL internal interrupts, the sense bit is reserved and zero,
656 * and the interrupt is always level-triggered. Timers and IPIs
657 * have no sense or polarity bits, and are edge-triggered.
659 switch (opp->src[n_IRQ].type) {
660 case IRQ_TYPE_NORMAL:
661 opp->src[n_IRQ].level =
662 !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK);
665 case IRQ_TYPE_FSLINT:
666 opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK;
669 case IRQ_TYPE_FSLSPECIAL:
670 opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK);
674 openpic_update_irq(opp, n_IRQ);
675 pr_debug("Set IVPR %d to 0x%08x -> 0x%08x\n", n_IRQ, val,
676 opp->src[n_IRQ].ivpr);
679 static void openpic_gcr_write(struct openpic *opp, uint64_t val)
681 if (val & GCR_RESET) {
686 opp->gcr &= ~opp->mpic_mode_mask;
687 opp->gcr |= val & opp->mpic_mode_mask;
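/* Only mode bits accepted by this model can change: the FSL MPIC 2.0
 * model allows mixed mode only, while the 4.2 model also allows proxy
 * (coreint) mode; see mpic_create().
 */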
690 static int openpic_gbl_write(void *opaque, gpa_t addr, u32 val)
692 struct openpic *opp = opaque;
695 pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
700 case 0x00: /* Block Revision Register 1 (BRR1) is read-only */
710 err = openpic_cpu_write_internal(opp, addr, val,
713 case 0x1000: /* FRR */
715 case 0x1020: /* GCR */
716 openpic_gcr_write(opp, val);
718 case 0x1080: /* VIR */
720 case 0x1090: /* PIR */
722 * This register is used to reset a CPU core --
723 * let userspace handle it.
727 case 0x10A0: /* IPI_IVPR */
732 idx = (addr - 0x10A0) >> 4;
733 write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
736 case 0x10E0: /* SPVE */
737 opp->spve = val & opp->vector_mask;
746 static int openpic_gbl_read(void *opaque, gpa_t addr, u32 *ptr)
748 struct openpic *opp = opaque;
752 pr_debug("%s: addr %#llx\n", __func__, addr);
758 case 0x1000: /* FRR */
760 retval |= (opp->nb_cpus - 1) << FRR_NCPU_SHIFT;
762 case 0x1020: /* GCR */
765 case 0x1080: /* VIR */
768 case 0x1090: /* PIR */
771 case 0x00: /* Block Revision Register 1 (BRR1) */
782 err = openpic_cpu_read_internal(opp, addr,
783 &retval, get_current_cpu());
785 case 0x10A0: /* IPI_IVPR */
791 idx = (addr - 0x10A0) >> 4;
792 retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx);
795 case 0x10E0: /* SPVE */
803 pr_debug("%s: => 0x%08x\n", __func__, retval);
808 static int openpic_tmr_write(void *opaque, gpa_t addr, u32 val)
810 struct openpic *opp = opaque;
815 pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
819 if (addr == 0x10f0) {
825 idx = (addr >> 6) & 0x3;
828 switch (addr & 0x30) {
829 case 0x00: /* TCCR */
831 case 0x10: /* TBCR */
832 if ((opp->timers[idx].tccr & TCCR_TOG) != 0 &&
833 (val & TBCR_CI) == 0 &&
834 (opp->timers[idx].tbcr & TBCR_CI) != 0)
835 opp->timers[idx].tccr &= ~TCCR_TOG;
837 opp->timers[idx].tbcr = val;
839 case 0x20: /* TVPR */
840 write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val);
843 write_IRQreg_idr(opp, opp->irq_tim0 + idx, val);
850 static int openpic_tmr_read(void *opaque, gpa_t addr, u32 *ptr)
852 struct openpic *opp = opaque;
853 uint32_t retval = -1;
856 pr_debug("%s: addr %#llx\n", __func__, addr);
860 idx = (addr >> 6) & 0x3;
867 switch (addr & 0x30) {
868 case 0x00: /* TCCR */
869 retval = opp->timers[idx].tccr;
871 case 0x10: /* TBCR */
872 retval = opp->timers[idx].tbcr;
874 case 0x20: /* TIPV */
875 retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx);
877 case 0x30: /* TIDE (TIDR) */
878 retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx);
883 pr_debug("%s: => 0x%08x\n", __func__, retval);
888 static int openpic_src_write(void *opaque, gpa_t addr, u32 val)
890 struct openpic *opp = opaque;
893 pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
895 addr = addr & 0xffff;
898 switch (addr & 0x1f) {
900 write_IRQreg_ivpr(opp, idx, val);
903 write_IRQreg_idr(opp, idx, val);
906 write_IRQreg_ilr(opp, idx, val);
913 static int openpic_src_read(void *opaque, gpa_t addr, u32 *ptr)
915 struct openpic *opp = opaque;
919 pr_debug("%s: addr %#llx\n", __func__, addr);
922 addr = addr & 0xffff;
925 switch (addr & 0x1f) {
927 retval = read_IRQreg_ivpr(opp, idx);
930 retval = read_IRQreg_idr(opp, idx);
933 retval = read_IRQreg_ilr(opp, idx);
937 pr_debug("%s: => 0x%08x\n", __func__, retval);
942 static int openpic_msi_write(void *opaque, gpa_t addr, u32 val)
944 struct openpic *opp = opaque;
945 int idx = opp->irq_msi;
948 pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val);
954 srs = val >> MSIIR_SRS_SHIFT;
956 ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT;
957 opp->msi[srs].msir |= 1 << ibs;
958 openpic_set_irq(opp, idx, 1);
961 /* most registers are read-only, thus ignored */
968 static int openpic_msi_read(void *opaque, gpa_t addr, u32 *ptr)
970 struct openpic *opp = opaque;
974 pr_debug("%s: addr %#llx\n", __func__, addr);
988 case 0x70: /* MSIRs */
989 r = opp->msi[srs].msir;
991 opp->msi[srs].msir = 0;
992 openpic_set_irq(opp, opp->irq_msi + srs, 0);
994 case 0x120: /* MSISR */
995 for (i = 0; i < MAX_MSI; i++)
996 r |= (opp->msi[i].msir ? 1 : 0) << i;
1000 pr_debug("%s: => 0x%08x\n", __func__, r);
1005 static int openpic_summary_read(void *opaque, gpa_t addr, u32 *ptr)
1009 pr_debug("%s: addr %#llx\n", __func__, addr);
1011 /* TODO: EISR/EIMR */
1017 static int openpic_summary_write(void *opaque, gpa_t addr, u32 val)
1019 pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val);
1021 /* TODO: EISR/EIMR */
1025 static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
1028 struct openpic *opp = opaque;
1029 struct irq_source *src;
1030 struct irq_dest *dst;
1033 pr_debug("%s: cpu %d addr %#llx <= 0x%08x\n", __func__, idx,
1042 dst = &opp->dst[idx];
1045 case 0x40: /* IPIDR */
1049 idx = (addr - 0x40) >> 4;
1050 /* we still use IDE as a mask of which CPUs to deliver the IPI to. */
1051 opp->src[opp->irq_ipi0 + idx].destmask |= val;
1052 openpic_set_irq(opp, opp->irq_ipi0 + idx, 1);
1053 openpic_set_irq(opp, opp->irq_ipi0 + idx, 0);
1055 case 0x80: /* CTPR */
1056 dst->ctpr = val & 0x0000000F;
1058 pr_debug("%s: set CPU %d ctpr to %d, raised %d servicing %d\n",
1059 __func__, idx, dst->ctpr, dst->raised.priority,
1060 dst->servicing.priority);
1062 if (dst->raised.priority <= dst->ctpr) {
1063 pr_debug("%s: Lower OpenPIC INT output cpu %d due to ctpr\n",
1065 mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
1066 } else if (dst->raised.priority > dst->servicing.priority) {
1067 pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d\n",
1068 __func__, idx, dst->raised.next);
1069 mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
1073 case 0x90: /* WHOAMI */
1074 /* Read-only register */
1076 case 0xA0: /* IACK */
1077 /* Read-only register */
1079 case 0xB0: { /* EOI */
1083 s_IRQ = IRQ_get_next(opp, &dst->servicing);
1086 pr_debug("%s: EOI with no interrupt in service\n",
1091 IRQ_resetbit(&dst->servicing, s_IRQ);
1092 /* Notify listeners that the IRQ is over */
1094 /* Set up next servicing IRQ */
1095 s_IRQ = IRQ_get_next(opp, &dst->servicing);
1096 /* Check queued interrupts. */
1097 n_IRQ = IRQ_get_next(opp, &dst->raised);
1098 src = &opp->src[n_IRQ];
1101 IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) {
1102 pr_debug("Raise OpenPIC INT output cpu %d irq %d\n",
1104 mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
1107 spin_unlock(&opp->lock);
1108 kvm_notify_acked_irq(opp->kvm, 0, notify_eoi);
1109 spin_lock(&opp->lock);
1120 static int openpic_cpu_write(void *opaque, gpa_t addr, u32 val)
1122 struct openpic *opp = opaque;
1124 return openpic_cpu_write_internal(opp, addr, val,
1125 (addr & 0x1f000) >> 12);
1128 static uint32_t openpic_iack(struct openpic *opp, struct irq_dest *dst,
1131 struct irq_source *src;
1134 pr_debug("Lower OpenPIC INT output\n");
1135 mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
1137 irq = IRQ_get_next(opp, &dst->raised);
1138 pr_debug("IACK: irq=%d\n", irq);
1141 /* No more interrupt pending */
1144 src = &opp->src[irq];
1145 if (!(src->ivpr & IVPR_ACTIVITY_MASK) ||
1146 !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) {
1147 pr_err("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x\n",
1148 __func__, irq, dst->ctpr, src->ivpr);
1149 openpic_update_irq(opp, irq);
1152 /* IRQ enter servicing state */
1153 IRQ_setbit(&dst->servicing, irq);
1154 retval = IVPR_VECTOR(opp, src->ivpr);
1158 /* edge-sensitive IRQ */
1159 src->ivpr &= ~IVPR_ACTIVITY_MASK;
1161 IRQ_resetbit(&dst->raised, irq);
1164 if ((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + MAX_IPI))) {
1165 src->destmask &= ~(1 << cpu);
1166 if (src->destmask && !src->level) {
1167 /* trigger on CPUs that didn't know about it yet */
1168 openpic_set_irq(opp, irq, 1);
1169 openpic_set_irq(opp, irq, 0);
1170 /* if all CPUs knew about it, set active bit again */
1171 src->ivpr |= IVPR_ACTIVITY_MASK;
1178 void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
1180 struct openpic *opp = vcpu->arch.mpic;
1181 int cpu = vcpu->arch.irq_cpu_id;
1182 unsigned long flags;
1184 spin_lock_irqsave(&opp->lock, flags);
1186 if ((opp->gcr & opp->mpic_mode_mask) == GCR_MODE_PROXY)
1187 kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu));
1189 spin_unlock_irqrestore(&opp->lock, flags);
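/* In proxy (coreint) mode the guest reads the vector from the EPR
 * register instead of performing an IACK load, so the IACK is done here
 * when the interrupt is delivered to the vcpu.
 */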
1192 static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
1195 struct openpic *opp = opaque;
1196 struct irq_dest *dst;
1199 pr_debug("%s: cpu %d addr %#llx\n", __func__, idx, addr);
1200 retval = 0xFFFFFFFF;
1208 dst = &opp->dst[idx];
1211 case 0x80: /* CTPR */
1214 case 0x90: /* WHOAMI */
1217 case 0xA0: /* IACK */
1218 retval = openpic_iack(opp, dst, idx);
1220 case 0xB0: /* EOI */
1226 pr_debug("%s: => 0x%08x\n", __func__, retval);
1233 static int openpic_cpu_read(void *opaque, gpa_t addr, u32 *ptr)
1235 struct openpic *opp = opaque;
1237 return openpic_cpu_read_internal(opp, addr, ptr,
1238 (addr & 0x1f000) >> 12);
1242 struct list_head list;
1243 int (*read)(void *opaque, gpa_t addr, u32 *ptr);
1244 int (*write)(void *opaque, gpa_t addr, u32 val);
1249 static struct mem_reg openpic_gbl_mmio = {
1250 .write = openpic_gbl_write,
1251 .read = openpic_gbl_read,
1252 .start_addr = OPENPIC_GLB_REG_START,
1253 .size = OPENPIC_GLB_REG_SIZE,
1256 static struct mem_reg openpic_tmr_mmio = {
1257 .write = openpic_tmr_write,
1258 .read = openpic_tmr_read,
1259 .start_addr = OPENPIC_TMR_REG_START,
1260 .size = OPENPIC_TMR_REG_SIZE,
1263 static struct mem_reg openpic_cpu_mmio = {
1264 .write = openpic_cpu_write,
1265 .read = openpic_cpu_read,
1266 .start_addr = OPENPIC_CPU_REG_START,
1267 .size = OPENPIC_CPU_REG_SIZE,
1270 static struct mem_reg openpic_src_mmio = {
1271 .write = openpic_src_write,
1272 .read = openpic_src_read,
1273 .start_addr = OPENPIC_SRC_REG_START,
1274 .size = OPENPIC_SRC_REG_SIZE,
1277 static struct mem_reg openpic_msi_mmio = {
1278 .read = openpic_msi_read,
1279 .write = openpic_msi_write,
1280 .start_addr = OPENPIC_MSI_REG_START,
1281 .size = OPENPIC_MSI_REG_SIZE,
1284 static struct mem_reg openpic_summary_mmio = {
1285 .read = openpic_summary_read,
1286 .write = openpic_summary_write,
1287 .start_addr = OPENPIC_SUMMARY_REG_START,
1288 .size = OPENPIC_SUMMARY_REG_SIZE,
1291 static void fsl_common_init(struct openpic *opp)
1296 list_add(&openpic_msi_mmio.list, &opp->mmio_regions);
1297 list_add(&openpic_summary_mmio.list, &opp->mmio_regions);
1299 opp->vid = VID_REVISION_1_2;
1300 opp->vir = VIR_GENERIC;
1301 opp->vector_mask = 0xFFFF;
1302 opp->tfrr_reset = 0;
1303 opp->ivpr_reset = IVPR_MASK_MASK;
1304 opp->idr_reset = 1 << 0;
1305 opp->max_irq = MAX_IRQ;
1307 opp->irq_ipi0 = virq;
1309 opp->irq_tim0 = virq;
1312 BUG_ON(virq > MAX_IRQ);
1316 for (i = 0; i < opp->fsl->max_ext; i++)
1317 opp->src[i].level = false;
1319 /* Internal interrupts, including message and MSI */
1320 for (i = 16; i < MAX_SRC; i++) {
1321 opp->src[i].type = IRQ_TYPE_FSLINT;
1322 opp->src[i].level = true;
1325 /* timers and IPIs */
1326 for (i = MAX_SRC; i < virq; i++) {
1327 opp->src[i].type = IRQ_TYPE_FSLSPECIAL;
1328 opp->src[i].level = false;
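/* Resulting IRQ layout: 0..MAX_SRC-1 are the external and internal
 * sources (internal from 16 up), followed by the IPI block at irq_ipi0
 * and the timer block at irq_tim0.
 */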
1332 static int kvm_mpic_read_internal(struct openpic *opp, gpa_t addr, u32 *ptr)
1334 struct list_head *node;
1336 list_for_each(node, &opp->mmio_regions) {
1337 struct mem_reg *mr = list_entry(node, struct mem_reg, list);
1339 if (mr->start_addr > addr || addr >= mr->start_addr + mr->size)
1342 return mr->read(opp, addr - mr->start_addr, ptr);
1348 static int kvm_mpic_write_internal(struct openpic *opp, gpa_t addr, u32 val)
1350 struct list_head *node;
1352 list_for_each(node, &opp->mmio_regions) {
1353 struct mem_reg *mr = list_entry(node, struct mem_reg, list);
1355 if (mr->start_addr > addr || addr >= mr->start_addr + mr->size)
1358 return mr->write(opp, addr - mr->start_addr, val);
1364 static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr,
1367 struct openpic *opp = container_of(this, struct openpic, mmio);
1374 if (addr & (len - 1)) {
1375 pr_debug("%s: bad alignment %llx/%d\n",
1376 __func__, addr, len);
1380 spin_lock_irq(&opp->lock);
1381 ret = kvm_mpic_read_internal(opp, addr - opp->reg_base, &u.val);
1382 spin_unlock_irq(&opp->lock);
1385 * Technically only 32-bit accesses are allowed, but be nice to
1386 * people dumping registers a byte at a time -- it works in real
1387 * hardware (reads only, not writes).
1390 *(u32 *)ptr = u.val;
1391 pr_debug("%s: addr %llx ret %d len 4 val %x\n",
1392 __func__, addr, ret, u.val);
1393 } else if (len == 1) {
1394 *(u8 *)ptr = u.bytes[addr & 3];
1395 pr_debug("%s: addr %llx ret %d len 1 val %x\n",
1396 __func__, addr, ret, u.bytes[addr & 3]);
1398 pr_debug("%s: bad length %d\n", __func__, len);
1405 static int kvm_mpic_write(struct kvm_io_device *this, gpa_t addr,
1406 int len, const void *ptr)
1408 struct openpic *opp = container_of(this, struct openpic, mmio);
1412 pr_debug("%s: bad length %d\n", __func__, len);
1416 pr_debug("%s: bad alignment %llx/%d\n", __func__, addr, len);
1420 spin_lock_irq(&opp->lock);
1421 ret = kvm_mpic_write_internal(opp, addr - opp->reg_base,
1423 spin_unlock_irq(&opp->lock);
1425 pr_debug("%s: addr %llx ret %d val %x\n",
1426 __func__, addr, ret, *(const u32 *)ptr);
1431 static void kvm_mpic_dtor(struct kvm_io_device *this)
1433 struct openpic *opp = container_of(this, struct openpic, mmio);
1435 opp->mmio_mapped = false;
1438 static const struct kvm_io_device_ops mpic_mmio_ops = {
1439 .read = kvm_mpic_read,
1440 .write = kvm_mpic_write,
1441 .destructor = kvm_mpic_dtor,
1444 static void map_mmio(struct openpic *opp)
1446 BUG_ON(opp->mmio_mapped);
1447 opp->mmio_mapped = true;
1449 kvm_iodevice_init(&opp->mmio, &mpic_mmio_ops);
1451 kvm_io_bus_register_dev(opp->kvm, KVM_MMIO_BUS,
1452 opp->reg_base, OPENPIC_REG_SIZE,
1456 static void unmap_mmio(struct openpic *opp)
1458 if (opp->mmio_mapped) {
1459 opp->mmio_mapped = false;
1460 kvm_io_bus_unregister_dev(opp->kvm, KVM_MMIO_BUS, &opp->mmio);
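/* The register window spans OPENPIC_REG_SIZE (0x40000) bytes, so the base
 * address supplied by userspace must be 256 KiB aligned.
 */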
1464 static int set_base_addr(struct openpic *opp, struct kvm_device_attr *attr)
1468 if (copy_from_user(&base, (u64 __user *)(long)attr->addr, sizeof(u64)))
1471 if (base & 0x3ffff) {
1472 pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx not aligned\n",
1477 if (base == opp->reg_base)
1480 mutex_lock(&opp->kvm->slots_lock);
1483 opp->reg_base = base;
1485 pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx\n",
1493 mutex_unlock(&opp->kvm->slots_lock);
1501 static int access_reg(struct openpic *opp, gpa_t addr, u32 *val, int type)
1508 spin_lock_irq(&opp->lock);
1510 if (type == ATTR_SET)
1511 ret = kvm_mpic_write_internal(opp, addr, *val);
1513 ret = kvm_mpic_read_internal(opp, addr, val);
1515 spin_unlock_irq(&opp->lock);
1517 pr_debug("%s: type %d addr %llx val %x\n", __func__, type, addr, *val);
1522 static int mpic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1524 struct openpic *opp = dev->private;
1527 switch (attr->group) {
1528 case KVM_DEV_MPIC_GRP_MISC:
1529 switch (attr->attr) {
1530 case KVM_DEV_MPIC_BASE_ADDR:
1531 return set_base_addr(opp, attr);
1536 case KVM_DEV_MPIC_GRP_REGISTER:
1537 if (get_user(attr32, (u32 __user *)(long)attr->addr))
1540 return access_reg(opp, attr->attr, &attr32, ATTR_SET);
1542 case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
1543 if (attr->attr > MAX_SRC)
1546 if (get_user(attr32, (u32 __user *)(long)attr->addr))
1549 if (attr32 != 0 && attr32 != 1)
1552 spin_lock_irq(&opp->lock);
1553 openpic_set_irq(opp, attr->attr, attr32);
1554 spin_unlock_irq(&opp->lock);
1561 static int mpic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1563 struct openpic *opp = dev->private;
1568 switch (attr->group) {
1569 case KVM_DEV_MPIC_GRP_MISC:
1570 switch (attr->attr) {
1571 case KVM_DEV_MPIC_BASE_ADDR:
1572 mutex_lock(&opp->kvm->slots_lock);
1573 attr64 = opp->reg_base;
1574 mutex_unlock(&opp->kvm->slots_lock);
1576 if (copy_to_user((u64 __user *)(long)attr->addr,
1577 &attr64, sizeof(u64)))
1585 case KVM_DEV_MPIC_GRP_REGISTER:
1586 ret = access_reg(opp, attr->attr, &attr32, ATTR_GET);
1590 if (put_user(attr32, (u32 __user *)(long)attr->addr))
1595 case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
1596 if (attr->attr > MAX_SRC)
1599 spin_lock_irq(&opp->lock);
1600 attr32 = opp->src[attr->attr].pending;
1601 spin_unlock_irq(&opp->lock);
1603 if (put_user(attr32, (u32 __user *)(long)attr->addr))
1612 static int mpic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1614 switch (attr->group) {
1615 case KVM_DEV_MPIC_GRP_MISC:
1616 switch (attr->attr) {
1617 case KVM_DEV_MPIC_BASE_ADDR:
1623 case KVM_DEV_MPIC_GRP_REGISTER:
1626 case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
1627 if (attr->attr > MAX_SRC)
1636 static void mpic_destroy(struct kvm_device *dev)
1638 struct openpic *opp = dev->private;
1640 if (opp->mmio_mapped) {
1642 * Normally we get unmapped by kvm_io_bus_destroy(),
1643 * which happens before the VCPUs release their references.
1645 * Thus, we should only get here if no VCPUs took a reference
1646 * to us in the first place.
1648 WARN_ON(opp->nb_cpus != 0);
1652 dev->kvm->arch.mpic = NULL;
1656 static int mpic_set_default_irq_routing(struct openpic *opp)
1658 struct kvm_irq_routing_entry *routing;
1660 /* Create a nop default map, so that dereferencing it still works */
1661 routing = kzalloc((sizeof(*routing)), GFP_KERNEL);
1665 kvm_set_irq_routing(opp->kvm, routing, 0, 0);
1671 static int mpic_create(struct kvm_device *dev, u32 type)
1673 struct openpic *opp;
1676 /* We only support one MPIC at a time for now */
1677 if (dev->kvm->arch.mpic)
1680 opp = kzalloc(sizeof(struct openpic), GFP_KERNEL);
1685 opp->kvm = dev->kvm;
1688 spin_lock_init(&opp->lock);
1690 INIT_LIST_HEAD(&opp->mmio_regions);
1691 list_add(&openpic_gbl_mmio.list, &opp->mmio_regions);
1692 list_add(&openpic_tmr_mmio.list, &opp->mmio_regions);
1693 list_add(&openpic_src_mmio.list, &opp->mmio_regions);
1694 list_add(&openpic_cpu_mmio.list, &opp->mmio_regions);
1696 switch (opp->model) {
1697 case KVM_DEV_TYPE_FSL_MPIC_20:
1698 opp->fsl = &fsl_mpic_20;
1699 opp->brr1 = 0x00400200;
1700 opp->flags |= OPENPIC_FLAG_IDR_CRIT;
1702 opp->mpic_mode_mask = GCR_MODE_MIXED;
1704 fsl_common_init(opp);
1708 case KVM_DEV_TYPE_FSL_MPIC_42:
1709 opp->fsl = &fsl_mpic_42;
1710 opp->brr1 = 0x00400402;
1711 opp->flags |= OPENPIC_FLAG_ILR;
1713 opp->mpic_mode_mask = GCR_MODE_PROXY;
1715 fsl_common_init(opp);
1724 ret = mpic_set_default_irq_routing(opp);
1731 dev->kvm->arch.mpic = opp;
1740 struct kvm_device_ops kvm_mpic_ops = {
1742 .create = mpic_create,
1743 .destroy = mpic_destroy,
1744 .set_attr = mpic_set_attr,
1745 .get_attr = mpic_get_attr,
1746 .has_attr = mpic_has_attr,
1749 int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
1752 struct openpic *opp = dev->private;
1755 if (dev->ops != &kvm_mpic_ops)
1757 if (opp->kvm != vcpu->kvm)
1759 if (cpu < 0 || cpu >= MAX_CPU)
1762 spin_lock_irq(&opp->lock);
1764 if (opp->dst[cpu].vcpu) {
1768 if (vcpu->arch.irq_type) {
1773 opp->dst[cpu].vcpu = vcpu;
1774 opp->nb_cpus = max(opp->nb_cpus, cpu + 1);
1776 vcpu->arch.mpic = opp;
1777 vcpu->arch.irq_cpu_id = cpu;
1778 vcpu->arch.irq_type = KVMPPC_IRQ_MPIC;
1780 /* This might need to be changed if GCR gets extended */
1781 if (opp->mpic_mode_mask == GCR_MODE_PROXY)
1782 vcpu->arch.epr_flags |= KVMPPC_EPR_KERNEL;
1784 kvm_device_get(dev);
1786 spin_unlock_irq(&opp->lock);
1791 * This should only happen immediately before the mpic is destroyed,
1792 * so we shouldn't need to worry about anything still trying to
1793 * access the vcpu pointer.
1795 void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu)
1797 BUG_ON(!opp->dst[vcpu->arch.irq_cpu_id].vcpu);
1799 opp->dst[vcpu->arch.irq_cpu_id].vcpu = NULL;
1800 kvm_device_put(opp->dev);
1805 * < 0 Interrupt was ignored (masked or not delivered for other reasons)
1806 * = 0 Interrupt was coalesced (previous irq is still pending)
1807 * > 0 Number of CPUs interrupt was delivered to
1809 static int mpic_set_irq(struct kvm_kernel_irq_routing_entry *e,
1810 struct kvm *kvm, int irq_source_id, int level,
1813 u32 irq = e->irqchip.pin;
1814 struct openpic *opp = kvm->arch.mpic;
1815 unsigned long flags;
1817 spin_lock_irqsave(&opp->lock, flags);
1818 openpic_set_irq(opp, irq, level);
1819 spin_unlock_irqrestore(&opp->lock, flags);
1821 /* None of the code paths we care about check the return value */
1825 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
1826 struct kvm *kvm, int irq_source_id, int level, bool line_status)
1828 struct openpic *opp = kvm->arch.mpic;
1829 unsigned long flags;
1831 spin_lock_irqsave(&opp->lock, flags);
1834 * XXX We ignore the target address for now, as we only support
1835 * a single MSI bank.
1837 openpic_msi_write(kvm->arch.mpic, MSIIR_OFFSET, e->msi.data);
1838 spin_unlock_irqrestore(&opp->lock, flags);
1840 /* None of the code paths we care about check the return value */
1844 int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
1845 struct kvm_kernel_irq_routing_entry *e,
1846 const struct kvm_irq_routing_entry *ue)
1851 case KVM_IRQ_ROUTING_IRQCHIP:
1852 e->set = mpic_set_irq;
1853 e->irqchip.irqchip = ue->u.irqchip.irqchip;
1854 e->irqchip.pin = ue->u.irqchip.pin;
1855 if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS)
1857 rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi;
1859 case KVM_IRQ_ROUTING_MSI:
1860 e->set = kvm_set_msi;
1861 e->msi.address_lo = ue->u.msi.address_lo;
1862 e->msi.address_hi = ue->u.msi.address_hi;
1863 e->msi.data = ue->u.msi.data;