2 * Copyright (C) 2012 ARM Ltd.
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 #include <linux/cpu.h>
20 #include <linux/kvm.h>
21 #include <linux/kvm_host.h>
22 #include <linux/interrupt.h>
25 #include <linux/of_address.h>
26 #include <linux/of_irq.h>
27 #include <linux/rculist.h>
28 #include <linux/uaccess.h>
30 #include <asm/kvm_emulate.h>
31 #include <asm/kvm_arm.h>
32 #include <asm/kvm_mmu.h>
33 #include <trace/events/kvm.h>
35 #include <kvm/iodev.h>
38 * How the whole thing works (courtesy of Christoffer Dall):
40 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
41 * something is pending on the CPU interface.
42 * - Interrupts that are pending on the distributor are stored on the
43 * vgic.irq_pending vgic bitmap (this bitmap is updated by both user land
44 * ioctls and guest mmio ops, and other in-kernel peripherals such as the arch. timers).
46 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is recalculated.
48 * - To calculate the oracle, we need info for each cpu from
49 * compute_pending_for_cpu, which considers:
50 * - PPI: dist->irq_pending & dist->irq_enabled
51 * - SPI: dist->irq_pending & dist->irq_enabled & dist->irq_spi_target
52 * - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
53 * registers, stored on each vcpu. We only keep one bit of
54 * information per interrupt, making sure that only one vcpu can
55 * accept the interrupt.
56 * - If any of the above state changes, we must recalculate the oracle.
57 * - The same is true when injecting an interrupt, except that we only
58 * consider a single interrupt at a time. The irq_spi_cpu array
59 * contains the target CPU for each SPI.
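 *
 *   In code terms, the oracle update is essentially (a sketch of what
 *   vgic_update_state() below does for each vcpu c):
 *
 *	if (compute_pending_for_cpu(vcpu))
 *		set_bit(c, dist->irq_pending_on_cpu);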
61 * The handling of level interrupts adds some extra complexity. We
62 * need to track when the interrupt has been EOIed, so we can sample
63 * the 'line' again. This is achieved as follows:
65 * - When a level interrupt is moved onto a vcpu, the corresponding
66 * bit in irq_queued is set. As long as this bit is set, the line
67 * will be ignored for further interrupts. The interrupt is injected
68 * into the vcpu with the GICH_LR_EOI bit set (generate a
69 * maintenance interrupt on EOI).
70 * - When the interrupt is EOIed, the maintenance interrupt fires,
71 * and clears the corresponding bit in irq_queued. This allows the
72 * interrupt line to be sampled again.
73 * - Note that level-triggered interrupts can also be set to pending from
74 * writes to GICD_ISPENDRn and lowering the external input line does not
75 * cause the interrupt to become inactive in such a situation.
76 * Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
77 * inactive as long as the external input line is held high.
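 *
 * A condensed lifecycle sketch of a level interrupt (restating the rules
 * above, not additional behaviour):
 *
 *	line raised  -> irq_pending set, irq_queued set, LR injected w/ EOI
 *	guest EOIs   -> maintenance interrupt -> irq_queued cleared
 *	next sample  -> line still high ? pend again : stay inactive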
80 * Initialization rules: there are multiple stages to the vgic
81 * initialization, both for the distributor and the CPU interfaces.
85 * - kvm_vgic_early_init(): initialization of static data that doesn't
86 * depend on any sizing information or emulation type. No allocation is allowed there.
89 * - vgic_init(): allocation and initialization of the generic data
90 * structures that depend on sizing information (number of CPUs,
91 * number of interrupts). Also initializes the vcpu specific data
92 * structures. Can be executed lazily for GICv2.
93 * [to be renamed to kvm_vgic_init??]
97 * - kvm_vgic_cpu_early_init(): initialization of static data that
98 * doesn't depend on any sizing information or emulation type. No
99 * allocation is allowed there.
104 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
105 static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
106 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
107 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
108 static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
111 static const struct vgic_ops *vgic_ops;
112 static const struct vgic_params *vgic;
114 static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
116 vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source);
119 static bool queue_sgi(struct kvm_vcpu *vcpu, int irq)
121 return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq);
124 int kvm_vgic_map_resources(struct kvm *kvm)
126 return kvm->arch.vgic.vm_ops.map_resources(kvm, vgic);
130 * struct vgic_bitmap contains a bitmap made of unsigned longs, but
131 * extracts u32s out of them.
133 * This does not work on 64-bit BE systems, because the bitmap access
134 * will store two consecutive 32-bit words with the higher-addressed
135 * register's bits at the lower index and the lower-addressed register's
136 * bits at the higher index.
138 * Therefore, swizzle the register index when accessing the 32-bit word
139 * registers to access the right register's value.
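 *
 * A worked sketch: with REG_OFFSET_SWIZZLE == 1, each 64-bit long holds
 * two consecutive 32-bit register words, and on BE the word with the
 * lower register number sits in the higher-indexed u32 half. XORing the
 * word index with 1 (see vgic_bitmap_get_reg() below) therefore fetches
 * register word 0 from u32 index 1, where its bits actually live.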
141 #if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
142 #define REG_OFFSET_SWIZZLE 1
144 #define REG_OFFSET_SWIZZLE 0
147 static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
151 nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
153 b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL);
157 b->shared = b->private + nr_cpus;
162 static void vgic_free_bitmap(struct vgic_bitmap *b)
170 * Call this function to convert a u64 value to an unsigned long * bitmask
171 * in a way that works on both 32-bit and 64-bit LE and BE platforms.
173 * Warning: Calling this function may modify *val.
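 *
 * A sketch of why the swap is needed: on a 32-bit BE host the u64's high
 * word is stored first, so without the swap test_bit(0, ...) would land
 * on bit 32 of the u64. Typical use (as in the sync path below):
 *
 *	u64 elrsr = vgic_get_elrsr(vcpu);
 *	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);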
175 static unsigned long *u64_to_bitmask(u64 *val)
177 #if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
178 *val = (*val >> 32) | (*val << 32);
180 return (unsigned long *)val;
183 u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset)
187 return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE;
189 return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
192 static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
195 if (irq < VGIC_NR_PRIVATE_IRQS)
196 return test_bit(irq, x->private + cpuid);
198 return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
201 void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
206 if (irq < VGIC_NR_PRIVATE_IRQS) {
207 reg = x->private + cpuid;
210 irq -= VGIC_NR_PRIVATE_IRQS;
219 static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
221 return x->private + cpuid;
224 unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
229 static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs)
233 size = nr_cpus * VGIC_NR_PRIVATE_IRQS;
234 size += nr_irqs - VGIC_NR_PRIVATE_IRQS;
236 x->private = kzalloc(size, GFP_KERNEL);
240 x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32);
244 static void vgic_free_bytemap(struct vgic_bytemap *b)
251 u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
255 if (offset < VGIC_NR_PRIVATE_IRQS) {
257 offset += cpuid * VGIC_NR_PRIVATE_IRQS;
260 offset -= VGIC_NR_PRIVATE_IRQS;
263 return reg + (offset / sizeof(u32));
266 #define VGIC_CFG_LEVEL 0
267 #define VGIC_CFG_EDGE 1
269 static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
271 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
274 irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
275 return irq_val == VGIC_CFG_EDGE;
278 static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
280 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
282 return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
285 static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
287 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
289 return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
292 static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
294 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
296 return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
299 static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
301 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
303 vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
306 static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
308 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
310 vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
313 static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
315 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
317 vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
320 static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
322 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
324 vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
327 static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
329 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
331 return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
334 static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
336 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
338 vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
341 static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
343 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
345 vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
348 static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
350 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
352 return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
355 static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
357 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
359 vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
362 static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
364 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
366 return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
369 void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
371 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
373 vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
376 void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
378 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
380 vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
383 static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
385 if (irq < VGIC_NR_PRIVATE_IRQS)
386 set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
388 set_bit(irq - VGIC_NR_PRIVATE_IRQS,
389 vcpu->arch.vgic_cpu.pending_shared);
392 void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
394 if (irq < VGIC_NR_PRIVATE_IRQS)
395 clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
397 clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
398 vcpu->arch.vgic_cpu.pending_shared);
401 static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
403 return !vgic_irq_is_queued(vcpu, irq);
407 * vgic_reg_access - access vgic register
408 * @mmio: pointer to the data describing the mmio access
409 * @reg: pointer to the virtual backing of vgic distributor data
410 * @offset: least significant 2 bits used for word offset
411 * @mode: ACCESS_ mode (see defines above)
413 * Helper to make vgic register access easier using one of the access
414 * modes defined for vgic register access
415 * (read, raz, write-ignored, setbit, clearbit, write)
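 *
 * A usage sketch (this is exactly what handle_mmio_raz_wi() below does):
 *
 *	vgic_reg_access(mmio, NULL, offset,
 *			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);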
417 void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
418 phys_addr_t offset, int mode)
420 int word_offset = (offset & 3) * 8;
421 u32 mask = (1UL << (mmio->len * 8)) - 1;
425 * Any alignment fault should have been delivered to the guest
426 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
432 BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
436 if (mmio->is_write) {
437 u32 data = mmio_data_read(mmio, mask) << word_offset;
438 switch (ACCESS_WRITE_MASK(mode)) {
439 case ACCESS_WRITE_IGNORED:
442 case ACCESS_WRITE_SETBIT:
446 case ACCESS_WRITE_CLEARBIT:
450 case ACCESS_WRITE_VALUE:
451 regval = (regval & ~(mask << word_offset)) | data;
456 switch (ACCESS_READ_MASK(mode)) {
457 case ACCESS_READ_RAZ:
461 case ACCESS_READ_VALUE:
462 mmio_data_write(mmio, mask, regval >> word_offset);
467 bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
470 vgic_reg_access(mmio, NULL, offset,
471 ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
475 bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
476 phys_addr_t offset, int vcpu_id, int access)
479 int mode = ACCESS_READ_VALUE | access;
480 struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id);
482 reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset);
483 vgic_reg_access(mmio, reg, offset, mode);
484 if (mmio->is_write) {
485 if (access & ACCESS_WRITE_CLEARBIT) {
486 if (offset < 4) /* Force SGI enabled */
488 vgic_retire_disabled_irqs(target_vcpu);
490 vgic_update_state(kvm);
497 bool vgic_handle_set_pending_reg(struct kvm *kvm,
498 struct kvm_exit_mmio *mmio,
499 phys_addr_t offset, int vcpu_id)
503 int mode = ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT;
504 struct vgic_dist *dist = &kvm->arch.vgic;
506 reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu_id, offset);
507 level_mask = (~(*reg));
509 /* Mark both level and edge triggered irqs as pending */
510 reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
512 vgic_reg_access(mmio, reg, offset, mode);
514 if (mmio->is_write) {
515 /* Set the soft-pending flag only for level-triggered irqs */
516 reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
518 vgic_reg_access(mmio, reg, offset, mode);
521 /* Ignore writes to SGIs */
524 *reg |= orig & 0xffff;
527 vgic_update_state(kvm);
534 bool vgic_handle_clear_pending_reg(struct kvm *kvm,
535 struct kvm_exit_mmio *mmio,
536 phys_addr_t offset, int vcpu_id)
540 int mode = ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT;
541 struct vgic_dist *dist = &kvm->arch.vgic;
543 reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
545 vgic_reg_access(mmio, reg, offset, mode);
546 if (mmio->is_write) {
547 /* Re-set level-triggered level-active interrupts */
548 level_active = vgic_bitmap_get_reg(&dist->irq_level,
550 reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
551 *reg |= *level_active;
553 /* Ignore writes to SGIs */
556 *reg |= orig & 0xffff;
559 /* Clear soft-pending flags */
560 reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
562 vgic_reg_access(mmio, reg, offset, mode);
564 vgic_update_state(kvm);
570 bool vgic_handle_set_active_reg(struct kvm *kvm,
571 struct kvm_exit_mmio *mmio,
572 phys_addr_t offset, int vcpu_id)
575 struct vgic_dist *dist = &kvm->arch.vgic;
577 reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
578 vgic_reg_access(mmio, reg, offset,
579 ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
581 if (mmio->is_write) {
582 vgic_update_state(kvm);
589 bool vgic_handle_clear_active_reg(struct kvm *kvm,
590 struct kvm_exit_mmio *mmio,
591 phys_addr_t offset, int vcpu_id)
594 struct vgic_dist *dist = &kvm->arch.vgic;
596 reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
597 vgic_reg_access(mmio, reg, offset,
598 ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
600 if (mmio->is_write) {
601 vgic_update_state(kvm);
608 static u32 vgic_cfg_expand(u16 val)
614 * Turn a 16-bit value like abcd...mnop into a 32-bit word
615 * a0b0c0d0...m0n0o0p0, which is the layout of the HW cfg register.
617 for (i = 0; i < 16; i++)
618 res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);
623 static u16 vgic_cfg_compress(u32 val)
629 * Turn a 32-bit word a0b0c0d0...m0n0o0p0 into the 16-bit value
630 * abcd...mnop, which is what we really care about.
632 for (i = 0; i < 16; i++)
633 res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;
639 * The distributor uses 2 bits per IRQ for the CFG register, but the
640 * LSB is always 0. As such, we only keep the upper bit, and use the
641 * two above functions to compress/expand the bits
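 *
 * A worked example: vgic_cfg_expand(0x0003) yields 0x0000000a (bits 0 and
 * 1 move to bits 1 and 3), and vgic_cfg_compress(0x0000000a) gives back
 * 0x0003.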
643 bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
653 val = vgic_cfg_expand(val);
654 vgic_reg_access(mmio, &val, offset,
655 ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
656 if (mmio->is_write) {
658 *reg = ~0U; /* Force PPIs/SGIs to 1 */
662 val = vgic_cfg_compress(val);
667 *reg &= 0xffff << 16;
676 * vgic_unqueue_irqs - move pending/active IRQs from LRs to the distributor
677 * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
679 * Move any IRQs that have already been assigned to LRs back to the
680 * emulated distributor state so that the complete emulated state can be read
681 * from the main emulation structures without investigating the LRs.
683 void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
685 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
688 for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
689 struct vgic_lr lr = vgic_get_lr(vcpu, i);
692 * There are three options for the state bits:
693 *
694 * 01: pending
695 * 10: active
696 * 11: pending and active
698 BUG_ON(!(lr.state & LR_STATE_MASK));
700 /* Reestablish SGI source for pending and active IRQs */
701 if (lr.irq < VGIC_NR_SGIS)
702 add_sgi_source(vcpu, lr.irq, lr.source);
705 * If the LR holds an active (10) or a pending and active (11)
706 * interrupt then move the active state to the
707 * distributor tracking bit.
709 if (lr.state & LR_STATE_ACTIVE) {
710 vgic_irq_set_active(vcpu, lr.irq);
711 lr.state &= ~LR_STATE_ACTIVE;
715 * Reestablish the pending state on the distributor and the
716 * CPU interface. It may have already been pending, but that
717 * is fine; we are then only setting a few bits that were already set.
720 if (lr.state & LR_STATE_PENDING) {
721 vgic_dist_irq_set_pending(vcpu, lr.irq);
722 lr.state &= ~LR_STATE_PENDING;
725 vgic_set_lr(vcpu, i, lr);
728 * Mark the LR as free for other use.
730 BUG_ON(lr.state & LR_STATE_MASK);
731 vgic_retire_lr(i, lr.irq, vcpu);
732 vgic_irq_clear_queued(vcpu, lr.irq);
734 /* Finally update the VGIC state. */
735 vgic_update_state(vcpu->kvm);
740 struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
741 int len, gpa_t offset)
743 while (ranges->len) {
744 if (offset >= ranges->base &&
745 (offset + len) <= (ranges->base + ranges->len))
753 static bool vgic_validate_access(const struct vgic_dist *dist,
754 const struct vgic_io_range *range,
755 unsigned long offset)
759 if (!range->bits_per_irq)
760 return true; /* Not an irq-based access */
762 irq = offset * 8 / range->bits_per_irq;
763 if (irq >= dist->nr_irqs)
770 * Call the respective handler function for the given range.
771 * We split up any 64-bit accesses into two consecutive 32-bit
772 * handler calls and merge the result afterwards.
773 * We do this in a little-endian fashion regardless of the host's
774 * or guest's endianness, because the GIC is always LE and the rest of
775 * the code (vgic_reg_access) also handles it in a LE fashion already.
776 * At this point we have already identified the handle function, so
777 * range points to that one entry and offset is relative to this.
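 *
 * Sketch of the split (0x400 is an illustrative offset): an 8-byte access
 * is dispatched as handle_mmio(vcpu, &mmio32, 0x404) for the upper data
 * word, then handle_mmio(vcpu, &mmio32, 0x400) for the lower one, exactly
 * as the code below does.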
779 static bool call_range_handler(struct kvm_vcpu *vcpu,
780 struct kvm_exit_mmio *mmio,
781 unsigned long offset,
782 const struct vgic_io_range *range)
784 struct kvm_exit_mmio mmio32;
787 if (likely(mmio->len <= 4))
788 return range->handle_mmio(vcpu, mmio, offset);
791 * Any access bigger than 4 bytes (that we currently handle in KVM)
792 * is actually 8 bytes long, caused by a 64-bit access
796 mmio32.is_write = mmio->is_write;
797 mmio32.private = mmio->private;
799 mmio32.phys_addr = mmio->phys_addr + 4;
800 mmio32.data = &((u32 *)mmio->data)[1];
801 ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
803 mmio32.phys_addr = mmio->phys_addr;
804 mmio32.data = &((u32 *)mmio->data)[0];
805 ret |= range->handle_mmio(vcpu, &mmio32, offset);
811 * vgic_handle_mmio_access - handle an in-kernel MMIO access
812 * This is called by the read/write KVM IO device wrappers below.
813 * @vcpu: pointer to the vcpu performing the access
814 * @this: pointer to the KVM IO device in charge
815 * @addr: guest physical address of the access
816 * @len: size of the access
817 * @val: pointer to the data region
818 * @is_write: read or write access
820 * returns true if the MMIO access could be performed
822 static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
823 struct kvm_io_device *this, gpa_t addr,
824 int len, void *val, bool is_write)
826 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
827 struct vgic_io_device *iodev = container_of(this,
828 struct vgic_io_device, dev);
829 struct kvm_run *run = vcpu->run;
830 const struct vgic_io_range *range;
831 struct kvm_exit_mmio mmio;
835 offset = addr - iodev->addr;
836 range = vgic_find_range(iodev->reg_ranges, len, offset);
837 if (unlikely(!range || !range->handle_mmio)) {
838 pr_warn("Unhandled access %d %08llx %d\n", is_write, addr, len);
842 mmio.phys_addr = addr;
844 mmio.is_write = is_write;
846 mmio.private = iodev->redist_vcpu;
848 spin_lock(&dist->lock);
849 offset -= range->base;
850 if (vgic_validate_access(dist, range, offset)) {
851 updated_state = call_range_handler(vcpu, &mmio, offset, range);
855 updated_state = false;
857 spin_unlock(&dist->lock);
858 run->mmio.is_write = is_write;
860 run->mmio.phys_addr = addr;
861 memcpy(run->mmio.data, val, len);
863 kvm_handle_mmio_return(vcpu, run);
866 vgic_kick_vcpus(vcpu->kvm);
871 static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu,
872 struct kvm_io_device *this,
873 gpa_t addr, int len, void *val)
875 return vgic_handle_mmio_access(vcpu, this, addr, len, val, false);
878 static int vgic_handle_mmio_write(struct kvm_vcpu *vcpu,
879 struct kvm_io_device *this,
880 gpa_t addr, int len, const void *val)
882 return vgic_handle_mmio_access(vcpu, this, addr, len, (void *)val,
886 struct kvm_io_device_ops vgic_io_ops = {
887 .read = vgic_handle_mmio_read,
888 .write = vgic_handle_mmio_write,
892 * vgic_register_kvm_io_dev - register VGIC register frame on the KVM I/O bus
893 * @kvm: The VM structure pointer
894 * @base: The (guest) base address for the register frame
895 * @len: Length of the register frame window
896 * @ranges: Describing the handler functions for each register
897 * @redist_vcpu_id: The VCPU ID to pass on to the handlers on call
898 * @iodev: Points to memory to be passed on to the handler
900 * @iodev stores the parameters of this function so that they are usable by
901 * the handler and the dispatcher function (since the KVM I/O bus framework
902 * lacks an opaque parameter). Initialization is done in this function, but
903 * the reference must stay valid and unique for the whole VGIC lifetime.
904 * If the register frame is not mapped for a specific VCPU, pass -1 to @redist_vcpu_id.
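 *
 * A registration sketch (dist_base, vgic_dist_ranges and dev->dist_iodev
 * stand in for the caller's actual values):
 *
 *	ret = vgic_register_kvm_io_dev(kvm, dist_base,
 *				       KVM_VGIC_V2_DIST_SIZE,
 *				       vgic_dist_ranges, -1, &dev->dist_iodev);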
907 int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len,
908 const struct vgic_io_range *ranges,
910 struct vgic_io_device *iodev)
912 struct kvm_vcpu *vcpu = NULL;
915 if (redist_vcpu_id >= 0)
916 vcpu = kvm_get_vcpu(kvm, redist_vcpu_id);
920 iodev->reg_ranges = ranges;
921 iodev->redist_vcpu = vcpu;
923 kvm_iodevice_init(&iodev->dev, &vgic_io_ops);
925 mutex_lock(&kvm->slots_lock);
927 ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len,
929 mutex_unlock(&kvm->slots_lock);
931 /* Mark the iodev as invalid if registration fails. */
933 iodev->dev.ops = NULL;
938 static int vgic_nr_shared_irqs(struct vgic_dist *dist)
940 return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
943 static int compute_active_for_cpu(struct kvm_vcpu *vcpu)
945 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
946 unsigned long *active, *enabled, *act_percpu, *act_shared;
947 unsigned long active_private, active_shared;
948 int nr_shared = vgic_nr_shared_irqs(dist);
951 vcpu_id = vcpu->vcpu_id;
952 act_percpu = vcpu->arch.vgic_cpu.active_percpu;
953 act_shared = vcpu->arch.vgic_cpu.active_shared;
955 active = vgic_bitmap_get_cpu_map(&dist->irq_active, vcpu_id);
956 enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
957 bitmap_and(act_percpu, active, enabled, VGIC_NR_PRIVATE_IRQS);
959 active = vgic_bitmap_get_shared_map(&dist->irq_active);
960 enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
961 bitmap_and(act_shared, active, enabled, nr_shared);
962 bitmap_and(act_shared, act_shared,
963 vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
966 active_private = find_first_bit(act_percpu, VGIC_NR_PRIVATE_IRQS);
967 active_shared = find_first_bit(act_shared, nr_shared);
969 return (active_private < VGIC_NR_PRIVATE_IRQS ||
970 active_shared < nr_shared);
973 static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
975 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
976 unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
977 unsigned long pending_private, pending_shared;
978 int nr_shared = vgic_nr_shared_irqs(dist);
981 vcpu_id = vcpu->vcpu_id;
982 pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
983 pend_shared = vcpu->arch.vgic_cpu.pending_shared;
985 pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
986 enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
987 bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
989 pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
990 enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
991 bitmap_and(pend_shared, pending, enabled, nr_shared);
992 bitmap_and(pend_shared, pend_shared,
993 vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
996 pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
997 pending_shared = find_first_bit(pend_shared, nr_shared);
998 return (pending_private < VGIC_NR_PRIVATE_IRQS ||
999 pending_shared < nr_shared);
1003 * Update the interrupt state and determine which CPUs have pending
1004 * or active interrupts. Must be called with distributor lock held.
1006 void vgic_update_state(struct kvm *kvm)
1008 struct vgic_dist *dist = &kvm->arch.vgic;
1009 struct kvm_vcpu *vcpu;
1012 if (!dist->enabled) {
1013 set_bit(0, dist->irq_pending_on_cpu);
1017 kvm_for_each_vcpu(c, vcpu, kvm) {
1018 if (compute_pending_for_cpu(vcpu))
1019 set_bit(c, dist->irq_pending_on_cpu);
1021 if (compute_active_for_cpu(vcpu))
1022 set_bit(c, dist->irq_active_on_cpu);
1024 clear_bit(c, dist->irq_active_on_cpu);
1028 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
1030 return vgic_ops->get_lr(vcpu, lr);
1033 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
1036 vgic_ops->set_lr(vcpu, lr, vlr);
1039 static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
1042 vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
1045 static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
1047 return vgic_ops->get_elrsr(vcpu);
1050 static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
1052 return vgic_ops->get_eisr(vcpu);
1055 static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu)
1057 vgic_ops->clear_eisr(vcpu);
1060 static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
1062 return vgic_ops->get_interrupt_status(vcpu);
1065 static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
1067 vgic_ops->enable_underflow(vcpu);
1070 static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
1072 vgic_ops->disable_underflow(vcpu);
1075 void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
1077 vgic_ops->get_vmcr(vcpu, vmcr);
1080 void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
1082 vgic_ops->set_vmcr(vcpu, vmcr);
1085 static inline void vgic_enable(struct kvm_vcpu *vcpu)
1087 vgic_ops->enable(vcpu);
1090 static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
1092 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1093 struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
1096 * We must transfer the pending state back to the distributor before
1097 * retiring the LR, otherwise we may lose edge-triggered interrupts.
1099 if (vlr.state & LR_STATE_PENDING) {
1100 vgic_dist_irq_set_pending(vcpu, irq);
1105 vgic_set_lr(vcpu, lr_nr, vlr);
1106 clear_bit(lr_nr, vgic_cpu->lr_used);
1107 vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
1108 vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
1112 * An interrupt may have been disabled after being made pending on the
1113 * CPU interface (the classic case is a timer running while we're
1114 * rebooting the guest - the interrupt would kick as soon as the CPU
1115 * interface gets enabled, with deadly consequences).
1117 * The solution is to examine already active LRs, and check the
1118 * interrupt is still enabled. If not, just retire it.
1120 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
1122 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1125 for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
1126 struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
1128 if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
1129 vgic_retire_lr(lr, vlr.irq, vcpu);
1130 if (vgic_irq_is_queued(vcpu, vlr.irq))
1131 vgic_irq_clear_queued(vcpu, vlr.irq);
1136 static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
1137 int lr_nr, struct vgic_lr vlr)
1139 if (vgic_irq_is_active(vcpu, irq)) {
1140 vlr.state |= LR_STATE_ACTIVE;
1141 kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
1142 vgic_irq_clear_active(vcpu, irq);
1143 vgic_update_state(vcpu->kvm);
1145 WARN_ON(!vgic_dist_irq_is_pending(vcpu, irq));
1146 vlr.state |= LR_STATE_PENDING;
1147 kvm_debug("Set pending: 0x%x\n", vlr.state);
1150 if (!vgic_irq_is_edge(vcpu, irq))
1151 vlr.state |= LR_EOI_INT;
1153 if (vlr.irq >= VGIC_NR_SGIS) {
1154 struct irq_phys_map *map;
1155 map = vgic_irq_map_search(vcpu, irq);
1158 vlr.hwirq = map->phys_irq;
1160 vlr.state &= ~LR_EOI_INT;
1163 * Make sure we're not going to sample this
1164 * again, as a HW-backed interrupt cannot be
1165 * in the PENDING_ACTIVE stage.
1167 vgic_irq_set_queued(vcpu, irq);
1171 vgic_set_lr(vcpu, lr_nr, vlr);
1172 vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
1176 * Queue an interrupt to a CPU virtual interface. Return true on success,
1177 * or false if it wasn't possible to queue it.
1178 * sgi_source must be zero for any non-SGI interrupts.
1180 bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
1182 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1183 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1187 /* Sanitize the input... */
1188 BUG_ON(sgi_source_id & ~7);
1189 BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
1190 BUG_ON(irq >= dist->nr_irqs);
1192 kvm_debug("Queue IRQ%d\n", irq);
1194 lr = vgic_cpu->vgic_irq_lr_map[irq];
1196 /* Do we have an active interrupt for the same CPUID? */
1197 if (lr != LR_EMPTY) {
1198 vlr = vgic_get_lr(vcpu, lr);
1199 if (vlr.source == sgi_source_id) {
1200 kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
1201 BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
1202 vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
1207 /* Try to use another LR for this interrupt */
1208 lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
1210 if (lr >= vgic->nr_lr)
1213 kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
1214 vgic_cpu->vgic_irq_lr_map[irq] = lr;
1215 set_bit(lr, vgic_cpu->lr_used);
1218 vlr.source = sgi_source_id;
1220 vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
1225 static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
1227 if (!vgic_can_sample_irq(vcpu, irq))
1228 return true; /* level interrupt, already queued */
1230 if (vgic_queue_irq(vcpu, 0, irq)) {
1231 if (vgic_irq_is_edge(vcpu, irq)) {
1232 vgic_dist_irq_clear_pending(vcpu, irq);
1233 vgic_cpu_irq_clear(vcpu, irq);
1235 vgic_irq_set_queued(vcpu, irq);
1245 * Fill the list registers with pending interrupts before running the guest.
1248 static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
1250 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1251 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1252 unsigned long *pa_percpu, *pa_shared;
1255 int nr_shared = vgic_nr_shared_irqs(dist);
1257 vcpu_id = vcpu->vcpu_id;
1259 pa_percpu = vcpu->arch.vgic_cpu.pend_act_percpu;
1260 pa_shared = vcpu->arch.vgic_cpu.pend_act_shared;
1262 bitmap_or(pa_percpu, vgic_cpu->pending_percpu, vgic_cpu->active_percpu,
1263 VGIC_NR_PRIVATE_IRQS);
1264 bitmap_or(pa_shared, vgic_cpu->pending_shared, vgic_cpu->active_shared,
1267 * We may not have any pending interrupt, or the interrupts
1268 * may have been serviced from another vcpu. In all cases, move along.
1271 if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu))
1275 for_each_set_bit(i, pa_percpu, VGIC_NR_SGIS) {
1276 if (!queue_sgi(vcpu, i))
1281 for_each_set_bit_from(i, pa_percpu, VGIC_NR_PRIVATE_IRQS) {
1282 if (!vgic_queue_hwirq(vcpu, i))
1287 for_each_set_bit(i, pa_shared, nr_shared) {
1288 if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
1297 vgic_enable_underflow(vcpu);
1299 vgic_disable_underflow(vcpu);
1301 * We're about to run this VCPU, and we've consumed
1302 * everything the distributor had in store for
1303 * us. Claim we don't have anything pending. We'll
1304 * adjust that if needed while exiting.
1306 clear_bit(vcpu_id, dist->irq_pending_on_cpu);
1310 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
1312 u32 status = vgic_get_interrupt_status(vcpu);
1313 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1314 bool level_pending = false;
1315 struct kvm *kvm = vcpu->kvm;
1317 kvm_debug("STATUS = %08x\n", status);
1319 if (status & INT_STATUS_EOI) {
1321 * Some level interrupts have been EOIed. Clear their active bit.
1324 u64 eisr = vgic_get_eisr(vcpu);
1325 unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
1328 for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
1329 struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
1330 WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
1332 spin_lock(&dist->lock);
1333 vgic_irq_clear_queued(vcpu, vlr.irq);
1334 WARN_ON(vlr.state & LR_STATE_MASK);
1336 vgic_set_lr(vcpu, lr, vlr);
1339 * If the IRQ was EOIed it was also ACKed, and we
1340 * therefore assume we can clear the soft pending
1341 * state (should it have been set) for this interrupt.
1343 * Note: if the IRQ soft pending state was set after
1344 * the IRQ was acked, it actually shouldn't be
1345 * cleared, but we have no way of knowing that unless
1346 * we start trapping ACKs when the soft-pending state is set.
1349 vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
1352 * kvm_notify_acked_irq calls kvm_set_irq()
1353 * to reset the IRQ level. Need to release the
1354 * lock for kvm_set_irq to grab it.
1356 spin_unlock(&dist->lock);
1358 kvm_notify_acked_irq(kvm, 0,
1359 vlr.irq - VGIC_NR_PRIVATE_IRQS);
1360 spin_lock(&dist->lock);
1362 /* Any additional pending interrupt? */
1363 if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
1364 vgic_cpu_irq_set(vcpu, vlr.irq);
1365 level_pending = true;
1367 vgic_dist_irq_clear_pending(vcpu, vlr.irq);
1368 vgic_cpu_irq_clear(vcpu, vlr.irq);
1371 spin_unlock(&dist->lock);
1374 * Despite being EOIed, the LR may not have
1375 * been marked as empty.
1377 vgic_sync_lr_elrsr(vcpu, lr, vlr);
1381 if (status & INT_STATUS_UNDERFLOW)
1382 vgic_disable_underflow(vcpu);
1385 * In the next iterations of the vcpu loop, if we sync the vgic state
1386 * after flushing it, but before entering the guest (this happens for
1387 * pending signals and vmid rollovers), then make sure we don't pick
1388 * up any old maintenance interrupts here.
1390 vgic_clear_eisr(vcpu);
1392 return level_pending;
1396 * Save the physical active state, and reset it to inactive.
1398 * Return 1 if the HW interrupt went from active to inactive, and 0 otherwise.
1400 static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
1402 struct irq_phys_map *map;
1405 if (!(vlr.state & LR_HW))
1408 map = vgic_irq_map_search(vcpu, vlr.irq);
1409 BUG_ON(!map || !map->active);
1411 ret = irq_get_irqchip_state(map->irq,
1412 IRQCHIP_STATE_ACTIVE,
1423 /* Sync back the VGIC state after a guest run */
1424 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1426 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1427 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1429 unsigned long *elrsr_ptr;
1433 level_pending = vgic_process_maintenance(vcpu);
1434 elrsr = vgic_get_elrsr(vcpu);
1435 elrsr_ptr = u64_to_bitmask(&elrsr);
1437 /* Deal with HW interrupts, and clear mappings for empty LRs */
1438 for (lr = 0; lr < vgic->nr_lr; lr++) {
1441 if (!test_bit(lr, vgic_cpu->lr_used))
1444 vlr = vgic_get_lr(vcpu, lr);
1445 if (vgic_sync_hwirq(vcpu, vlr)) {
1447 * So this is a HW interrupt that the guest
1448 * EOI-ed. Clean the LR state and allow the
1449 * interrupt to be sampled again.
1453 vgic_set_lr(vcpu, lr, vlr);
1454 vgic_irq_clear_queued(vcpu, vlr.irq);
1455 set_bit(lr, elrsr_ptr);
1458 if (!test_bit(lr, elrsr_ptr))
1461 clear_bit(lr, vgic_cpu->lr_used);
1463 BUG_ON(vlr.irq >= dist->nr_irqs);
1464 vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
1467 /* Check if we still have something up our sleeve... */
1468 pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
1469 if (level_pending || pending < vgic->nr_lr)
1470 set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
1473 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
1475 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1477 if (!irqchip_in_kernel(vcpu->kvm))
1480 spin_lock(&dist->lock);
1481 __kvm_vgic_flush_hwstate(vcpu);
1482 spin_unlock(&dist->lock);
1485 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1487 if (!irqchip_in_kernel(vcpu->kvm))
1490 __kvm_vgic_sync_hwstate(vcpu);
1493 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
1495 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1497 if (!irqchip_in_kernel(vcpu->kvm))
1500 return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
1503 int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu)
1505 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1507 if (!irqchip_in_kernel(vcpu->kvm))
1510 return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
1514 void vgic_kick_vcpus(struct kvm *kvm)
1516 struct kvm_vcpu *vcpu;
1520 * We've injected an interrupt, time to find out who deserves a good kick.
1523 kvm_for_each_vcpu(c, vcpu, kvm) {
1524 if (kvm_vgic_vcpu_pending_irq(vcpu))
1525 kvm_vcpu_kick(vcpu);
1529 static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
1531 int edge_triggered = vgic_irq_is_edge(vcpu, irq);
1534 * Only inject an interrupt if:
1535 * - edge triggered and we have a rising edge
1536 * - level triggered and we change level
1538 if (edge_triggered) {
1539 int state = vgic_dist_irq_is_pending(vcpu, irq);
1540 return level > state;
1542 int state = vgic_dist_irq_get_level(vcpu, irq);
1543 return level != state;
1547 static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
1548 struct irq_phys_map *map,
1549 unsigned int irq_num, bool level)
1551 struct vgic_dist *dist = &kvm->arch.vgic;
1552 struct kvm_vcpu *vcpu;
1553 int edge_triggered, level_triggered;
1555 bool ret = true, can_inject = true;
1557 if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
1560 spin_lock(&dist->lock);
1562 vcpu = kvm_get_vcpu(kvm, cpuid);
1563 edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
1564 level_triggered = !edge_triggered;
1566 if (!vgic_validate_injection(vcpu, irq_num, level)) {
1571 if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
1572 cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
1573 if (cpuid == VCPU_NOT_ALLOCATED) {
1574 /* Pretend we use CPU0, and prevent injection */
1578 vcpu = kvm_get_vcpu(kvm, cpuid);
1581 kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);
1584 if (level_triggered)
1585 vgic_dist_irq_set_level(vcpu, irq_num);
1586 vgic_dist_irq_set_pending(vcpu, irq_num);
1588 if (level_triggered) {
1589 vgic_dist_irq_clear_level(vcpu, irq_num);
1590 if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) {
1591 vgic_dist_irq_clear_pending(vcpu, irq_num);
1592 vgic_cpu_irq_clear(vcpu, irq_num);
1593 if (!compute_pending_for_cpu(vcpu))
1594 clear_bit(cpuid, dist->irq_pending_on_cpu);
1602 enabled = vgic_irq_is_enabled(vcpu, irq_num);
1604 if (!enabled || !can_inject) {
1609 if (!vgic_can_sample_irq(vcpu, irq_num)) {
1611 * Level interrupt in progress, will be picked up when EOIed.
1619 vgic_cpu_irq_set(vcpu, irq_num);
1620 set_bit(cpuid, dist->irq_pending_on_cpu);
1624 spin_unlock(&dist->lock);
1627 /* kick the specified vcpu */
1628 kvm_vcpu_kick(kvm_get_vcpu(kvm, cpuid));
1634 static int vgic_lazy_init(struct kvm *kvm)
1638 if (unlikely(!vgic_initialized(kvm))) {
1640 * We only provide the automatic initialization of the VGIC
1641 * for the legacy case of a GICv2. Any other type must
1642 * be explicitly initialized once set up with the respective KVM device call.
1645 if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
1648 mutex_lock(&kvm->lock);
1649 ret = vgic_init(kvm);
1650 mutex_unlock(&kvm->lock);
1657 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
1658 * @kvm: The VM structure pointer
1659 * @cpuid: The CPU for PPIs
1660 * @irq_num: The IRQ number that is assigned to the device. This IRQ
1661 * must not be mapped to a HW interrupt.
1662 * @level: Edge-triggered: true: to trigger the interrupt
1663 * false: to ignore the call
1664 * Level-sensitive true: raise the input signal
1665 * false: lower the input signal
1667 * The GIC is not concerned with devices being active-LOW or active-HIGH for
1668 * level-sensitive interrupts. You can think of the level parameter as 1
1669 * being HIGH and 0 being LOW and all devices being active-HIGH.
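 *
 * A caller sketch (spi_num is a placeholder): raising and later lowering
 * a level-sensitive SPI looks like
 *
 *	kvm_vgic_inject_irq(kvm, 0, spi_num, true);
 *	...
 *	kvm_vgic_inject_irq(kvm, 0, spi_num, false);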
1671 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
1674 struct irq_phys_map *map;
1677 ret = vgic_lazy_init(kvm);
1681 map = vgic_irq_map_search(kvm_get_vcpu(kvm, cpuid), irq_num);
1685 return vgic_update_irq_pending(kvm, cpuid, NULL, irq_num, level);
1689 * kvm_vgic_inject_mapped_irq - Inject a physically mapped IRQ to the vgic
1690 * @kvm: The VM structure pointer
1691 * @cpuid: The CPU for PPIs
1692 * @map: Pointer to a irq_phys_map structure describing the mapping
1693 * @level: Edge-triggered: true: to trigger the interrupt
1694 * false: to ignore the call
1695 * Level-sensitive true: raise the input signal
1696 * false: lower the input signal
1698 * The GIC is not concerned with devices being active-LOW or active-HIGH for
1699 * level-sensitive interrupts. You can think of the level parameter as 1
1700 * being HIGH and 0 being LOW and all devices being active-HIGH.
1702 int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
1703 struct irq_phys_map *map, bool level)
1707 ret = vgic_lazy_init(kvm);
1711 return vgic_update_irq_pending(kvm, cpuid, map, map->virt_irq, level);
1714 static irqreturn_t vgic_maintenance_handler(int irq, void *data)
1717 * We cannot rely on the vgic maintenance interrupt to be
1718 * delivered synchronously. This means we can only use it to
1719 * exit the VM, and we perform the handling of EOIed
1720 * interrupts on the exit path (see vgic_process_maintenance).
1725 static struct list_head *vgic_get_irq_phys_map_list(struct kvm_vcpu *vcpu,
1728 if (virt_irq < VGIC_NR_PRIVATE_IRQS)
1729 return &vcpu->arch.vgic_cpu.irq_phys_map_list;
1731 return &vcpu->kvm->arch.vgic.irq_phys_map_list;
1735 * kvm_vgic_map_phys_irq - map a virtual IRQ to a physical IRQ
1736 * @vcpu: The VCPU pointer
1737 * @virt_irq: The virtual irq number
1738 * @irq: The Linux IRQ number
1740 * Establish a mapping between a guest visible irq (@virt_irq) and a
1741 * Linux irq (@irq). On injection, @virt_irq will be associated with
1742 * the physical interrupt represented by @irq. This mapping can be
1743 * established multiple times as long as the parameters are the same.
1745 * Returns a valid pointer on success, and an error pointer otherwise
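 *
 * A pairing sketch with kvm_vgic_unmap_phys_irq() (virt_irq and host_irq
 * are placeholders):
 *
 *	map = kvm_vgic_map_phys_irq(vcpu, virt_irq, host_irq);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *	...
 *	kvm_vgic_unmap_phys_irq(vcpu, map);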
1747 struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
1748 int virt_irq, int irq)
1750 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1751 struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
1752 struct irq_phys_map *map;
1753 struct irq_phys_map_entry *entry;
1754 struct irq_desc *desc;
1755 struct irq_data *data;
1758 desc = irq_to_desc(irq);
1760 kvm_err("%s: no interrupt descriptor\n", __func__);
1761 return ERR_PTR(-EINVAL);
1764 data = irq_desc_get_irq_data(desc);
1765 while (data->parent_data)
1766 data = data->parent_data;
1768 phys_irq = data->hwirq;
1770 /* Create a new mapping */
1771 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1773 return ERR_PTR(-ENOMEM);
1775 spin_lock(&dist->irq_phys_map_lock);
1777 /* Try to match an existing mapping */
1778 map = vgic_irq_map_search(vcpu, virt_irq);
1780 /* Make sure this mapping matches */
1781 if (map->phys_irq != phys_irq ||
1783 map = ERR_PTR(-EINVAL);
1785 /* Found an existing, valid mapping */
1790 map->virt_irq = virt_irq;
1791 map->phys_irq = phys_irq;
1794 list_add_tail_rcu(&entry->entry, root);
1797 spin_unlock(&dist->irq_phys_map_lock);
1798 /* If we've found a hit in the existing list, free the useless entry. */
1800 if (IS_ERR(map) || map != &entry->map)
1805 static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
1808 struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
1809 struct irq_phys_map_entry *entry;
1810 struct irq_phys_map *map;
1814 list_for_each_entry_rcu(entry, root, entry) {
1816 if (map->virt_irq == virt_irq) {
1827 static void vgic_free_phys_irq_map_rcu(struct rcu_head *rcu)
1829 struct irq_phys_map_entry *entry;
1831 entry = container_of(rcu, struct irq_phys_map_entry, rcu);
1836 * kvm_vgic_get_phys_irq_active - Return the active state of a mapped IRQ
1838 * Return the logical active state of a mapped interrupt. This doesn't
1839 * necessarily reflect the current HW state.
1841 bool kvm_vgic_get_phys_irq_active(struct irq_phys_map *map)
1848 * kvm_vgic_set_phys_irq_active - Set the active state of a mapped IRQ
1850 * Set the logical active state of a mapped interrupt. This doesn't
1851 * immediately affect the HW state.
1853 void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active)
1856 map->active = active;
1860 * kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
1861 * @vcpu: The VCPU pointer
1862 * @map: The pointer to a mapping obtained through kvm_vgic_map_phys_irq
1864 * Remove an existing mapping between virtual and physical interrupts.
1866 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
1868 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1869 struct irq_phys_map_entry *entry;
1870 struct list_head *root;
1875 root = vgic_get_irq_phys_map_list(vcpu, map->virt_irq);
1877 spin_lock(&dist->irq_phys_map_lock);
1879 list_for_each_entry(entry, root, entry) {
1880 if (&entry->map == map) {
1881 list_del_rcu(&entry->entry);
1882 call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
1887 spin_unlock(&dist->irq_phys_map_lock);
1892 static void vgic_destroy_irq_phys_map(struct kvm *kvm, struct list_head *root)
1894 struct vgic_dist *dist = &kvm->arch.vgic;
1895 struct irq_phys_map_entry *entry;
1897 spin_lock(&dist->irq_phys_map_lock);
1899 list_for_each_entry(entry, root, entry) {
1900 list_del_rcu(&entry->entry);
1901 call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
1904 spin_unlock(&dist->irq_phys_map_lock);
1907 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
1909 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1911 kfree(vgic_cpu->pending_shared);
1912 kfree(vgic_cpu->active_shared);
1913 kfree(vgic_cpu->pend_act_shared);
1914 kfree(vgic_cpu->vgic_irq_lr_map);
1915 vgic_destroy_irq_phys_map(vcpu->kvm, &vgic_cpu->irq_phys_map_list);
1916 vgic_cpu->pending_shared = NULL;
1917 vgic_cpu->active_shared = NULL;
1918 vgic_cpu->pend_act_shared = NULL;
1919 vgic_cpu->vgic_irq_lr_map = NULL;
1922 static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
1924 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1926 int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
1927 vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
1928 vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
1929 vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
1930 vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
1932 if (!vgic_cpu->pending_shared
1933 || !vgic_cpu->active_shared
1934 || !vgic_cpu->pend_act_shared
1935 || !vgic_cpu->vgic_irq_lr_map) {
1936 kvm_vgic_vcpu_destroy(vcpu);
1940 memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);
1943 * Store the number of LRs per vcpu, so we don't have to go
1944 * all the way to the distributor structure to find out. Only
1945 * assembly code should use this one.
1947 vgic_cpu->nr_lr = vgic->nr_lr;
1953 * kvm_vgic_vcpu_early_init - Earliest possible per-vcpu vgic init stage
1955 * No memory allocation should be performed here, only static init.
1957 void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu)
1959 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1960 INIT_LIST_HEAD(&vgic_cpu->irq_phys_map_list);
1964 * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
1966 * The host's GIC naturally limits the maximum number of VCPUs a guest can use.
1969 int kvm_vgic_get_max_vcpus(void)
1971 return vgic->max_gic_vcpus;
1974 void kvm_vgic_destroy(struct kvm *kvm)
1976 struct vgic_dist *dist = &kvm->arch.vgic;
1977 struct kvm_vcpu *vcpu;
1980 kvm_for_each_vcpu(i, vcpu, kvm)
1981 kvm_vgic_vcpu_destroy(vcpu);
1983 vgic_free_bitmap(&dist->irq_enabled);
1984 vgic_free_bitmap(&dist->irq_level);
1985 vgic_free_bitmap(&dist->irq_pending);
1986 vgic_free_bitmap(&dist->irq_soft_pend);
1987 vgic_free_bitmap(&dist->irq_queued);
1988 vgic_free_bitmap(&dist->irq_cfg);
1989 vgic_free_bytemap(&dist->irq_priority);
1990 if (dist->irq_spi_target) {
1991 for (i = 0; i < dist->nr_cpus; i++)
1992 vgic_free_bitmap(&dist->irq_spi_target[i]);
1994 kfree(dist->irq_sgi_sources);
1995 kfree(dist->irq_spi_cpu);
1996 kfree(dist->irq_spi_mpidr);
1997 kfree(dist->irq_spi_target);
1998 kfree(dist->irq_pending_on_cpu);
1999 kfree(dist->irq_active_on_cpu);
2000 vgic_destroy_irq_phys_map(kvm, &dist->irq_phys_map_list);
2001 dist->irq_sgi_sources = NULL;
2002 dist->irq_spi_cpu = NULL;
2003 dist->irq_spi_target = NULL;
2004 dist->irq_pending_on_cpu = NULL;
2005 dist->irq_active_on_cpu = NULL;
2010 * Allocate and initialize the various data structures. Must be called
2011 * with kvm->lock held!
2013 int vgic_init(struct kvm *kvm)
2015 struct vgic_dist *dist = &kvm->arch.vgic;
2016 struct kvm_vcpu *vcpu;
2017 int nr_cpus, nr_irqs;
2018 int ret, i, vcpu_id;
2020 if (vgic_initialized(kvm))
2023 nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
2024 if (!nr_cpus) /* No vcpus? Can't be good... */
2028 * If nobody configured the number of interrupts, use the legacy default.
2032 dist->nr_irqs = VGIC_NR_IRQS_LEGACY;
2034 nr_irqs = dist->nr_irqs;
2036 ret = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs);
2037 ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs);
2038 ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
2039 ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
2040 ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
2041 ret |= vgic_init_bitmap(&dist->irq_active, nr_cpus, nr_irqs);
2042 ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
2043 ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);
2048 dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL);
2049 dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL);
2050 dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
2052 dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
2054 dist->irq_active_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
2056 if (!dist->irq_sgi_sources ||
2057 !dist->irq_spi_cpu ||
2058 !dist->irq_spi_target ||
2059 !dist->irq_pending_on_cpu ||
2060 !dist->irq_active_on_cpu) {
2065 for (i = 0; i < nr_cpus; i++)
2066 ret |= vgic_init_bitmap(&dist->irq_spi_target[i],
2072 ret = kvm->arch.vgic.vm_ops.init_model(kvm);
2076 kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
2077 ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
2079 kvm_err("VGIC: Failed to allocate vcpu memory\n");
2083 for (i = 0; i < dist->nr_irqs; i++) {
2084 if (i < VGIC_NR_PPIS)
2085 vgic_bitmap_set_irq_val(&dist->irq_enabled,
2086 vcpu->vcpu_id, i, 1);
2087 if (i < VGIC_NR_PRIVATE_IRQS)
2088 vgic_bitmap_set_irq_val(&dist->irq_cfg,
2098 kvm_vgic_destroy(kvm);
2103 static int init_vgic_model(struct kvm *kvm, int type)
2106 case KVM_DEV_TYPE_ARM_VGIC_V2:
2107 vgic_v2_init_emulation(kvm);
2109 #ifdef CONFIG_ARM_GIC_V3
2110 case KVM_DEV_TYPE_ARM_VGIC_V3:
2111 vgic_v3_init_emulation(kvm);
2118 if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus)
2125 * kvm_vgic_early_init - Earliest possible vgic initialization stage
2127 * No memory allocation should be performed here, only static init.
2129 void kvm_vgic_early_init(struct kvm *kvm)
2131 spin_lock_init(&kvm->arch.vgic.lock);
2132 spin_lock_init(&kvm->arch.vgic.irq_phys_map_lock);
2133 INIT_LIST_HEAD(&kvm->arch.vgic.irq_phys_map_list);
2136 int kvm_vgic_create(struct kvm *kvm, u32 type)
2138 int i, vcpu_lock_idx = -1, ret;
2139 struct kvm_vcpu *vcpu;
2141 mutex_lock(&kvm->lock);
2143 if (irqchip_in_kernel(kvm)) {
2149 * This function is also called by the KVM_CREATE_IRQCHIP handler,
2150 * which has not yet had a chance to check the availability of the GICv2
2151 * emulation. So check this here again. KVM_CREATE_DEVICE does
2152 * the proper checks already.
2154 if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) {
2160 * Any time a vcpu is run, vcpu_load is called which tries to grab the
2161 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
2162 * that no other VCPUs are run while we create the vgic.
2165 kvm_for_each_vcpu(i, vcpu, kvm) {
2166 if (!mutex_trylock(&vcpu->mutex))
2171 kvm_for_each_vcpu(i, vcpu, kvm) {
2172 if (vcpu->arch.has_run_once)
2177 ret = init_vgic_model(kvm, type);
2181 kvm->arch.vgic.in_kernel = true;
2182 kvm->arch.vgic.vgic_model = type;
2183 kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
2184 kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
2185 kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
2186 kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;
2189 for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
2190 vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
2191 mutex_unlock(&vcpu->mutex);
2195 mutex_unlock(&kvm->lock);
2199 static int vgic_ioaddr_overlap(struct kvm *kvm)
2201 phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
2202 phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;
2204 if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
2206 if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
2207 (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
2212 static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
2213 phys_addr_t addr, phys_addr_t size)
2217 if (addr & ~KVM_PHYS_MASK)
2220 if (addr & (SZ_4K - 1))
2223 if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
2225 if (addr + size < addr)
2229 ret = vgic_ioaddr_overlap(kvm);
2231 *ioaddr = VGIC_ADDR_UNDEF;
2237 * kvm_vgic_addr - set or get vgic VM base addresses
2238 * @kvm: pointer to the vm struct
2239 * @type: the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
2240 * @addr: pointer to address value
2241 * @write: if true set the address in the VM address space, if false read the address.
2244 * Set or get the vgic base addresses for the distributor and the virtual CPU
2245 * interface in the VM physical address space. These addresses are properties
2246 * of the emulated core/SoC and therefore user space initially knows this information.
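 *
 * A write-side usage sketch (0x8000000 is an arbitrary example base):
 *
 *	u64 addr = 0x8000000;
 *	ret = kvm_vgic_addr(kvm, KVM_VGIC_V2_ADDR_TYPE_DIST, &addr, true);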
2249 int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
2252 struct vgic_dist *vgic = &kvm->arch.vgic;
2254 phys_addr_t *addr_ptr, block_size;
2255 phys_addr_t alignment;
2257 mutex_lock(&kvm->lock);
2259 case KVM_VGIC_V2_ADDR_TYPE_DIST:
2260 type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
2261 addr_ptr = &vgic->vgic_dist_base;
2262 block_size = KVM_VGIC_V2_DIST_SIZE;
2265 case KVM_VGIC_V2_ADDR_TYPE_CPU:
2266 type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
2267 addr_ptr = &vgic->vgic_cpu_base;
2268 block_size = KVM_VGIC_V2_CPU_SIZE;
2271 #ifdef CONFIG_ARM_GIC_V3
2272 case KVM_VGIC_V3_ADDR_TYPE_DIST:
2273 type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
2274 addr_ptr = &vgic->vgic_dist_base;
2275 block_size = KVM_VGIC_V3_DIST_SIZE;
2278 case KVM_VGIC_V3_ADDR_TYPE_REDIST:
2279 type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
2280 addr_ptr = &vgic->vgic_redist_base;
2281 block_size = KVM_VGIC_V3_REDIST_SIZE;
2290 if (vgic->vgic_model != type_needed) {
2296 if (!IS_ALIGNED(*addr, alignment))
2299 r = vgic_ioaddr_assign(kvm, addr_ptr, *addr,
2306 mutex_unlock(&kvm->lock);
2310 int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2314 switch (attr->group) {
2315 case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2316 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2318 unsigned long type = (unsigned long)attr->attr;
2320 if (copy_from_user(&addr, uaddr, sizeof(addr)))
2323 r = kvm_vgic_addr(dev->kvm, type, &addr, true);
2324 return (r == -ENODEV) ? -ENXIO : r;
2326 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
2327 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
2331 if (get_user(val, uaddr))
2336 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
2337 * - at most 1024 interrupts
2338 * - a multiple of 32 interrupts
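 *
 * As a sketch of the check below: val == 128 satisfies all three
 * constraints, while val == 100 is rejected by the multiple-of-32 test.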
2340 if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
2341 val > VGIC_MAX_IRQS ||
2345 mutex_lock(&dev->kvm->lock);
2347 if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
2350 dev->kvm->arch.vgic.nr_irqs = val;
2352 mutex_unlock(&dev->kvm->lock);
2356 case KVM_DEV_ARM_VGIC_GRP_CTRL: {
2357 switch (attr->attr) {
2358 case KVM_DEV_ARM_VGIC_CTRL_INIT:
2359 r = vgic_init(dev->kvm);
2369 int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2373 switch (attr->group) {
2374 case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2375 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2377 unsigned long type = (unsigned long)attr->attr;
2379 r = kvm_vgic_addr(dev->kvm, type, &addr, false);
2381 return (r == -ENODEV) ? -ENXIO : r;
2383 if (copy_to_user(uaddr, &addr, sizeof(addr)))
2387 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
2388 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
2390 r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
2399 int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset)
2401 if (vgic_find_range(ranges, 4, offset))
2407 static void vgic_init_maintenance_interrupt(void *info)
2409 enable_percpu_irq(vgic->maint_irq, 0);
2412 static int vgic_cpu_notify(struct notifier_block *self,
2413 unsigned long action, void *cpu)
2417 case CPU_STARTING_FROZEN:
2418 vgic_init_maintenance_interrupt(NULL);
2421 case CPU_DYING_FROZEN:
2422 disable_percpu_irq(vgic->maint_irq);
2429 static struct notifier_block vgic_cpu_nb = {
2430 .notifier_call = vgic_cpu_notify,
2433 static const struct of_device_id vgic_ids[] = {
2434 { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
2435 { .compatible = "arm,cortex-a7-gic", .data = vgic_v2_probe, },
2436 { .compatible = "arm,gic-400", .data = vgic_v2_probe, },
2437 { .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
2441 int kvm_vgic_hyp_init(void)
2443 const struct of_device_id *matched_id;
2444 const int (*vgic_probe)(struct device_node *, const struct vgic_ops **,
2445 const struct vgic_params **);
2446 struct device_node *vgic_node;
2449 vgic_node = of_find_matching_node_and_match(NULL,
2450 vgic_ids, &matched_id);
2452 kvm_err("error: no compatible GIC node found\n");
2456 vgic_probe = matched_id->data;
2457 ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
2461 ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
2462 "vgic", kvm_get_running_vcpus());
2464 kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
2468 ret = __register_cpu_notifier(&vgic_cpu_nb);
2470 kvm_err("Cannot register vgic CPU notifier\n");
2474 on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
2479 free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
2483 int kvm_irq_map_gsi(struct kvm *kvm,
2484 struct kvm_kernel_irq_routing_entry *entries,
2490 int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
2495 int kvm_set_irq(struct kvm *kvm, int irq_source_id,
2496 u32 irq, int level, bool line_status)
2498 unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS;
2500 trace_kvm_set_irq(irq, level, irq_source_id);
2502 BUG_ON(!vgic_initialized(kvm));
2504 return kvm_vgic_inject_irq(kvm, 0, spi, level);
2507 /* MSI not implemented yet */
2508 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
2509 struct kvm *kvm, int irq_source_id,
2510 int level, bool line_status)