/*
 * VGICv2 MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"
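
/*
 * GICD_CTLR, GICD_TYPER and GICD_IIDR share one read handler: CTRL reflects
 * the distributor enable bit, GIC_DIST_CTR (GICD_TYPER) encodes the number
 * of supported interrupt lines and vCPUs, and IIDR reports KVM as the
 * implementation.
 */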

static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 value;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		value = vcpu->kvm->arch.vgic.enabled ? GICD_ENABLE : 0;
		break;
	case GIC_DIST_CTR:
		value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		break;
	case GIC_DIST_IIDR:
		value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		break;
	default:
		return 0;
	}

	return value;
}

static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool was_enabled = dist->enabled;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		dist->enabled = val & GICD_ENABLE;
		if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);
		break;
	case GIC_DIST_CTR:
	case GIC_DIST_IIDR:
		/* Nothing to do */
		return;
	}
}
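
/*
 * GICD_SGIR: bits [3:0] hold the SGI number, bits [23:16] the CPU target
 * list and bits [25:24] the target list filter. The SGI is marked pending,
 * with the sender recorded in the per-IRQ source bitmap, on every vCPU
 * selected by the filter.
 */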

static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
				 gpa_t addr, unsigned int len,
				 unsigned long val)
{
	int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
	int intid = val & 0xf;
	int targets = (val >> 16) & 0xff;
	int mode = (val >> 24) & 0x03;
	int c;
	struct kvm_vcpu *vcpu;

	switch (mode) {
	case 0x0:		/* as specified by targets */
		break;
	case 0x1:
		targets = (1U << nr_vcpus) - 1;			/* all, ... */
		targets &= ~(1U << source_vcpu->vcpu_id);	/* but self */
		break;
	case 0x2:		/* this very vCPU only */
		targets = (1U << source_vcpu->vcpu_id);
		break;
	case 0x3:		/* reserved */
		return;
	}

	kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
		struct vgic_irq *irq;

		if (!(targets & (1U << c)))
			continue;

		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

		spin_lock(&irq->irq_lock);
		irq->pending = true;
		irq->source |= 1U << source_vcpu->vcpu_id;

		vgic_queue_irq_unlock(source_vcpu->kvm, irq);
	}
}

static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->targets << (i * 8);
	}

	return val;
}
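
/*
 * GICD_ITARGETSR holds one target byte per interrupt. Writes also cache the
 * lowest set target bit as the interrupt's target vCPU; the banked registers
 * covering SGIs and PPIs are read-only.
 */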

static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;

	/* GICD_ITARGETSR[0-7] are read-only */
	if (intid < VGIC_NR_PRIVATE_IRQS)
		return;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
		int target;

		spin_lock(&irq->irq_lock);

		irq->targets = (val >> (i * 8)) & 0xff;
		target = irq->targets ? __ffs(irq->targets) : 0;
		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

		spin_unlock(&irq->irq_lock);
	}
}
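
/*
 * GICD_CPENDSGIR/GICD_SPENDSGIR expose one byte per SGI, with one bit per
 * possible source vCPU, backed by the per-IRQ source bitmap.
 */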

static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 intid = addr & 0x0f;
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->source << (i * 8);
	}

	return val;
}

static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);

		irq->source &= ~((val >> (i * 8)) & 0xff);

		if (!irq->source)
			irq->pending = false;

		spin_unlock(&irq->irq_lock);
	}
}

static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);

		irq->source |= (val >> (i * 8)) & 0xff;

		if (irq->source) {
			irq->pending = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq);
		} else {
			spin_unlock(&irq->irq_lock);
		}
	}
}
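
/*
 * The VMCR accessors below dispatch to the GICv2 or GICv3 backend and are
 * used by the userland CPU interface handlers further down.
 */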

static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

#define GICC_ARCH_VERSION_V2	0x2

/* These are for userland accesses only, there is no guest-facing emulation. */
static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	struct vgic_vmcr vmcr;
	u32 val;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		val = vmcr.ctlr;
		break;
	case GIC_CPU_PRIMASK:
		val = vmcr.pmr;
		break;
	case GIC_CPU_BINPOINT:
		val = vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		val = vmcr.abpr;
		break;
	case GIC_CPU_IDENT:
		val = ((PRODUCT_ID_KVM << 20) |
		       (GICC_ARCH_VERSION_V2 << 16) |
		       IMPLEMENTER_ARM);
		break;
	default:
		return 0;
	}

	return val;
}

static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		vmcr.ctlr = val;
		break;
	case GIC_CPU_PRIMASK:
		vmcr.pmr = val;
		break;
	case GIC_CPU_BINPOINT:
		vmcr.bpr = val;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr.abpr = val;
		break;
	}

	vgic_set_vmcr(vcpu, &vmcr);
}
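
/*
 * Each table entry describes one register block: its offset, its read and
 * write handlers, its size (either a fixed length or a number of bits per
 * interrupt) and the access widths it accepts.
 */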

static const struct vgic_register_region vgic_v2_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL,
		vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
		vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
		vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
		vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
		vgic_mmio_read_active, vgic_mmio_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
		vgic_mmio_read_active, vgic_mmio_write_cactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
		vgic_mmio_read_priority, vgic_mmio_write_priority, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
		vgic_mmio_read_target, vgic_mmio_write_target, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
		vgic_mmio_read_config, vgic_mmio_write_config, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
		vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};

static const struct vgic_register_region vgic_v2_cpu_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 16,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
};

unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v2_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_4K;
}
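
/*
 * Check whether a userspace-supplied attribute offset falls within one of
 * the emulated register regions of the requested group.
 */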

int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int nr_irqs = dev->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	const struct vgic_register_region *regions;
	gpa_t addr;
	int nr_regions, i, len;

	addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		regions = vgic_v2_dist_registers;
		nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		regions = vgic_v2_cpu_registers;
		nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
		break;
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	for (i = 0; i < nr_regions; i++) {
		if (regions[i].bits_per_irq)
			len = (regions[i].bits_per_irq * nr_irqs) / 8;
		else
			len = regions[i].len;

		if (regions[i].reg_offset <= addr &&
		    regions[i].reg_offset + len > addr)
			return 0;
	}

	return -ENXIO;
}

/*
 * When userland tries to access the VGIC register handlers, we need to create
 * a usable struct vgic_io_device to be passed to the handlers and we have to
 * set up a buffer similar to what would have happened if a guest MMIO access
 * occurred, including doing endian conversions on BE systems.
 */
static int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
			bool is_write, int offset, u32 *val)
{
	unsigned int len = 4;
	u8 buf[4];
	int ret;

	if (is_write) {
		vgic_data_host_to_mmio_bus(buf, len, *val);
		ret = kvm_io_gic_ops.write(vcpu, &dev->dev, offset, len, buf);
	} else {
		ret = kvm_io_gic_ops.read(vcpu, &dev->dev, offset, len, buf);

		if (!ret)
			*val = vgic_data_mmio_bus_to_host(buf, len);
	}

	return ret;
}
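
/*
 * The two wrappers below build a temporary vgic_io_device around the CPU
 * interface or distributor register table so that userland accesses go
 * through the same dispatch as guest MMIO.
 */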

int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			  int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_cpu_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}