2 * VGICv2 MMIO handling functions
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
14 #include <linux/irqchip/arm-gic.h>
15 #include <linux/kvm.h>
16 #include <linux/kvm_host.h>
17 #include <kvm/iodev.h>
18 #include <kvm/arm_vgic.h>
21 #include "vgic-mmio.h"
/*
 * Emulate guest reads of the GICv2 distributor "misc" register group
 * (selected by addr bits [3:2]): distributor control, type and
 * implementer-ID words.
 *
 * NOTE(review): this chunk appears truncated — the switch case labels,
 * break statements, the declaration of 'value' and the final return are
 * not visible here. Comments tag each visible computation instead.
 */
23 static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
24 gpa_t addr, unsigned int len)
28 switch (addr & 0x0c) {
/* GICD_CTLR-style read: report the global distributor enable bit. */
30 value = vcpu->kvm->arch.vgic.enabled ? GICD_ENABLE : 0;
/*
 * GICD_TYPER-style read: ITLinesNumber = (total IRQs / 32) - 1 in the
 * low bits (>> 5 divides by 32), with (#vCPUs - 1) folded into bits [7:5].
 */
33 value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
34 value = (value >> 5) - 1;
35 value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
/* GICD_IIDR-style read: KVM product ID in [31:24], ARM implementer code. */
38 value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
/*
 * Emulate guest writes to the distributor misc register group.  The
 * visible case handles the distributor enable bit: update the global
 * enable state and, on a 0 -> 1 transition, kick all vCPUs so that any
 * already-pending interrupts are (re)evaluated and delivered.
 *
 * NOTE(review): the switch case labels, the 'val' parameter line and
 * the closing braces are missing from this view of the source.
 */
47 static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
48 gpa_t addr, unsigned int len,
51 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
/* Snapshot the old state so we only kick vCPUs on an off->on edge. */
52 bool was_enabled = dist->enabled;
54 switch (addr & 0x0c) {
56 dist->enabled = val & GICD_ENABLE;
57 if (!was_enabled && dist->enabled)
58 vgic_kick_vcpus(vcpu->kvm);
/*
 * Emulate a guest write to GICD_SGIR: inject a software-generated
 * interrupt (SGI 0-15) into the target vCPUs.
 *
 * Field decode from the written value (per the GICv2 GICD_SGIR layout):
 *   intid   - bits [3:0], the SGI number;
 *   targets - bits [23:16], CPU target-list bitmap;
 *   mode    - bits [25:24], target-list filter.
 *
 * NOTE(review): case labels for mode 0x1, the loop body's braces and
 * several break/continue lines are missing from this view.
 */
67 static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
68 gpa_t addr, unsigned int len,
71 int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
72 int intid = val & 0xf;
73 int targets = (val >> 16) & 0xff;
74 int mode = (val >> 24) & 0x03;
76 struct kvm_vcpu *vcpu;
79 case 0x0: /* as specified by targets */
/* Broadcast mode: every online vCPU except the requester. */
82 targets = (1U << nr_vcpus) - 1; /* all, ... */
83 targets &= ~(1U << source_vcpu->vcpu_id); /* but self */
85 case 0x2: /* this very vCPU only */
86 targets = (1U << source_vcpu->vcpu_id);
/* Mode 0x3 is reserved; presumably ignored — body not visible here. */
88 case 0x3: /* reserved */
/* Deliver the SGI to each vCPU selected in the target bitmap. */
92 kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
95 if (!(targets & (1U << c)))
98 irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);
100 spin_lock(&irq->irq_lock);
101 irq->pending_latch = true;
/* Record which CPU generated the SGI (needed for GICv2 SGI semantics). */
102 irq->source |= 1U << source_vcpu->vcpu_id;
/* NOTE(review): by its name this queues the IRQ and drops irq_lock. */
104 vgic_queue_irq_unlock(source_vcpu->kvm, irq);
105 vgic_put_irq(source_vcpu->kvm, irq);
/*
 * Emulate reads of GICD_ITARGETSRn: return one target-CPU bitmap byte
 * per interrupt covered by the access (8 bits per IRQ).
 *
 * NOTE(review): the declarations of 'val'/'i', the loop braces and the
 * final return are missing from this view.
 */
109 static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
110 gpa_t addr, unsigned int len)
/* 8 bits per IRQ: the register offset maps to an interrupt ID. */
112 u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
116 for (i = 0; i < len; i++) {
117 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
/* Pack each interrupt's target byte into its lane of the result. */
119 val |= (u64)irq->targets << (i * 8);
121 vgic_put_irq(vcpu->kvm, irq);
/*
 * Emulate writes to GICD_ITARGETSRn: update each SPI's target-CPU
 * bitmap (masked to online vCPUs) and cache the routed vCPU.
 *
 * NOTE(review): the declarations of 'i'/'target', an early return after
 * the read-only check and the closing braces are not visible here.
 */
127 static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
128 gpa_t addr, unsigned int len,
131 u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
/* Only accept target bits for vCPUs that actually exist. */
132 u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
/* GICD_ITARGETSR[0-7] are read-only */
136 if (intid < VGIC_NR_PRIVATE_IRQS)
139 for (i = 0; i < len; i++) {
140 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
143 spin_lock(&irq->irq_lock);
145 irq->targets = (val >> (i * 8)) & cpu_mask;
/* Route to the lowest-numbered targeted CPU; fall back to CPU 0. */
146 target = irq->targets ? __ffs(irq->targets) : 0;
147 irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
149 spin_unlock(&irq->irq_lock);
150 vgic_put_irq(vcpu->kvm, irq);
/*
 * Emulate reads of GICD_{C,S}PENDSGIRn: return, for each SGI covered by
 * the access, the per-source-CPU pending bitmap byte (irq->source).
 *
 * NOTE(review): declarations of 'val'/'i' and the final return are
 * missing from this view.
 */
154 static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
155 gpa_t addr, unsigned int len)
/* Only SGIs 0-15 live in this register frame. */
157 u32 intid = addr & 0x0f;
161 for (i = 0; i < len; i++) {
162 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
/* One source-bitmap byte per SGI, packed into the access lanes. */
164 val |= (u64)irq->source << (i * 8);
166 vgic_put_irq(vcpu->kvm, irq);
/*
 * Emulate writes to GICD_CPENDSGIRn: clear the written per-source-CPU
 * pending bits for each SGI, dropping the pending latch as well.
 *
 * NOTE(review): lines are missing between clearing the source bits and
 * clearing pending_latch — presumably a "no sources left" conditional
 * guards the latch clear; verify against the full source.
 */
171 static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
172 gpa_t addr, unsigned int len,
175 u32 intid = addr & 0x0f;
178 for (i = 0; i < len; i++) {
179 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
181 spin_lock(&irq->irq_lock);
/* Clear only the source-CPU bits the guest wrote for this SGI. */
183 irq->source &= ~((val >> (i * 8)) & 0xff);
185 irq->pending_latch = false;
187 spin_unlock(&irq->irq_lock);
188 vgic_put_irq(vcpu->kvm, irq);
/*
 * Emulate writes to GICD_SPENDSGIRn: set the written per-source-CPU
 * pending bits for each SGI and queue the interrupt for delivery.
 *
 * NOTE(review): both vgic_queue_irq_unlock() (which, by its name, drops
 * irq_lock) and a plain spin_unlock() appear back to back here — in the
 * full source these presumably sit on different branches of a missing
 * conditional (queue only if a source bit is now set). Verify before
 * changing anything; as shown this would double-unlock.
 */
192 static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
193 gpa_t addr, unsigned int len,
196 u32 intid = addr & 0x0f;
199 for (i = 0; i < len; i++) {
200 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
202 spin_lock(&irq->irq_lock);
/* Merge the guest-written source-CPU bits into the SGI's bitmap. */
204 irq->source |= (val >> (i * 8)) & 0xff;
207 irq->pending_latch = true;
208 vgic_queue_irq_unlock(vcpu->kvm, irq);
210 spin_unlock(&irq->irq_lock);
212 vgic_put_irq(vcpu->kvm, irq);
/* Architecture version field value reported in the GICC IIDR word below. */
216 #define GICC_ARCH_VERSION_V2 0x2
218 /* These are for userland accesses only, there is no guest-facing emulation. */
/*
 * Read a virtual GICC (CPU interface) register on behalf of userspace,
 * synthesized from the vGIC's cached VMCR state.
 *
 * NOTE(review): case labels for the CTRL register, several break lines,
 * the 'val' declaration and the final return are missing from this view.
 */
219 static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
220 gpa_t addr, unsigned int len)
222 struct vgic_vmcr vmcr;
225 vgic_get_vmcr(vcpu, &vmcr);
227 switch (addr & 0xff) {
/* GICC_CTLR-style read: reassemble the control bits from the VMCR copy. */
229 val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
230 val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
231 val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
232 val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
233 val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
234 val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;
237 case GIC_CPU_PRIMASK:
239 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports
240 * the PMR field as GICH_VMCR.VMPriMask rather than
241 * GICC_PMR.Priority, so we expose the upper five bits of
242 * priority mask to userspace using the lower bits of the
 * register value.
245 val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
246 GICV_PMR_PRIORITY_SHIFT;
248 case GIC_CPU_BINPOINT:
251 case GIC_CPU_ALIAS_BINPOINT:
/* GICC_IIDR-style read: product ID and the architecture version above. */
255 val = ((PRODUCT_ID_KVM << 20) |
256 (GICC_ARCH_VERSION_V2 << 16) |
/*
 * Write a virtual GICC (CPU interface) register on behalf of userspace:
 * read-modify-write the vGIC's cached VMCR state.
 *
 * NOTE(review): the CTRL case label, break lines and the bodies of the
 * binpoint cases are missing from this view of the source.
 */
266 static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
267 gpa_t addr, unsigned int len,
270 struct vgic_vmcr vmcr;
/* Fetch the current VMCR so untouched fields survive the update. */
272 vgic_get_vmcr(vcpu, &vmcr);
274 switch (addr & 0xff) {
/* GICC_CTLR-style write: scatter the control bits into the VMCR copy. */
276 vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
277 vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
278 vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
279 vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
280 vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
281 vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);
284 case GIC_CPU_PRIMASK:
286 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports
287 * the PMR field as GICH_VMCR.VMPriMask rather than
288 * GICC_PMR.Priority, so we expose the upper five bits of
289 * priority mask to userspace using the lower bits of the
 * register value.
292 vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
293 GICV_PMR_PRIORITY_MASK;
295 case GIC_CPU_BINPOINT:
298 case GIC_CPU_ALIAS_BINPOINT:
/* Push the updated state back into the hardware/shadow VMCR. */
303 vgic_set_vmcr(vcpu, &vmcr);
/*
 * Dispatch table for the GICv2 distributor MMIO frame: maps each
 * register offset to its read/write handlers, its size (bytes, or bits
 * per IRQ for the *_WITH_BITS_PER_IRQ entries) and allowed access
 * widths.
 *
 * NOTE(review): several entries are missing their trailing access-width
 * argument in this view of the source (e.g. the CTRL and enable/pending
 * entries) — the macro invocations are incomplete as shown.
 */
306 static const struct vgic_register_region vgic_v2_dist_registers[] = {
307 REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL,
308 vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
/* IGROUP reads as all-ones, writes ignored (single security state). */
310 REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
311 vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
313 REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
314 vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
316 REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
317 vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
319 REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
320 vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
322 REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
323 vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
325 REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
326 vgic_mmio_read_active, vgic_mmio_write_sactive, 1,
328 REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
329 vgic_mmio_read_active, vgic_mmio_write_cactive, 1,
/* Byte-sized per-IRQ registers allow 8-bit as well as 32-bit access. */
331 REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
332 vgic_mmio_read_priority, vgic_mmio_write_priority, 8,
333 VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
334 REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
335 vgic_mmio_read_target, vgic_mmio_write_target, 8,
336 VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
337 REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
338 vgic_mmio_read_config, vgic_mmio_write_config, 2,
/* GICD_SGIR is write-only: reads return zero. */
340 REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
341 vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
343 REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
344 vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
345 VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
346 REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
347 vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
348 VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
/*
 * Dispatch table for the virtual GICC (CPU interface) frame.  Per the
 * comment above vgic_mmio_read_vcpuif, these handlers serve userspace
 * (KVM device attribute) accesses only.
 *
 * NOTE(review): most entries are missing their trailing access-width
 * argument in this view of the source.
 */
351 static const struct vgic_register_region vgic_v2_cpu_registers[] = {
352 REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
353 vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
355 REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
356 vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
358 REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
359 vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
361 REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
362 vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
/* Active-priority registers: read-as-zero, write-ignored. */
364 REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
365 vgic_mmio_read_raz, vgic_mmio_write_wi, 16,
367 REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
368 vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
/*
 * Initialize an I/O device descriptor for the GICv2 distributor frame:
 * attach the distributor register table and the generic vGIC MMIO ops.
 *
 * NOTE(review): the return statement (presumably the frame size, given
 * the unsigned int return type) is not visible in this view.
 */
372 unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
374 dev->regions = vgic_v2_dist_registers;
375 dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
377 kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);
/*
 * Check whether a userspace device attribute refers to a register that
 * this vGICv2 implementation actually emulates: parse the attribute
 * into a (vcpu, addr) pair, pick the matching register table for the
 * attribute group, and look the offset up as an aligned 32-bit access.
 *
 * NOTE(review): error-path returns, break statements, the default case
 * and the alignment check body are missing from this view.
 */
382 int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
384 const struct vgic_register_region *region;
385 struct vgic_io_device iodev;
386 struct vgic_reg_attr reg_attr;
387 struct kvm_vcpu *vcpu;
/*
 * NOTE(review): "®_attr" is a mis-encoding of "&reg_attr" (an HTML
 * "&reg;" entity was substituted during extraction) — this must be
 * restored for the line to compile.
 */
391 ret = vgic_v2_parse_attr(dev, attr, ®_attr);
395 vcpu = reg_attr.vcpu;
396 addr = reg_attr.addr;
/* Select the register table matching the attribute group. */
398 switch (attr->group) {
399 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
400 iodev.regions = vgic_v2_dist_registers;
401 iodev.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
404 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
405 iodev.regions = vgic_v2_cpu_registers;
406 iodev.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
413 /* We only support aligned 32-bit accesses. */
417 region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
/*
 * Userspace read/write of a single virtual GICC register: build a
 * transient CPU-interface I/O device around the cpu register table and
 * delegate the access to the generic vgic_uaccess() helper.
 */
424 int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
425 int offset, u32 *val)
427 struct vgic_io_device dev = {
428 .regions = vgic_v2_cpu_registers,
429 .nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
430 .iodev_type = IODEV_CPUIF,
433 return vgic_uaccess(vcpu, &dev, is_write, offset, val);
/*
 * Userspace read/write of a single distributor register: mirror of
 * vgic_v2_cpuif_uaccess() but using the distributor register table and
 * the IODEV_DIST device type.
 */
436 int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
437 int offset, u32 *val)
439 struct vgic_io_device dev = {
440 .regions = vgic_v2_dist_registers,
441 .nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
442 .iodev_type = IODEV_DIST,
445 return vgic_uaccess(vcpu, &dev, is_write, offset, val);