/*
 * Copyright (C) 2015 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <linux/uaccess.h>
#include <asm/kvm_mmu.h>
#include <asm/cputype.h>
25 int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
26 phys_addr_t addr, phys_addr_t alignment)
28 if (addr & ~KVM_PHYS_MASK)
31 if (!IS_ALIGNED(addr, alignment))
34 if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
41 * kvm_vgic_addr - set or get vgic VM base addresses
42 * @kvm: pointer to the vm struct
43 * @type: the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
44 * @addr: pointer to address value
45 * @write: if true set the address in the VM address space, if false read the
48 * Set or get the vgic base addresses for the distributor and the virtual CPU
49 * interface in the VM physical address space. These addresses are properties
50 * of the emulated core/SoC and therefore user space initially knows this
52 * Check them for sanity (alignment, double assignment). We can't check for
53 * overlapping regions in case of a virtual GICv3 here, since we don't know
54 * the number of VCPUs yet, so we defer this check to map_resources().
56 int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
59 struct vgic_dist *vgic = &kvm->arch.vgic;
61 phys_addr_t *addr_ptr, alignment;
63 mutex_lock(&kvm->lock);
65 case KVM_VGIC_V2_ADDR_TYPE_DIST:
66 type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
67 addr_ptr = &vgic->vgic_dist_base;
70 case KVM_VGIC_V2_ADDR_TYPE_CPU:
71 type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
72 addr_ptr = &vgic->vgic_cpu_base;
75 case KVM_VGIC_V3_ADDR_TYPE_DIST:
76 type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
77 addr_ptr = &vgic->vgic_dist_base;
80 case KVM_VGIC_V3_ADDR_TYPE_REDIST:
81 type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
82 addr_ptr = &vgic->vgic_redist_base;
90 if (vgic->vgic_model != type_needed) {
96 r = vgic_check_ioaddr(kvm, addr_ptr, *addr, alignment);
104 mutex_unlock(&kvm->lock);
108 static int vgic_set_common_attr(struct kvm_device *dev,
109 struct kvm_device_attr *attr)
113 switch (attr->group) {
114 case KVM_DEV_ARM_VGIC_GRP_ADDR: {
115 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
117 unsigned long type = (unsigned long)attr->attr;
119 if (copy_from_user(&addr, uaddr, sizeof(addr)))
122 r = kvm_vgic_addr(dev->kvm, type, &addr, true);
123 return (r == -ENODEV) ? -ENXIO : r;
125 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
126 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
130 if (get_user(val, uaddr))
135 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
136 * - at most 1024 interrupts
137 * - a multiple of 32 interrupts
139 if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
140 val > VGIC_MAX_RESERVED ||
144 mutex_lock(&dev->kvm->lock);
146 if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
149 dev->kvm->arch.vgic.nr_spis =
150 val - VGIC_NR_PRIVATE_IRQS;
152 mutex_unlock(&dev->kvm->lock);
156 case KVM_DEV_ARM_VGIC_GRP_CTRL: {
157 switch (attr->attr) {
158 case KVM_DEV_ARM_VGIC_CTRL_INIT:
159 mutex_lock(&dev->kvm->lock);
160 r = vgic_init(dev->kvm);
161 mutex_unlock(&dev->kvm->lock);
171 static int vgic_get_common_attr(struct kvm_device *dev,
172 struct kvm_device_attr *attr)
176 switch (attr->group) {
177 case KVM_DEV_ARM_VGIC_GRP_ADDR: {
178 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
180 unsigned long type = (unsigned long)attr->attr;
182 r = kvm_vgic_addr(dev->kvm, type, &addr, false);
184 return (r == -ENODEV) ? -ENXIO : r;
186 if (copy_to_user(uaddr, &addr, sizeof(addr)))
190 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
191 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
193 r = put_user(dev->kvm->arch.vgic.nr_spis +
194 VGIC_NR_PRIVATE_IRQS, uaddr);
202 static int vgic_create(struct kvm_device *dev, u32 type)
204 return kvm_vgic_create(dev->kvm, type);
207 static void vgic_destroy(struct kvm_device *dev)
212 int kvm_register_vgic_device(unsigned long type)
217 case KVM_DEV_TYPE_ARM_VGIC_V2:
218 ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
219 KVM_DEV_TYPE_ARM_VGIC_V2);
221 case KVM_DEV_TYPE_ARM_VGIC_V3:
222 ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
223 KVM_DEV_TYPE_ARM_VGIC_V3);
227 ret = kvm_vgic_register_its_device();
234 int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
235 struct vgic_reg_attr *reg_attr)
239 cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
240 KVM_DEV_ARM_VGIC_CPUID_SHIFT;
242 if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
245 reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
246 reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
251 /* unlocks vcpus from @vcpu_lock_idx and smaller */
252 static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
254 struct kvm_vcpu *tmp_vcpu;
256 for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
257 tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
258 mutex_unlock(&tmp_vcpu->mutex);
262 static void unlock_all_vcpus(struct kvm *kvm)
264 unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
267 /* Returns true if all vcpus were locked, false otherwise */
268 static bool lock_all_vcpus(struct kvm *kvm)
270 struct kvm_vcpu *tmp_vcpu;
274 * Any time a vcpu is run, vcpu_load is called which tries to grab the
275 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
276 * that no other VCPUs are run and fiddle with the vgic state while we
279 kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
280 if (!mutex_trylock(&tmp_vcpu->mutex)) {
281 unlock_vcpus(kvm, c - 1);
290 * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
292 * @dev: kvm device handle
293 * @attr: kvm device attribute
294 * @reg: address the value is read or written
295 * @is_write: true if userspace is writing a register
297 static int vgic_v2_attr_regs_access(struct kvm_device *dev,
298 struct kvm_device_attr *attr,
299 u32 *reg, bool is_write)
301 struct vgic_reg_attr reg_attr;
303 struct kvm_vcpu *vcpu;
306 ret = vgic_v2_parse_attr(dev, attr, ®_attr);
310 vcpu = reg_attr.vcpu;
311 addr = reg_attr.addr;
313 mutex_lock(&dev->kvm->lock);
315 ret = vgic_init(dev->kvm);
319 if (!lock_all_vcpus(dev->kvm)) {
324 switch (attr->group) {
325 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
326 ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
328 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
329 ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
336 unlock_all_vcpus(dev->kvm);
338 mutex_unlock(&dev->kvm->lock);
342 static int vgic_v2_set_attr(struct kvm_device *dev,
343 struct kvm_device_attr *attr)
347 ret = vgic_set_common_attr(dev, attr);
351 switch (attr->group) {
352 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
353 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
354 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
357 if (get_user(reg, uaddr))
360 return vgic_v2_attr_regs_access(dev, attr, ®, true);
367 static int vgic_v2_get_attr(struct kvm_device *dev,
368 struct kvm_device_attr *attr)
372 ret = vgic_get_common_attr(dev, attr);
376 switch (attr->group) {
377 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
378 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
379 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
382 ret = vgic_v2_attr_regs_access(dev, attr, ®, false);
385 return put_user(reg, uaddr);
392 static int vgic_v2_has_attr(struct kvm_device *dev,
393 struct kvm_device_attr *attr)
395 switch (attr->group) {
396 case KVM_DEV_ARM_VGIC_GRP_ADDR:
397 switch (attr->attr) {
398 case KVM_VGIC_V2_ADDR_TYPE_DIST:
399 case KVM_VGIC_V2_ADDR_TYPE_CPU:
403 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
404 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
405 return vgic_v2_has_attr_regs(dev, attr);
406 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
408 case KVM_DEV_ARM_VGIC_GRP_CTRL:
409 switch (attr->attr) {
410 case KVM_DEV_ARM_VGIC_CTRL_INIT:
417 struct kvm_device_ops kvm_arm_vgic_v2_ops = {
418 .name = "kvm-arm-vgic-v2",
419 .create = vgic_create,
420 .destroy = vgic_destroy,
421 .set_attr = vgic_v2_set_attr,
422 .get_attr = vgic_v2_get_attr,
423 .has_attr = vgic_v2_has_attr,
426 int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
427 struct vgic_reg_attr *reg_attr)
429 unsigned long vgic_mpidr, mpidr_reg;
432 * For KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
433 * attr might not hold MPIDR. Hence assume vcpu0.
435 if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
436 vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
437 KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;
439 mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
440 reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
442 reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
448 reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
454 * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
456 * @dev: kvm device handle
457 * @attr: kvm device attribute
458 * @reg: address the value is read or written
459 * @is_write: true if userspace is writing a register
461 static int vgic_v3_attr_regs_access(struct kvm_device *dev,
462 struct kvm_device_attr *attr,
463 u64 *reg, bool is_write)
465 struct vgic_reg_attr reg_attr;
467 struct kvm_vcpu *vcpu;
471 ret = vgic_v3_parse_attr(dev, attr, ®_attr);
475 vcpu = reg_attr.vcpu;
476 addr = reg_attr.addr;
478 mutex_lock(&dev->kvm->lock);
480 if (unlikely(!vgic_initialized(dev->kvm))) {
485 if (!lock_all_vcpus(dev->kvm)) {
490 switch (attr->group) {
491 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
495 ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &tmp32);
499 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
503 ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &tmp32);
507 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
510 regid = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
511 ret = vgic_v3_cpu_sysregs_uaccess(vcpu, is_write,
520 unlock_all_vcpus(dev->kvm);
522 mutex_unlock(&dev->kvm->lock);
526 static int vgic_v3_set_attr(struct kvm_device *dev,
527 struct kvm_device_attr *attr)
531 ret = vgic_set_common_attr(dev, attr);
535 switch (attr->group) {
536 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
537 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
538 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
542 if (get_user(tmp32, uaddr))
546 return vgic_v3_attr_regs_access(dev, attr, ®, true);
548 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
549 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
552 if (get_user(reg, uaddr))
555 return vgic_v3_attr_regs_access(dev, attr, ®, true);
561 static int vgic_v3_get_attr(struct kvm_device *dev,
562 struct kvm_device_attr *attr)
566 ret = vgic_get_common_attr(dev, attr);
570 switch (attr->group) {
571 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
572 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
573 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
577 ret = vgic_v3_attr_regs_access(dev, attr, ®, false);
581 return put_user(tmp32, uaddr);
583 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
584 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
587 ret = vgic_v3_attr_regs_access(dev, attr, ®, false);
590 return put_user(reg, uaddr);
597 static int vgic_v3_has_attr(struct kvm_device *dev,
598 struct kvm_device_attr *attr)
600 switch (attr->group) {
601 case KVM_DEV_ARM_VGIC_GRP_ADDR:
602 switch (attr->attr) {
603 case KVM_VGIC_V3_ADDR_TYPE_DIST:
604 case KVM_VGIC_V3_ADDR_TYPE_REDIST:
608 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
609 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
610 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
611 return vgic_v3_has_attr_regs(dev, attr);
612 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
614 case KVM_DEV_ARM_VGIC_GRP_CTRL:
615 switch (attr->attr) {
616 case KVM_DEV_ARM_VGIC_CTRL_INIT:
623 struct kvm_device_ops kvm_arm_vgic_v3_ops = {
624 .name = "kvm-arm-vgic-v3",
625 .create = vgic_create,
626 .destroy = vgic_destroy,
627 .set_attr = vgic_v3_set_attr,
628 .get_attr = vgic_v3_get_attr,
629 .has_attr = vgic_v3_has_attr,