/*
 * GICv3 ITS emulation
 *
 * Copyright (C) 2015,2016 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"
static int vgic_its_save_tables_v0(struct vgic_its *its);
static int vgic_its_restore_tables_v0(struct vgic_its *its);
static int vgic_its_commit_v0(struct vgic_its *its);
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu);
/*
 * Creates a new (reference to a) struct vgic_irq for a given LPI.
 * If this LPI is already mapped on another ITS, we increase its refcount
 * and return a pointer to the existing structure.
 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 * This function returns a pointer to the _unlocked_ structure.
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
				     struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
	int ret;

	/* In this case there is no put, since we keep the reference. */
	if (irq)
		return irq;

	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
	if (!irq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&irq->lpi_list);
	INIT_LIST_HEAD(&irq->ap_list);
	spin_lock_init(&irq->irq_lock);

	irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);
	irq->intid = intid;
	irq->target_vcpu = vcpu;

	spin_lock(&dist->lpi_list_lock);

	/*
	 * There could be a race with another vgic_add_lpi(), so we need to
	 * check that we don't add a second list entry with the same LPI.
	 */
	list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
		if (oldirq->intid != intid)
			continue;

		/* Someone was faster with adding this LPI, let's use that. */
		kfree(irq);
		irq = oldirq;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() on the returned pointer once it's
		 * finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);

		goto out_unlock;
	}

	list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
	dist->lpi_list_count++;

out_unlock:
	spin_unlock(&dist->lpi_list_lock);

	/*
	 * We "cache" the configuration table entries in our struct vgic_irq's.
	 * However we only have those structs for mapped IRQs, so we read in
	 * the respective config data from memory here upon mapping the LPI.
	 */
	ret = update_lpi_config(kvm, irq, NULL);
	if (ret)
		return ERR_PTR(ret);

	ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
	if (ret)
		return ERR_PTR(ret);

	return irq;
}
struct its_device {
	struct list_head dev_list;

	/* the head for the list of ITTEs */
	struct list_head itt_head;
	u32 num_eventid_bits;
	gpa_t itt_addr;
	u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

struct its_collection {
	struct list_head coll_list;

	u32 collection_id;
	u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
	((coll)->target_addr != COLLECTION_NOT_MAPPED))

struct its_ite {
	struct list_head ite_list;

	struct vgic_irq *irq;
	struct its_collection *collection;
	u32 lpi;
	u32 event_id;
};
/**
 * struct vgic_its_abi - ITS abi ops and settings
 * @cte_esz: collection table entry size
 * @dte_esz: device table entry size
 * @ite_esz: interrupt translation table entry size
 * @save_tables: save the ITS tables into guest RAM
 * @restore_tables: restore the ITS internal structs from tables
 *  stored in guest RAM
 * @commit: initialize the registers which expose the ABI settings,
 *  especially the entry sizes
 */
struct vgic_its_abi {
	int cte_esz;
	int dte_esz;
	int ite_esz;
	int (*save_tables)(struct vgic_its *its);
	int (*restore_tables)(struct vgic_its *its);
	int (*commit)(struct vgic_its *its);
};
static const struct vgic_its_abi its_table_abi_versions[] = {
	[0] = {.cte_esz = 8, .dte_esz = 8, .ite_esz = 8,
	 .save_tables = vgic_its_save_tables_v0,
	 .restore_tables = vgic_its_restore_tables_v0,
	 .commit = vgic_its_commit_v0,
	},
};

#define NR_ITS_ABIS	ARRAY_SIZE(its_table_abi_versions)

inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
{
	return &its_table_abi_versions[its->abi_rev];
}
int vgic_its_set_abi(struct vgic_its *its, int rev)
{
	const struct vgic_its_abi *abi;

	its->abi_rev = rev;
	abi = vgic_its_get_abi(its);
	return abi->commit(its);
}
/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
	struct its_device *device;

	list_for_each_entry(device, &its->device_list, dev_list)
		if (device_id == device->device_id)
			return device;

	return NULL;
}

/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
				u32 event_id)
{
	struct its_device *device;
	struct its_ite *ite;

	device = find_its_device(its, device_id);
	if (device == NULL)
		return NULL;

	list_for_each_entry(ite, &device->itt_head, ite_list)
		if (ite->event_id == event_id)
			return ite;

	return NULL;
}
/* To be used as an iterator, this macro misses the enclosing parentheses */
#define for_each_lpi_its(dev, ite, its) \
	list_for_each_entry(dev, &(its)->device_list, dev_list) \
		list_for_each_entry(ite, &(dev)->itt_head, ite_list)
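
/*
 * For example, walking every ITTE on an ITS therefore looks like:
 *
 *	for_each_lpi_its(device, ite, its)
 *		update_affinity_ite(kvm, ite);
 *
 * The nested list_for_each_entry() expansion is also why the macro
 * cannot be wrapped in parentheses or a do-while block.
 */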
/*
 * We only implement 48 bits of PA at the moment, although the ITS
 * supports more. Let's be restrictive here.
 */
#define BASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 16))
#define CBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 12))

#define GIC_LPI_OFFSET 8192

#define VITS_TYPER_IDBITS 16
#define VITS_TYPER_DEVBITS 16
#define VITS_DTE_MAX_DEVID_OFFSET	(BIT(14) - 1)
#define VITS_ITE_MAX_EVENTID_OFFSET	(BIT(16) - 1)
/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
	struct its_collection *collection;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		if (coll_id == collection->collection_id)
			return collection;
	}

	return NULL;
}

#define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)
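
/*
 * Each LPI has one configuration byte in the guest's property table:
 * bits [7:2] hold the priority and bit [0] the enable bit, which is
 * exactly what the two accessors above extract.
 */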
/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu)
{
	u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
	u8 prop;
	int ret;

	ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
			     &prop, 1);
	if (ret)
		return ret;

	spin_lock(&irq->irq_lock);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);
		irq->enabled = LPI_PROP_ENABLE_BIT(prop);

		vgic_queue_irq_unlock(kvm, irq);
	} else {
		spin_unlock(&irq->irq_lock);
	}

	return 0;
}
/*
 * Create a snapshot of the current LPI list, so that we can enumerate all
 * LPIs without holding any lock.
 * Returns the array length and puts the kmalloc'ed array into intid_ptr.
 */
static int vgic_copy_lpi_list(struct kvm *kvm, u32 **intid_ptr)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count = dist->lpi_list_count, i = 0;

	/*
	 * We use the current value of the list length, which may change
	 * after the kmalloc. We don't care, because the guest shouldn't
	 * change anything while the command handling is still running,
	 * and in the worst case we would miss a new IRQ, which one wouldn't
	 * expect to be covered by this command anyway.
	 */
	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
	if (!intids)
		return -ENOMEM;

	spin_lock(&dist->lpi_list_lock);
	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		/* We don't need to "get" the IRQ, as we hold the list lock. */
		intids[i] = irq->intid;
		if (++i == irq_count)
			break;
	}
	spin_unlock(&dist->lpi_list_lock);

	*intid_ptr = intids;
	return i;
}
/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
 * is targeting) to the VGIC's view, which deals with target VCPUs.
 * Needs to be called whenever either the collection for an LPI has
 * changed or the collection itself got retargeted.
 */
static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
{
	struct kvm_vcpu *vcpu;

	if (!its_is_collection_mapped(ite->collection))
		return;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);

	spin_lock(&ite->irq->irq_lock);
	ite->irq->target_vcpu = vcpu;
	spin_unlock(&ite->irq->irq_lock);
}

/*
 * Updates the target VCPU for every LPI targeting this collection.
 * Must be called with the its_lock mutex held.
 */
static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
				       struct its_collection *coll)
{
	struct its_device *device;
	struct its_ite *ite;

	for_each_lpi_its(device, ite, its) {
		if (!ite->collection || coll != ite->collection)
			continue;

		update_affinity_ite(kvm, ite);
	}
}

static u32 max_lpis_propbaser(u64 propbaser)
{
	int nr_idbits = (propbaser & 0x1f) + 1;

	return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
}
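
/*
 * Example: a PROPBASER IDbits field of 13 yields 1 << 14 = 16384 valid
 * INTIDs, i.e. LPIs 8192..16383 once GIC_LPI_OFFSET is accounted for.
 * INTERRUPT_ID_BITS_ITS caps the result at what this emulation supports.
 */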
/*
 * Scan the whole LPI pending table and sync the pending bit in there
 * with our own data structures. This relies on the LPI being
 * mapped before.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
	gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
	struct vgic_irq *irq;
	int last_byte_offset = -1;
	int ret = 0;
	u32 *intids;
	int nr_irqs, i;
	u8 pendmask;

	nr_irqs = vgic_copy_lpi_list(vcpu->kvm, &intids);
	if (nr_irqs < 0)
		return nr_irqs;

	for (i = 0; i < nr_irqs; i++) {
		int byte_offset, bit_nr;

		byte_offset = intids[i] / BITS_PER_BYTE;
		bit_nr = intids[i] % BITS_PER_BYTE;

		/*
		 * For contiguously allocated LPIs chances are we just read
		 * this very same byte in the last iteration. Reuse that.
		 */
		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
					     &pendmask, 1);
			if (ret) {
				kfree(intids);
				return ret;
			}
			last_byte_offset = byte_offset;
		}

		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
		spin_lock(&irq->irq_lock);
		irq->pending_latch = pendmask & (1U << bit_nr);
		vgic_queue_irq_unlock(vcpu->kvm, irq);
		vgic_put_irq(vcpu->kvm, irq);
	}

	kfree(intids);

	return ret;
}
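
/*
 * Example of the mapping above: INTID 8195 lives at byte_offset
 * 8195 / 8 = 1024 and bit_nr 8195 % 8 = 3, so its pending state is
 * bit 3 of byte 1024 in the guest's pending table.
 */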
static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 reg = GITS_TYPER_PLPIS;

	/*
	 * We use linear CPU numbers for redistributor addressing,
	 * so GITS_TYPER.PTA is 0.
	 * Also we force all PROPBASER registers to be the same, so
	 * CommonLPIAff is 0 as well.
	 * To avoid memory waste in the guest, we keep the number of IDBits and
	 * DevBits low - at least for the time being.
	 */
	reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;

	return extract_bytes(reg, addr & 7, len);
}
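
/*
 * Note on the encoding above: GITS_TYPER reports Devbits and IDbits as
 * "number of bits minus one" fields, which GIC_ENCODE_SZ() takes care of;
 * with VITS_TYPER_DEVBITS == 16 the guest reads back a Devbits field of 15.
 */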
static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 val;

	val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
	val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
	return val;
}

static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
					    struct vgic_its *its,
					    gpa_t addr, unsigned int len,
					    unsigned long val)
{
	u32 rev = GITS_IIDR_REV(val);

	if (rev >= NR_ITS_ABIS)
		return -EINVAL;
	return vgic_its_set_abi(its, rev);
}
static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GITS_PIDR0:
		return 0x92;	/* part number, bits[7:0] */
	case GITS_PIDR1:
		return 0xb4;	/* part number, bits[11:8] */
	case GITS_PIDR2:
		return GIC_PIDR2_ARCH_GICv3 | 0x0b;
	case GITS_PIDR4:
		return 0x40;	/* This is a 64K software visible page */
	/* The following are the ID registers for (any) GIC. */
	case GITS_CIDR0:
		return 0x0d;
	case GITS_CIDR1:
		return 0xf0;
	case GITS_CIDR2:
		return 0x05;
	case GITS_CIDR3:
		return 0xb1;
	}

	return 0;
}
/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping
 * related errors and negative error values for generic errors.
 */
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
				u32 devid, u32 eventid)
{
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;

	if (!its->enabled)
		return -EBUSY;

	ite = find_ite(its, devid, eventid);
	if (!ite || !its_is_collection_mapped(ite->collection))
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
	if (!vcpu)
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	if (!vcpu->arch.vgic_cpu.lpis_enabled)
		return -EBUSY;

	spin_lock(&ite->irq->irq_lock);
	ite->irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, ite->irq);

	return 0;
}
static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
{
	struct vgic_io_device *iodev;

	if (dev->ops != &kvm_io_gic_ops)
		return NULL;

	iodev = container_of(dev, struct vgic_io_device, dev);

	if (iodev->iodev_type != IODEV_ITS)
		return NULL;

	return iodev;
}
/*
 * Queries the KVM IO bus framework to get the ITS pointer from the given
 * doorbell address.
 * We then call vgic_its_trigger_msi() with the decoded data.
 * According to the KVM_SIGNAL_MSI API description returns 1 on success.
 */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	u64 address;
	struct kvm_io_device *kvm_io_dev;
	struct vgic_io_device *iodev;
	int ret;

	if (!vgic_has_its(kvm))
		return -ENODEV;

	if (!(msi->flags & KVM_MSI_VALID_DEVID))
		return -EINVAL;

	address = (u64)msi->address_hi << 32 | msi->address_lo;

	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
	if (!kvm_io_dev)
		return -EINVAL;

	iodev = vgic_get_its_iodev(kvm_io_dev);
	if (!iodev)
		return -EINVAL;

	mutex_lock(&iodev->its->its_lock);
	ret = vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
	mutex_unlock(&iodev->its->its_lock);

	if (ret < 0)
		return ret;

	/*
	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
	 * if the guest has blocked the MSI. So we map any LPI mapping
	 * related error to that.
	 */
	if (ret)
		return 0;
	else
		return 1;
}
/* Requires the its_lock to be held. */
static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
{
	list_del(&ite->ite_list);

	/* This put matches the get in vgic_add_lpi. */
	if (ite->irq)
		vgic_put_irq(kvm, ite->irq);

	kfree(ite);
}

static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
	return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}

#define its_cmd_get_command(cmd)	its_cmd_mask_field(cmd, 0,  0,  8)
#define its_cmd_get_deviceid(cmd)	its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_size(cmd)		(its_cmd_mask_field(cmd, 1,  0,  5) + 1)
#define its_cmd_get_id(cmd)		its_cmd_mask_field(cmd, 1,  0, 32)
#define its_cmd_get_physical_id(cmd)	its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)	its_cmd_mask_field(cmd, 2,  0, 16)
#define its_cmd_get_ittaddr(cmd)	(its_cmd_mask_field(cmd, 2,  8, 44) << 8)
#define its_cmd_get_target_addr(cmd)	its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)	its_cmd_mask_field(cmd, 2, 63,  1)
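
/*
 * Worked example: a MAPTI command carries its opcode in bits [7:0] and the
 * device ID in bits [63:32] of doubleword 0, the event ID in bits [31:0]
 * and the physical LPI number in bits [63:32] of doubleword 1, and the
 * collection ID in bits [15:0] of doubleword 2, exactly as the accessors
 * above slice them out.
 */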
/*
 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
				       u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (ite && ite->collection) {
		/*
		 * Though the spec talks about removing the pending state, we
		 * don't bother here since we clear the ITTE anyway and the
		 * pending state is a property of the ITTE struct.
		 */
		its_free_ite(kvm, ite);
		return 0;
	}

	return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
}
/*
 * The MOVI command moves an ITTE to a different collection.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;
	struct its_collection *collection;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_MOVI_UNMAPPED_INTERRUPT;

	if (!its_is_collection_mapped(ite->collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	ite->collection = collection;
	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	spin_lock(&ite->irq->irq_lock);
	ite->irq->target_vcpu = vcpu;
	spin_unlock(&ite->irq->irq_lock);

	return 0;
}
/*
 * Check whether an ID can be stored into the corresponding guest table.
 * For a direct table this is pretty easy, but gets a bit nasty for
 * indirect tables. We check whether the resulting guest physical address
 * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
			      gpa_t *eaddr)
{
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
	int esz = GITS_BASER_ENTRY_SIZE(baser);
	int index;
	gfn_t gfn;

	switch (type) {
	case GITS_BASER_TYPE_DEVICE:
		if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
			return false;
		break;
	case GITS_BASER_TYPE_COLLECTION:
		/* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
		if (id >= BIT_ULL(16))
			return false;
		break;
	default:
		return false;
	}

	if (!(baser & GITS_BASER_INDIRECT)) {
		phys_addr_t addr;

		if (id >= (l1_tbl_size / esz))
			return false;

		addr = BASER_ADDRESS(baser) + id * esz;
		gfn = addr >> PAGE_SHIFT;

		if (eaddr)
			*eaddr = addr;
		return kvm_is_visible_gfn(its->dev->kvm, gfn);
	}

	/* calculate and check the index into the 1st level */
	index = id / (SZ_64K / esz);
	if (index >= (l1_tbl_size / sizeof(u64)))
		return false;

	/* Each 1st level entry is represented by a 64-bit value. */
	if (kvm_read_guest(its->dev->kvm,
			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
			   &indirect_ptr, sizeof(indirect_ptr)))
		return false;

	indirect_ptr = le64_to_cpu(indirect_ptr);

	/* check the valid bit of the first level entry */
	if (!(indirect_ptr & BIT_ULL(63)))
		return false;

	/*
	 * Mask the guest physical address and calculate the frame number.
	 * Any address beyond our supported 48 bits of PA will be caught
	 * by the actual check in the final step.
	 */
	indirect_ptr &= GENMASK_ULL(51, 16);

	/* Find the address of the actual entry */
	index = id % (SZ_64K / esz);
	indirect_ptr += index * esz;
	gfn = indirect_ptr >> PAGE_SHIFT;

	if (eaddr)
		*eaddr = indirect_ptr;
	return kvm_is_visible_gfn(its->dev->kvm, gfn);
}
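
/*
 * Worked example for the indirect case above: with an entry size of 8
 * bytes a 64K second-level page holds 8192 entries, so ID 20000 uses
 * first-level index 20000 / 8192 = 2 and lands at offset
 * (20000 % 8192) * 8 within that second-level page.
 */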
static int vgic_its_alloc_collection(struct vgic_its *its,
				     struct its_collection **colp,
				     u32 coll_id)
{
	struct its_collection *collection;

	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
		return E_ITS_MAPC_COLLECTION_OOR;

	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
	if (!collection)
		return -ENOMEM;

	collection->collection_id = coll_id;
	collection->target_addr = COLLECTION_NOT_MAPPED;

	list_add_tail(&collection->coll_list, &its->collection_list);
	*colp = collection;

	return 0;
}

static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
{
	struct its_collection *collection;
	struct its_device *device;
	struct its_ite *ite;

	/*
	 * Clearing the mapping for that collection ID removes the
	 * entry from the list. If there wasn't any before, we can
	 * go home early.
	 */
	collection = find_collection(its, coll_id);
	if (!collection)
		return;

	for_each_lpi_its(device, ite, its)
		if (ite->collection &&
		    ite->collection->collection_id == coll_id)
			ite->collection = NULL;

	list_del(&collection->coll_list);
	kfree(collection);
}
/* Must be called with its_lock mutex held */
static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
					  struct its_collection *collection,
					  u32 lpi_id, u32 event_id)
{
	struct its_ite *ite;

	ite = kzalloc(sizeof(*ite), GFP_KERNEL);
	if (!ite)
		return ERR_PTR(-ENOMEM);

	ite->event_id = event_id;
	ite->collection = collection;
	ite->lpi = lpi_id;

	list_add_tail(&ite->ite_list, &device->itt_head);
	return ite;
}
/*
 * The MAPTI and MAPI commands map LPIs to ITTEs.
 * Must be called with its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_ite *ite;
	struct kvm_vcpu *vcpu = NULL;
	struct its_device *device;
	struct its_collection *collection, *new_coll = NULL;
	struct vgic_irq *irq;
	u32 lpi_nr;

	device = find_its_device(its, device_id);
	if (!device)
		return E_ITS_MAPTI_UNMAPPED_DEVICE;

	if (event_id >= BIT_ULL(device->num_eventid_bits))
		return E_ITS_MAPTI_ID_OOR;

	if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
		lpi_nr = its_cmd_get_physical_id(its_cmd);
	else
		lpi_nr = event_id;
	if (lpi_nr < GIC_LPI_OFFSET ||
	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
		return E_ITS_MAPTI_PHYSICALID_OOR;

	/* If there is an existing mapping, behavior is UNPREDICTABLE. */
	if (find_ite(its, device_id, event_id))
		return 0;

	collection = find_collection(its, coll_id);
	if (!collection) {
		int ret = vgic_its_alloc_collection(its, &collection, coll_id);
		if (ret)
			return ret;
		new_coll = collection;
	}

	ite = vgic_its_alloc_ite(device, collection, lpi_nr, event_id);
	if (IS_ERR(ite)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		return PTR_ERR(ite);
	}

	if (its_is_collection_mapped(collection))
		vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
	if (IS_ERR(irq)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		its_free_ite(kvm, ite);
		return PTR_ERR(irq);
	}
	ite->irq = irq;

	return 0;
}
/* Requires the its_lock to be held. */
static void vgic_its_unmap_device(struct kvm *kvm, struct its_device *device)
{
	struct its_ite *ite, *temp;

	/*
	 * The spec says that unmapping a device with still valid
	 * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
	 * since we cannot leave the memory unreferenced.
	 */
	list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
		its_free_ite(kvm, ite);

	list_del(&device->dev_list);
	kfree(device);
}

/* Must be called with its_lock mutex held */
static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
						u32 device_id, gpa_t itt_addr,
						u8 num_eventid_bits)
{
	struct its_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	device->device_id = device_id;
	device->itt_addr = itt_addr;
	device->num_eventid_bits = num_eventid_bits;
	INIT_LIST_HEAD(&device->itt_head);

	list_add_tail(&device->dev_list, &its->device_list);
	return device;
}
/*
 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	bool valid = its_cmd_get_validbit(its_cmd);
	u8 num_eventid_bits = its_cmd_get_size(its_cmd);
	gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
	struct its_device *device;

	if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
		return E_ITS_MAPD_DEVICE_OOR;

	if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
		return E_ITS_MAPD_ITTSIZE_OOR;

	device = find_its_device(its, device_id);

	/*
	 * The spec says that calling MAPD on an already mapped device
	 * invalidates all cached data for this device. We implement this
	 * by removing the mapping and re-establishing it.
	 */
	if (device)
		vgic_its_unmap_device(kvm, device);

	/*
	 * The spec does not say whether unmapping a not-mapped device
	 * is an error, so we are done in any case.
	 */
	if (!valid)
		return 0;

	device = vgic_its_alloc_device(its, device_id, itt_addr,
				       num_eventid_bits);
	if (IS_ERR(device))
		return PTR_ERR(device);

	return 0;
}
/*
 * The MAPC command maps collection IDs to redistributors.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u16 coll_id;
	u32 target_addr;
	struct its_collection *collection;
	bool valid;

	valid = its_cmd_get_validbit(its_cmd);
	coll_id = its_cmd_get_collection(its_cmd);
	target_addr = its_cmd_get_target_addr(its_cmd);

	if (target_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MAPC_PROCNUM_OOR;

	if (!valid) {
		vgic_its_free_collection(its, coll_id);
	} else {
		collection = find_collection(its, coll_id);

		if (!collection) {
			int ret;

			ret = vgic_its_alloc_collection(its, &collection,
							coll_id);
			if (ret)
				return ret;
			collection->target_addr = target_addr;
		} else {
			collection->target_addr = target_addr;
			update_affinity_collection(kvm, its, collection);
		}
	}

	return 0;
}
/*
 * The CLEAR command removes the pending state for a particular LPI.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
				     u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_CLEAR_UNMAPPED_INTERRUPT;

	ite->irq->pending_latch = false;

	return 0;
}

/*
 * The INV command syncs the configuration bits from the memory table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_INV_UNMAPPED_INTERRUPT;

	return update_lpi_config(kvm, ite->irq, NULL);
}
/*
 * The INVALL command requests flushing of all IRQ data in this collection.
 * Find the VCPU mapped to that collection, then iterate over the VM's list
 * of mapped LPIs and update the configuration for each IRQ which targets
 * the specified vcpu. The configuration will be read from the in-memory
 * configuration table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_collection *collection;
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count, i;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_INVALL_UNMAPPED_COLLECTION;

	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq_count = vgic_copy_lpi_list(kvm, &intids);
	if (irq_count < 0)
		return irq_count;

	for (i = 0; i < irq_count; i++) {
		irq = vgic_get_irq(kvm, NULL, intids[i]);
		if (!irq)
			continue;
		update_lpi_config(kvm, irq, vcpu);
		vgic_put_irq(kvm, irq);
	}

	kfree(intids);

	return 0;
}
/*
 * The MOVALL command moves the pending state of all IRQs targeting one
 * redistributor to another. We don't hold the pending state in the VCPUs,
 * but in the IRQs instead, so there is really not much to do for us here.
 * However the spec says that no IRQ must target the old redistributor
 * afterwards, so we make sure that no LPI is using the associated target_vcpu.
 * This command affects all LPIs in the system that target that redistributor.
 */
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	u32 target1_addr = its_cmd_get_target_addr(its_cmd);
	u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
	struct kvm_vcpu *vcpu1, *vcpu2;
	struct vgic_irq *irq;

	if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
	    target2_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MOVALL_PROCNUM_OOR;

	if (target1_addr == target2_addr)
		return 0;

	vcpu1 = kvm_get_vcpu(kvm, target1_addr);
	vcpu2 = kvm_get_vcpu(kvm, target2_addr);

	spin_lock(&dist->lpi_list_lock);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		spin_lock(&irq->irq_lock);

		if (irq->target_vcpu == vcpu1)
			irq->target_vcpu = vcpu2;

		spin_unlock(&irq->irq_lock);
	}

	spin_unlock(&dist->lpi_list_lock);

	return 0;
}
/*
 * The INT command injects the LPI associated with that DevID/EvID pair.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 msi_data = its_cmd_get_id(its_cmd);
	u64 msi_devid = its_cmd_get_deviceid(its_cmd);

	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
}
/*
 * This function is called with the its_cmd lock held, but the ITS data
 * structure lock dropped.
 */
static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	int ret = -ENODEV;

	mutex_lock(&its->its_lock);
	switch (its_cmd_get_command(its_cmd)) {
	case GITS_CMD_MAPD:
		ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPC:
		ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPTI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVI:
		ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
		break;
	case GITS_CMD_DISCARD:
		ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
		break;
	case GITS_CMD_CLEAR:
		ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVALL:
		ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
		break;
	case GITS_CMD_INT:
		ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
		break;
	case GITS_CMD_INV:
		ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
		break;
	case GITS_CMD_INVALL:
		ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
		break;
	case GITS_CMD_SYNC:
		/* we ignore this command: we are in sync all of the time */
		ret = 0;
		break;
	}
	mutex_unlock(&its->its_lock);

	return ret;
}
static u64 vgic_sanitise_its_baser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
				  GITS_BASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
				  GITS_BASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
				  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* Bits 15:12 contain bits 51:48 of the PA, which we don't support. */
	reg &= ~GENMASK_ULL(15, 12);

	/* We support only one (ITS) page size: 64K */
	reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;

	return reg;
}
static u64 vgic_sanitise_its_cbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
				  GITS_CBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
				  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
				  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/*
	 * Sanitise the physical address to be 64k aligned.
	 * Also limit the physical addresses to 48 bits.
	 */
	reg &= ~(GENMASK_ULL(51, 48) | GENMASK_ULL(15, 12));

	return reg;
}

static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cbaser, addr & 7, len);
}
static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
				       gpa_t addr, unsigned int len,
				       unsigned long val)
{
	/* When GITS_CTLR.Enable is 1, this register is RO. */
	if (its->enabled)
		return;

	mutex_lock(&its->cmd_lock);
	its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
	its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
	its->creadr = 0;
	/*
	 * CWRITER is architecturally UNKNOWN on reset, but we need to reset
	 * it to CREADR to make sure we start with an empty command buffer.
	 */
	its->cwriter = its->creadr;
	mutex_unlock(&its->cmd_lock);
}

#define ITS_CMD_BUFFER_SIZE(baser)	((((baser) & 0xff) + 1) << 12)
#define ITS_CMD_SIZE			32
#define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))
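
/*
 * The CBASER Size field (bits [7:0]) encodes the ring size in 4K pages
 * minus one, so the smallest command queue is 4KB and holds 128 commands
 * of ITS_CMD_SIZE (32) bytes each. ITS_CMD_OFFSET() masks a CREADR or
 * CWRITER value down to its 32-byte aligned offset bits.
 */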
/* Must be called with the cmd_lock held. */
static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
{
	gpa_t cbaser;
	u64 cmd_buf[4];

	/* Commands are only processed when the ITS is enabled. */
	if (!its->enabled)
		return;

	cbaser = CBASER_ADDRESS(its->cbaser);

	while (its->cwriter != its->creadr) {
		int ret = kvm_read_guest(kvm, cbaser + its->creadr,
					 cmd_buf, ITS_CMD_SIZE);
		/*
		 * If kvm_read_guest() fails, this could be due to the guest
		 * programming a bogus value in CBASER or something else going
		 * wrong from which we cannot easily recover.
		 * According to section 6.3.2 in the GICv3 spec we can just
		 * ignore that command then.
		 */
		if (!ret)
			vgic_its_handle_command(kvm, its, cmd_buf);

		its->creadr += ITS_CMD_SIZE;
		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
			its->creadr = 0;
	}
}
/*
 * By writing to CWRITER the guest announces new commands to be processed.
 * To avoid any races in the first place, we take the its_cmd lock, which
 * protects our ring buffer variables, so that there is only one user
 * per ITS handling commands at a given time.
 */
static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
					gpa_t addr, unsigned int len,
					unsigned long val)
{
	u64 reg;

	if (!its)
		return;

	mutex_lock(&its->cmd_lock);

	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
	reg = ITS_CMD_OFFSET(reg);
	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		mutex_unlock(&its->cmd_lock);
		return;
	}
	its->cwriter = reg;

	vgic_its_process_commands(kvm, its);

	mutex_unlock(&its->cmd_lock);
}

static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
						struct vgic_its *its,
						gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cwriter, addr & 0x7, len);
}

static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->creadr, addr & 0x7, len);
}
static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len,
					      unsigned long val)
{
	u32 cmd_offset;
	int ret = 0;

	mutex_lock(&its->cmd_lock);

	if (its->enabled) {
		ret = -EBUSY;
		goto out;
	}

	cmd_offset = ITS_CMD_OFFSET(val);
	if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		ret = -EINVAL;
		goto out;
	}

	its->creadr = cmd_offset;
out:
	mutex_unlock(&its->cmd_lock);
	return ret;
}
#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	u64 reg;

	switch (BASER_INDEX(addr)) {
	case 0:
		reg = its->baser_device_table;
		break;
	case 1:
		reg = its->baser_coll_table;
		break;
	default:
		reg = 0;
		break;
	}

	return extract_bytes(reg, addr & 7, len);
}
#define GITS_BASER_RO_MASK	(GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
static void vgic_mmio_write_its_baser(struct kvm *kvm,
				      struct vgic_its *its,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 entry_size, device_type;
	u64 reg, *regptr, clearbits = 0;

	/* When GITS_CTLR.Enable is 1, we ignore write accesses. */
	if (its->enabled)
		return;

	switch (BASER_INDEX(addr)) {
	case 0:
		regptr = &its->baser_device_table;
		entry_size = abi->dte_esz;
		device_type = GITS_BASER_TYPE_DEVICE;
		break;
	case 1:
		regptr = &its->baser_coll_table;
		entry_size = abi->cte_esz;
		device_type = GITS_BASER_TYPE_COLLECTION;
		clearbits = GITS_BASER_INDIRECT;
		break;
	default:
		return;
	}

	reg = update_64bit_reg(*regptr, addr & 7, len, val);
	reg &= ~GITS_BASER_RO_MASK;
	reg &= ~clearbits;

	reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
	reg |= device_type << GITS_BASER_TYPE_SHIFT;
	reg = vgic_sanitise_its_baser(reg);

	*regptr = reg;
}
static unsigned long vgic_mmio_read_its_ctlr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 reg = 0;

	mutex_lock(&its->cmd_lock);
	if (its->creadr == its->cwriter)
		reg |= GITS_CTLR_QUIESCENT;
	if (its->enabled)
		reg |= GITS_CTLR_ENABLE;
	mutex_unlock(&its->cmd_lock);

	return reg;
}

static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	mutex_lock(&its->cmd_lock);

	its->enabled = !!(val & GITS_CTLR_ENABLE);

	/*
	 * Try to process any pending commands. This function bails out early
	 * if the ITS is disabled or no commands have been queued.
	 */
	vgic_its_process_commands(kvm, its);

	mutex_unlock(&its->cmd_lock);
}
#define REGISTER_ITS_DESC(off, rd, wr, length, acc)		\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
}

#define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
	.uaccess_its_write = uwr,				\
}

static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
			      gpa_t addr, unsigned int len, unsigned long val)
{
	/* Ignore */
}
static struct vgic_register_region its_registers[] = {
	REGISTER_ITS_DESC(GITS_CTLR,
		vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
		vgic_mmio_read_its_iidr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_iidr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_TYPER,
		vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CBASER,
		vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CWRITER,
		vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
		vgic_mmio_read_its_creadr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_creadr, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_BASER,
		vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_IDREGS_BASE,
		vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
		VGIC_ACCESS_32bit),
};
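
/*
 * The 0x40 bytes reserved for GITS_BASER above cover all eight 64-bit
 * GITS_BASER<n> registers; only BASER0 (device table) and BASER1
 * (collection table) are backed, the others read as zero (see
 * vgic_mmio_read_its_baser()).
 */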
/* This is called on setting the LPI enable bit in the redistributor. */
void vgic_enable_lpis(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
		its_sync_lpi_pending_table(vcpu);
}

static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its)
{
	struct vgic_io_device *iodev = &its->iodev;
	int ret;

	if (!its->initialized)
		return -EBUSY;

	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base))
		return -ENXIO;

	iodev->regions = its_registers;
	iodev->nr_regions = ARRAY_SIZE(its_registers);
	kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);

	iodev->base_addr = its->vgic_its_base;
	iodev->iodev_type = IODEV_ITS;
	iodev->its = its;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}
#define INITIAL_BASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)		| \
	 GITS_BASER_PAGE_SIZE_64K)

#define INITIAL_PROPBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))
static int vgic_its_create(struct kvm_device *dev, u32 type)
{
	struct vgic_its *its;

	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
		return -ENODEV;

	its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
	if (!its)
		return -ENOMEM;

	mutex_init(&its->its_lock);
	mutex_init(&its->cmd_lock);

	its->vgic_its_base = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&its->device_list);
	INIT_LIST_HEAD(&its->collection_list);

	dev->kvm->arch.vgic.has_its = true;
	its->initialized = false;
	its->enabled = false;
	its->dev = dev;

	its->baser_device_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
	its->baser_coll_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
	dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;

	dev->private = its;

	return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
}
static void vgic_its_destroy(struct kvm_device *kvm_dev)
{
	struct kvm *kvm = kvm_dev->kvm;
	struct vgic_its *its = kvm_dev->private;
	struct its_device *dev;
	struct its_ite *ite;
	struct list_head *dev_cur, *dev_temp;
	struct list_head *cur, *temp;

	/*
	 * We may end up here without the lists ever having been initialized.
	 * Check this and bail out early to avoid dereferencing a NULL pointer.
	 */
	if (!its->device_list.next)
		return;

	mutex_lock(&its->its_lock);
	list_for_each_safe(dev_cur, dev_temp, &its->device_list) {
		dev = container_of(dev_cur, struct its_device, dev_list);
		list_for_each_safe(cur, temp, &dev->itt_head) {
			ite = (container_of(cur, struct its_ite, ite_list));
			its_free_ite(kvm, ite);
		}
		list_del(dev_cur);
		kfree(dev);
	}

	list_for_each_safe(cur, temp, &its->collection_list) {
		list_del(cur);
		kfree(container_of(cur, struct its_collection, coll_list));
	}
	mutex_unlock(&its->its_lock);

	kfree(its);
}
int vgic_its_has_attr_regs(struct kvm_device *dev,
			   struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	gpa_t offset = attr->attr;
	int align;

	align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;

	if (offset & align)
		return -EINVAL;

	region = vgic_find_mmio_region(its_registers,
				       ARRAY_SIZE(its_registers),
				       offset);
	if (!region)
		return -ENXIO;

	return 0;
}
int vgic_its_attr_regs_access(struct kvm_device *dev,
			      struct kvm_device_attr *attr,
			      u64 *reg, bool is_write)
{
	const struct vgic_register_region *region;
	struct vgic_its *its;
	gpa_t addr, offset;
	unsigned int len;
	int align, ret = 0;

	its = dev->private;
	offset = attr->attr;

	/*
	 * Although the spec supports upper/lower 32-bit accesses to
	 * 64-bit ITS registers, the userspace ABI requires 64-bit
	 * accesses to all 64-bit wide registers. We therefore only
	 * support 32-bit accesses to GITS_CTLR, GITS_IIDR and GITS ID
	 * registers.
	 */
	if ((offset < GITS_TYPER) || (offset >= GITS_PIDR4))
		align = 0x3;
	else
		align = 0x7;

	if (offset & align)
		return -EINVAL;

	mutex_lock(&dev->kvm->lock);

	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
		ret = -ENXIO;
		goto out;
	}

	region = vgic_find_mmio_region(its_registers,
				       ARRAY_SIZE(its_registers),
				       offset);
	if (!region) {
		ret = -ENXIO;
		goto out;
	}

	if (!lock_all_vcpus(dev->kvm)) {
		ret = -EBUSY;
		goto out;
	}

	addr = its->vgic_its_base + offset;

	len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;

	if (is_write) {
		if (region->uaccess_its_write)
			ret = region->uaccess_its_write(dev->kvm, its, addr,
							len, *reg);
		else
			region->its_write(dev->kvm, its, addr, len, *reg);
	} else {
		*reg = region->its_read(dev->kvm, its, addr, len);
	}
	unlock_all_vcpus(dev->kvm);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}
u32 compute_next_devid_offset(struct list_head *h, struct its_device *dev)
{
	struct its_device *next;
	u32 next_offset;

	if (list_is_last(&dev->dev_list, h))
		return 0;
	next = list_next_entry(dev, dev_list);
	next_offset = next->device_id - dev->device_id;

	return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET);
}

u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
{
	struct its_ite *next;
	u32 next_offset;

	if (list_is_last(&ite->ite_list, h))
		return 0;
	next = list_next_entry(ite, ite_list);
	next_offset = next->event_id - ite->event_id;

	return min_t(u32, next_offset, VITS_ITE_MAX_EVENTID_OFFSET);
}
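
/*
 * Example for the helpers above: if a device with device_id 5 is followed
 * by one with device_id 9, the stored offset is 4. Offsets are clamped to
 * what a table entry can encode (BIT(14) - 1 for DTEs, BIT(16) - 1 for
 * ITEs).
 */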
/**
 * entry_fn_t - Callback called on a table entry restore path
 * @its: its handle
 * @id: id of the entry
 * @entry: pointer to the entry
 * @opaque: pointer to an opaque data
 *
 * Return: < 0 on error, 0 if last element was identified, id offset to next
 * element otherwise
 */
typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
			  void *opaque);

/**
 * scan_its_table - Scan a contiguous table in guest RAM and applies a function
 * to each entry
 *
 * @its: its handle
 * @base: base gpa of the table
 * @size: size of the table in bytes
 * @esz: entry size in bytes
 * @start_id: the ID of the first entry in the table
 * (non zero for 2d level tables)
 * @fn: function to apply on each entry
 *
 * Return: < 0 on error, 0 if last element was identified, 1 otherwise
 * (the last element may not be found on second level tables)
 */
int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz,
		   int start_id, entry_fn_t fn, void *opaque)
{
	void *entry = kzalloc(esz, GFP_KERNEL);
	struct kvm *kvm = its->dev->kvm;
	unsigned long len = size;
	int id = start_id;
	gpa_t gpa = base;
	int ret;

	if (!entry)
		return -ENOMEM;

	while (len > 0) {
		int next_offset;
		size_t byte_offset;

		ret = kvm_read_guest(kvm, gpa, entry, esz);
		if (ret)
			goto out;

		next_offset = fn(its, id, entry, opaque);
		if (next_offset <= 0) {
			ret = next_offset;
			goto out;
		}

		byte_offset = next_offset * esz;
		id += next_offset;
		gpa += byte_offset;
		len -= byte_offset;
	}
	ret = 1;

out:
	kfree(entry);
	return ret;
}
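
/*
 * Illustrative entry_fn_t callback (not part of this file) showing the
 * contract scan_its_table() relies on:
 *
 *	static int sketch_fn(struct vgic_its *its, u32 id, void *entry,
 *			     void *opaque)
 *	{
 *		u64 val = le64_to_cpu(*(__le64 *)entry);
 *
 *		if (!(val & BIT_ULL(63)))
 *			return 0;	(invalid entry: treat as the last one)
 *		return 1;		(continue with the next ID)
 *	}
 */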
/**
 * vgic_its_save_device_tables - Save the device table and all ITT
 * into guest RAM
 */
static int vgic_its_save_device_tables(struct vgic_its *its)
{
	return -ENXIO;
}

/**
 * vgic_its_restore_device_tables - Restore the device table and all ITT
 * from guest RAM to internal data structs
 */
static int vgic_its_restore_device_tables(struct vgic_its *its)
{
	return -ENXIO;
}
static int vgic_its_save_cte(struct vgic_its *its,
			     struct its_collection *collection,
			     gpa_t gpa, int esz)
{
	u64 val;

	val = (1ULL << KVM_ITS_CTE_VALID_SHIFT |
	       ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
	       collection->collection_id);
	val = cpu_to_le64(val);
	return kvm_write_guest(its->dev->kvm, gpa, &val, esz);
}

static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
{
	struct its_collection *collection;
	struct kvm *kvm = its->dev->kvm;
	u32 target_addr, coll_id;
	u64 val;
	int ret;

	BUG_ON(esz > sizeof(val));
	ret = kvm_read_guest(kvm, gpa, &val, esz);
	if (ret)
		return ret;
	val = le64_to_cpu(val);
	if (!(val & KVM_ITS_CTE_VALID_MASK))
		return 0;

	target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
	coll_id = val & KVM_ITS_CTE_ICID_MASK;

	if (target_addr >= atomic_read(&kvm->online_vcpus))
		return -EINVAL;

	collection = find_collection(its, coll_id);
	if (collection)
		return -EEXIST;
	ret = vgic_its_alloc_collection(its, &collection, coll_id);
	if (ret)
		return ret;
	collection->target_addr = target_addr;
	return 1;
}
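
/*
 * The collection table entry format used by the two functions above:
 * bit [63] is the valid bit, the target redistributor (the VCPU index)
 * sits at KVM_ITS_CTE_RDBASE_SHIFT, and the low bits hold the
 * collection ID.
 */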
/**
 * vgic_its_save_collection_table - Save the collection table into
 * guest RAM
 */
static int vgic_its_save_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	struct its_collection *collection;
	u64 val;
	gpa_t gpa;
	size_t max_size, filled = 0;
	int ret, cte_esz = abi->cte_esz;

	gpa = BASER_ADDRESS(its->baser_coll_table);
	if (!gpa)
		return 0;

	max_size = GITS_BASER_NR_PAGES(its->baser_coll_table) * SZ_64K;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
		if (ret)
			return ret;
		gpa += cte_esz;
		filled += cte_esz;
	}

	if (filled == max_size)
		return 0;

	/*
	 * table is not fully filled, add a last dummy element
	 * with valid bit unset
	 */
	val = 0;
	BUG_ON(cte_esz > sizeof(val));
	ret = kvm_write_guest(its->dev->kvm, gpa, &val, cte_esz);
	return ret;
}
/**
 * vgic_its_restore_collection_table - reads the collection table
 * in guest memory and restores the ITS internal state. Requires the
 * BASER registers to be restored before.
 */
static int vgic_its_restore_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int cte_esz = abi->cte_esz;
	size_t max_size, read = 0;
	gpa_t gpa;
	int ret = 0;

	if (!(its->baser_coll_table & GITS_BASER_VALID))
		return 0;

	gpa = BASER_ADDRESS(its->baser_coll_table);

	max_size = GITS_BASER_NR_PAGES(its->baser_coll_table) * SZ_64K;

	while (read < max_size) {
		ret = vgic_its_restore_cte(its, gpa, cte_esz);
		if (ret <= 0)
			break;
		gpa += cte_esz;
		read += cte_esz;
	}
	/* A positive value means we only stepped over valid entries. */
	return ret > 0 ? 0 : ret;
}
/**
 * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
 * according to v0 ABI
 */
static int vgic_its_save_tables_v0(struct vgic_its *its)
{
	struct kvm *kvm = its->dev->kvm;
	int ret;

	mutex_lock(&kvm->lock);
	mutex_lock(&its->its_lock);

	if (!lock_all_vcpus(kvm)) {
		mutex_unlock(&its->its_lock);
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	ret = vgic_its_save_device_tables(its);
	if (ret)
		goto out;

	ret = vgic_its_save_collection_table(its);

out:
	unlock_all_vcpus(kvm);
	mutex_unlock(&its->its_lock);
	mutex_unlock(&kvm->lock);
	return ret;
}
/**
 * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
 * to internal data structs according to V0 ABI
 */
static int vgic_its_restore_tables_v0(struct vgic_its *its)
{
	struct kvm *kvm = its->dev->kvm;
	int ret;

	mutex_lock(&kvm->lock);
	mutex_lock(&its->its_lock);

	if (!lock_all_vcpus(kvm)) {
		mutex_unlock(&its->its_lock);
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	ret = vgic_its_restore_collection_table(its);
	if (ret)
		goto out;

	ret = vgic_its_restore_device_tables(its);

out:
	unlock_all_vcpus(kvm);
	mutex_unlock(&its->its_lock);
	mutex_unlock(&kvm->lock);

	if (ret)
		return ret;

	/*
	 * On restore path, MSI injections can happen before the
	 * first VCPU run so let's complete the GIC init here.
	 */
	return kvm_vgic_map_resources(its->dev->kvm);
}
static int vgic_its_commit_v0(struct vgic_its *its)
{
	const struct vgic_its_abi *abi;

	abi = vgic_its_get_abi(its);
	its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
	its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;

	its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);

	its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);
	return 0;
}
static int vgic_its_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_ITS_ADDR_TYPE:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		case KVM_DEV_ARM_ITS_SAVE_TABLES:
			return 0;
		case KVM_DEV_ARM_ITS_RESTORE_TABLES:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
		return vgic_its_has_attr_regs(dev, attr);
	}
	return -ENXIO;
}
static int vgic_its_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct vgic_its *its = dev->private;
	int ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;
		u64 addr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
					addr, SZ_64K);
		if (ret)
			return ret;

		its->vgic_its_base = addr;

		return 0;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		const struct vgic_its_abi *abi = vgic_its_get_abi(its);

		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			its->initialized = true;

			return 0;
		case KVM_DEV_ARM_ITS_SAVE_TABLES:
			return abi->save_tables(its);
		case KVM_DEV_ARM_ITS_RESTORE_TABLES:
			return abi->restore_tables(its);
		}
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_its_attr_regs_access(dev, attr, &reg, true);
	}
	}
	return -ENXIO;
}
static int vgic_its_get_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		struct vgic_its *its = dev->private;
		u64 addr = its->vgic_its_base;
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;
		int ret;

		ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	default:
		return -ENXIO;
	}

	return 0;
}
static struct kvm_device_ops kvm_arm_vgic_its_ops = {
	.name = "kvm-arm-vgic-its",
	.create = vgic_its_create,
	.destroy = vgic_its_destroy,
	.set_attr = vgic_its_set_attr,
	.get_attr = vgic_its_get_attr,
	.has_attr = vgic_its_has_attr,
};

int kvm_vgic_register_its_device(void)
{
	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
				       KVM_DEV_TYPE_ARM_VGIC_ITS);
}
/*
 * Registers all ITSes with the kvm_io_bus framework.
 * To follow the existing VGIC initialization sequence, this has to be
 * done as late as possible, just before the first VCPU runs.
 */
int vgic_register_its_iodevs(struct kvm *kvm)
{
	struct kvm_device *dev;
	int ret = 0;

	list_for_each_entry(dev, &kvm->devices, vm_node) {
		if (dev->ops != &kvm_arm_vgic_its_ops)
			continue;

		ret = vgic_register_its_iodev(kvm, dev->private);
		if (ret)
			/*
			 * We don't need to care about tearing down previously
			 * registered ITSes, as the kvm_io_bus framework removes
			 * them for us if the VM gets destroyed.
			 */
			break;
	}

	return ret;
}