struct iommu_group *grp;
};
+/*
+ * A container needs to remember which preregistered regions it has
+ * referenced so it can do proper cleanup at userspace process exit.
+ */
+struct tce_iommu_prereg {
+ struct list_head next;
+ struct mm_iommu_table_group_mem_t *mem;
+};
+
/*
* The container descriptor supports only a single group per container.
* Required by the API as the container is not supplied with the IOMMU group
struct mm_struct *mm;
struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
struct list_head group_list;
+ struct list_head prereg_list;
};
static long tce_iommu_mm_set(struct tce_container *container)
return 0;
}
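+/*
+ * Drop the container's reference on a preregistered region and remove
+ * its tracking entry from the per-container list.
+ */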
+static long tce_iommu_prereg_free(struct tce_container *container,
+ struct tce_iommu_prereg *tcemem)
+{
+ long ret;
+
+ ret = mm_iommu_put(container->mm, tcemem->mem);
+ if (ret)
+ return ret;
+
+ list_del(&tcemem->next);
+ kfree(tcemem);
+
+ return 0;
+}
+
static long tce_iommu_unregister_pages(struct tce_container *container,
__u64 vaddr, __u64 size)
{
struct mm_iommu_table_group_mem_t *mem;
+ struct tce_iommu_prereg *tcemem;
+ bool found = false;
if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
return -EINVAL;
if (!mem)
return -ENOENT;
- return mm_iommu_put(container->mm, mem);
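+ /*
+ * Make sure this container actually referenced the region before
+ * dropping it; otherwise report it as not found.
+ */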
+ list_for_each_entry(tcemem, &container->prereg_list, next) {
+ if (tcemem->mem == mem) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -ENOENT;
+
+ return tce_iommu_prereg_free(container, tcemem);
}
static long tce_iommu_register_pages(struct tce_container *container,
{
long ret = 0;
struct mm_iommu_table_group_mem_t *mem = NULL;
+ struct tce_iommu_prereg *tcemem;
unsigned long entries = size >> PAGE_SHIFT;
if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
((vaddr + size) < vaddr))
return -EINVAL;
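+ /*
+ * A container may reference a given preregistered region only once;
+ * reject a duplicate registration with -EBUSY.
+ */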
+ mem = mm_iommu_find(container->mm, vaddr, entries);
+ if (mem) {
+ list_for_each_entry(tcemem, &container->prereg_list, next) {
+ if (tcemem->mem == mem)
+ return -EBUSY;
+ }
+ }
+
ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
if (ret)
return ret;
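+ /*
+ * Remember the reference so it can be dropped at unregister time or
+ * on container release.
+ */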
+ tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
+ if (!tcemem) {
+ mm_iommu_put(container->mm, mem);
+ return -ENOMEM;
+ }
+
+ tcemem->mem = mem;
+ list_add(&tcemem->next, &container->prereg_list);
+
container->enabled = true;
return 0;
mutex_init(&container->lock);
INIT_LIST_HEAD_RCU(&container->group_list);
+ INIT_LIST_HEAD_RCU(&container->prereg_list);
container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;
tce_iommu_free_table(container, tbl);
}
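+ /*
+ * Drop references on any preregistered regions left behind by
+ * userspace; a failure here is unexpected, hence the WARN_ON_ONCE.
+ */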
+ while (!list_empty(&container->prereg_list)) {
+ struct tce_iommu_prereg *tcemem;
+
+ tcemem = list_first_entry(&container->prereg_list,
+ struct tce_iommu_prereg, next);
+ WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem));
+ }
+
tce_iommu_disable(container);
if (container->mm)
mmdrop(container->mm);