From: Joerg Roedel
Date: Tue, 21 Jul 2015 08:41:21 +0000 (+0200)
Subject: iommu/vt-d: Split up iommu->domains array
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=8bf478163e69;p=linux-beck.git

iommu/vt-d: Split up iommu->domains array

This array is indexed by the domain-id and contains the pointers to the
domains attached to this iommu. Modern systems support 65536 domain ids,
so that this array has a size of 512kb, per iommu.

This is a huge waste of space, as the array is usually sparsely
populated. This patch makes the array two-dimensional and allocates the
memory for the domain pointers on-demand.

Signed-off-by: Joerg Roedel
---

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 1c2d6126e5fd..90ab4b0d975c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -571,13 +571,32 @@ static struct kmem_cache *iommu_devinfo_cache;
 
 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
 {
-	return iommu->domains[did];
+	struct dmar_domain **domains;
+	int idx = did >> 8;
+
+	domains = iommu->domains[idx];
+	if (!domains)
+		return NULL;
+
+	return domains[did & 0xff];
 }
 
 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
 			     struct dmar_domain *domain)
 {
-	iommu->domains[did] = domain;
+	struct dmar_domain **domains;
+	int idx = did >> 8;
+
+	if (!iommu->domains[idx]) {
+		size_t size = 256 * sizeof(struct dmar_domain *);
+		iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
+	}
+
+	domains = iommu->domains[idx];
+	if (WARN_ON(!domains))
+		return;
+	else
+		domains[did & 0xff] = domain;
 }
 
 static inline void *alloc_pgtable_page(int node)
@@ -1530,35 +1549,43 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
 
 static int iommu_init_domains(struct intel_iommu *iommu)
 {
-	unsigned long ndomains;
-	unsigned long nlongs;
+	u32 ndomains, nlongs;
+	size_t size;
 
 	ndomains = cap_ndoms(iommu->cap);
-	pr_debug("%s: Number of Domains supported <%ld>\n",
+	pr_debug("%s: Number of Domains supported <%d>\n",
 		 iommu->name, ndomains);
 	nlongs = BITS_TO_LONGS(ndomains);
 
 	spin_lock_init(&iommu->lock);
 
-	/* TBD: there might be 64K domains,
-	 * consider other allocation for future chip
-	 */
 	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
 	if (!iommu->domain_ids) {
 		pr_err("%s: Allocating domain id array failed\n",
 		       iommu->name);
 		return -ENOMEM;
 	}
-	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
-			GFP_KERNEL);
-	if (!iommu->domains) {
+
+	size = ((ndomains >> 8) + 1) * sizeof(struct dmar_domain **);
+	iommu->domains = kzalloc(size, GFP_KERNEL);
+
+	if (iommu->domains) {
+		size = 256 * sizeof(struct dmar_domain *);
+		iommu->domains[0] = kzalloc(size, GFP_KERNEL);
+	}
+
+	if (!iommu->domains || !iommu->domains[0]) {
 		pr_err("%s: Allocating domain array failed\n",
 		       iommu->name);
 		kfree(iommu->domain_ids);
+		kfree(iommu->domains);
 		iommu->domain_ids = NULL;
+		iommu->domains = NULL;
 		return -ENOMEM;
 	}
 
+
+
 	/*
 	 * If Caching mode is set, then invalid translations are tagged
 	 * with domain-id 0, hence we need to pre-allocate it. We also
@@ -1600,6 +1627,11 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
 static void free_dmar_iommu(struct intel_iommu *iommu)
 {
 	if ((iommu->domains) && (iommu->domain_ids)) {
+		int elems = (cap_ndoms(iommu->cap) >> 8) + 1;
+		int i;
+
+		for (i = 0; i < elems; i++)
+			kfree(iommu->domains[i]);
 		kfree(iommu->domains);
 		kfree(iommu->domain_ids);
 		iommu->domains = NULL;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index d9a366d24e3b..6240063bdcac 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -344,7 +344,7 @@ struct intel_iommu {
 
 #ifdef CONFIG_INTEL_IOMMU
 	unsigned long	*domain_ids;	/* bitmap of domains */
-	struct dmar_domain **domains;	/* ptr to domains */
+	struct dmar_domain ***domains;	/* ptr to domains */
 	spinlock_t	lock;		/* protect context, domain ids */
 	struct root_entry *root_entry;	/* virtual address */
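
For reference, the two-level indexing scheme the patch introduces can be shown
in isolation. The sketch below is a self-contained userspace approximation, not
the kernel code: names such as demo_get()/demo_set() are illustrative, calloc()
stands in for kzalloc()/GFP_ATOMIC, and there is no locking.

/*
 * Sketch: a 16-bit domain id is split into a top-level index (id >> 8)
 * and a second-level index (id & 0xff); 256-entry second-level pages
 * are allocated only when an id in that range is first written.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct demo_domain { int dummy; };

static struct demo_domain ***table;	/* table[id >> 8][id & 0xff] */

static struct demo_domain *demo_get(uint16_t id)
{
	struct demo_domain **page = table[id >> 8];

	return page ? page[id & 0xff] : NULL;
}

static int demo_set(uint16_t id, struct demo_domain *d)
{
	if (!table[id >> 8]) {
		table[id >> 8] = calloc(256, sizeof(struct demo_domain *));
		if (!table[id >> 8])
			return -1;
	}
	table[id >> 8][id & 0xff] = d;
	return 0;
}

int main(void)
{
	struct demo_domain dom;
	unsigned int ndomains = 65536;

	/* top level: one pointer per 256-entry page (the kernel adds +1
	 * to cover a domain count that is not a multiple of 256) */
	table = calloc(ndomains >> 8, sizeof(*table));
	if (!table)
		return 1;

	demo_set(0x1234, &dom);
	printf("0x1234 -> %p, 0x1235 -> %p\n",
	       (void *)demo_get(0x1234), (void *)demo_get(0x1235));
	return 0;
}

With this layout, an IOMMU that only ever uses a handful of domain ids pays for
the small top-level array plus a single 256-entry second-level page, instead of
the full 64K-entry pointer array described in the commit message.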