2 * Copyright (c) 2006, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 * Author: Fenghua Yu <fenghua.yu@intel.com>
24 #include <linux/init.h>
25 #include <linux/bitmap.h>
26 #include <linux/debugfs.h>
27 #include <linux/export.h>
28 #include <linux/slab.h>
29 #include <linux/irq.h>
30 #include <linux/interrupt.h>
31 #include <linux/spinlock.h>
32 #include <linux/pci.h>
33 #include <linux/dmar.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/mempool.h>
36 #include <linux/timer.h>
37 #include <linux/iova.h>
38 #include <linux/iommu.h>
39 #include <linux/intel-iommu.h>
40 #include <linux/syscore_ops.h>
41 #include <linux/tboot.h>
42 #include <linux/dmi.h>
43 #include <linux/pci-ats.h>
44 #include <linux/memblock.h>
45 #include <asm/irq_remapping.h>
46 #include <asm/cacheflush.h>
47 #include <asm/iommu.h>
49 #include "irq_remapping.h"
52 #define ROOT_SIZE VTD_PAGE_SIZE
53 #define CONTEXT_SIZE VTD_PAGE_SIZE
55 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
56 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
57 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
59 #define IOAPIC_RANGE_START (0xfee00000)
60 #define IOAPIC_RANGE_END (0xfeefffff)
61 #define IOVA_START_ADDR (0x1000)
63 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
65 #define MAX_AGAW_WIDTH 64
66 #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
68 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
69 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
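/*
 * Illustrative example (assuming VTD_PAGE_SHIFT == 12): for the default
 * 48-bit guest address width, __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1 and
 * __DOMAIN_MAX_ADDR(48) == (1ULL << 48) - 1, i.e. a 256TiB address space
 * made of 4KiB VT-d pages.
 */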
71 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
72 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
73 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
74 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
75 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
77 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
78 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
79 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
81 /* page table handling */
82 #define LEVEL_STRIDE (9)
83 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
86 * This bitmap is used to advertise the page sizes our hardware supports
87 * to the IOMMU core, which will then use this information to split
88 * physically contiguous memory regions it is mapping into page sizes
91 * Traditionally the IOMMU core just handed us the mappings directly,
92 * after making sure the size is a power-of-two multiple of 4KiB and that the
93 * mapping has natural alignment.
95 * To retain this behavior, we currently advertise that we support
96 * all page sizes that are a power-of-two multiple of 4KiB.
98 * If at some point we'd like to utilize the IOMMU core's new behavior,
99 * we could change this to advertise the real page sizes we support.
101 #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
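/*
 * ~0xFFFUL leaves every bit from bit 12 upwards set, so the IOMMU core sees
 * support for all power-of-two sizes of 4KiB and larger, matching the
 * behaviour described above.
 */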
103 static inline int agaw_to_level(int agaw)
108 static inline int agaw_to_width(int agaw)
110 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
113 static inline int width_to_agaw(int width)
115 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
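/*
 * Example: a 48-bit address width gives width_to_agaw(48) ==
 * DIV_ROUND_UP(18, 9) == 2, and agaw_to_width(2) == min(30 + 18, 64) == 48,
 * i.e. the two helpers round-trip for widths that are 30 plus a multiple of 9.
 */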
118 static inline unsigned int level_to_offset_bits(int level)
120 return (level - 1) * LEVEL_STRIDE;
123 static inline int pfn_level_offset(unsigned long pfn, int level)
125 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
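/*
 * Example: level 1 indexes pfn bits 0-8, level 2 bits 9-17, and so on;
 * pfn_level_offset(pfn, 2) == (pfn >> 9) & 0x1ff.
 */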
128 static inline unsigned long level_mask(int level)
130 return -1UL << level_to_offset_bits(level);
133 static inline unsigned long level_size(int level)
135 return 1UL << level_to_offset_bits(level);
138 static inline unsigned long align_to_level(unsigned long pfn, int level)
140 return (pfn + level_size(level) - 1) & level_mask(level);
143 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
145 return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
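/*
 * Example: lvl_to_nr_pages(1) == 1 (a 4KiB leaf), lvl_to_nr_pages(2) == 512
 * (a 2MiB superpage) and lvl_to_nr_pages(3) == 512 * 512 (a 1GiB superpage).
 */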
148 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
149 are never going to work. */
150 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
152 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
155 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
157 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
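/*
 * On x86 PAGE_SHIFT and VTD_PAGE_SHIFT are both 12, so these conversions
 * are no-ops; they only matter if MM pages ever become larger than 4KiB.
 */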
159 static inline unsigned long page_to_dma_pfn(struct page *pg)
161 return mm_to_dma_pfn(page_to_pfn(pg));
163 static inline unsigned long virt_to_dma_pfn(void *p)
165 return page_to_dma_pfn(virt_to_page(p));
168 /* global iommu list, set NULL for ignored DMAR units */
169 static struct intel_iommu **g_iommus;
171 static void __init check_tylersburg_isoch(void);
172 static int rwbf_quirk;
175 * set to 1 to panic the kernel if we can't successfully enable VT-d
176 * (used when kernel is launched w/ TXT)
178 static int force_on = 0;
183 * 12-63: Context Ptr (12 - (haw-1))
190 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
191 static inline bool root_present(struct root_entry *root)
193 return (root->val & 1);
195 static inline void set_root_present(struct root_entry *root)
199 static inline void set_root_value(struct root_entry *root, unsigned long value)
201 root->val |= value & VTD_PAGE_MASK;
204 static inline struct context_entry *
205 get_context_addr_from_root(struct root_entry *root)
207 return (struct context_entry *)
208 (root_present(root)?phys_to_virt(
209 root->val & VTD_PAGE_MASK) :
216 * 1: fault processing disable
217 * 2-3: translation type
218 * 12-63: address space root
224 struct context_entry {
229 static inline bool context_present(struct context_entry *context)
231 return (context->lo & 1);
233 static inline void context_set_present(struct context_entry *context)
238 static inline void context_set_fault_enable(struct context_entry *context)
240 context->lo &= (((u64)-1) << 2) | 1;
243 static inline void context_set_translation_type(struct context_entry *context,
246 context->lo &= (((u64)-1) << 4) | 3;
247 context->lo |= (value & 3) << 2;
250 static inline void context_set_address_root(struct context_entry *context,
253 context->lo |= value & VTD_PAGE_MASK;
256 static inline void context_set_address_width(struct context_entry *context,
259 context->hi |= value & 7;
262 static inline void context_set_domain_id(struct context_entry *context,
265 context->hi |= (value & ((1 << 16) - 1)) << 8;
268 static inline void context_clear_entry(struct context_entry *context)
281 * 12-63: Host physical address
287 static inline void dma_clear_pte(struct dma_pte *pte)
292 static inline u64 dma_pte_addr(struct dma_pte *pte)
295 return pte->val & VTD_PAGE_MASK;
297 /* Must have a full atomic 64-bit read */
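/*
 * cmpxchg64 with old == new == 0 leaves the PTE untouched while still
 * performing a single atomic 64-bit read, which a plain load cannot
 * guarantee on 32-bit kernels.
 */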
298 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
302 static inline bool dma_pte_present(struct dma_pte *pte)
304 return (pte->val & 3) != 0;
307 static inline bool dma_pte_superpage(struct dma_pte *pte)
309 return (pte->val & (1 << 7));
312 static inline int first_pte_in_page(struct dma_pte *pte)
314 return !((unsigned long)pte & ~VTD_PAGE_MASK);
318 * This domain is a statically identity mapping domain.
319 * 1. This domain creates a static 1:1 mapping to all usable memory.
320 * 2. It maps to each iommu if successful.
321 * 3. Each iommu maps to this domain if successful.
323 static struct dmar_domain *si_domain;
324 static int hw_pass_through = 1;
326 /* devices under the same p2p bridge are owned in one domain */
327 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
329 /* domain represents a virtual machine, more than one device
330 * across iommus may be owned by one domain, e.g. a kvm guest.
332 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
334 /* si_domain contains multiple devices */
335 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
337 /* define the limit of IOMMUs supported in each domain */
339 # define IOMMU_UNITS_SUPPORTED MAX_IO_APICS
341 # define IOMMU_UNITS_SUPPORTED 64
345 int id; /* domain id */
346 int nid; /* node id */
347 DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
348 /* bitmap of iommus this domain uses */
350 struct list_head devices; /* all devices' list */
351 struct iova_domain iovad; /* iova's that belong to this domain */
353 struct dma_pte *pgd; /* virtual address */
354 int gaw; /* max guest address width */
356 /* adjusted guest address width, 0 is level 2 30-bit */
359 int flags; /* flags to find out type of domain */
361 int iommu_coherency;/* indicate coherency of iommu access */
362 int iommu_snooping; /* indicate snooping control feature*/
363 int iommu_count; /* reference count of iommu */
364 int iommu_superpage;/* Level of superpages supported:
365 0 == 4KiB (no superpages), 1 == 2MiB,
366 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
367 spinlock_t iommu_lock; /* protect iommu set in domain */
368 u64 max_addr; /* maximum mapped address */
371 /* PCI domain-device relationship */
372 struct device_domain_info {
373 struct list_head link; /* link to domain siblings */
374 struct list_head global; /* link to global list */
375 int segment; /* PCI domain */
376 u8 bus; /* PCI bus number */
377 u8 devfn; /* PCI devfn number */
378 struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
379 struct intel_iommu *iommu; /* IOMMU used by this device */
380 struct dmar_domain *domain; /* pointer to domain */
383 static void flush_unmaps_timeout(unsigned long data);
385 DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
387 #define HIGH_WATER_MARK 250
388 struct deferred_flush_tables {
390 struct iova *iova[HIGH_WATER_MARK];
391 struct dmar_domain *domain[HIGH_WATER_MARK];
394 static struct deferred_flush_tables *deferred_flush;
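/*
 * Lazy unmap batching: freed IOVAs are queued in these per-IOMMU tables and
 * the IOTLB is flushed in bulk once HIGH_WATER_MARK entries accumulate or
 * unmap_timer fires, rather than flushing on every single unmap.
 */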
396 /* bitmap for indexing intel_iommus */
397 static int g_num_of_iommus;
399 static DEFINE_SPINLOCK(async_umap_flush_lock);
400 static LIST_HEAD(unmaps_to_do);
403 static long list_size;
405 static void domain_remove_dev_info(struct dmar_domain *domain);
407 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
408 int dmar_disabled = 0;
410 int dmar_disabled = 1;
411 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
413 int intel_iommu_enabled = 0;
414 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
416 static int dmar_map_gfx = 1;
417 static int dmar_forcedac;
418 static int intel_iommu_strict;
419 static int intel_iommu_superpage = 1;
421 int intel_iommu_gfx_mapped;
422 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
424 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
425 static DEFINE_SPINLOCK(device_domain_lock);
426 static LIST_HEAD(device_domain_list);
428 static struct iommu_ops intel_iommu_ops;
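/*
 * Parse the "intel_iommu=" boot parameter.  The options handled below are
 * on, off, igfx_off, forcedac, strict and sp_off; multiple options may be
 * given, separated by commas.
 */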
430 static int __init intel_iommu_setup(char *str)
435 if (!strncmp(str, "on", 2)) {
437 printk(KERN_INFO "Intel-IOMMU: enabled\n");
438 } else if (!strncmp(str, "off", 3)) {
440 printk(KERN_INFO "Intel-IOMMU: disabled\n");
441 } else if (!strncmp(str, "igfx_off", 8)) {
444 "Intel-IOMMU: disable GFX device mapping\n");
445 } else if (!strncmp(str, "forcedac", 8)) {
447 "Intel-IOMMU: Forcing DAC for PCI devices\n");
449 } else if (!strncmp(str, "strict", 6)) {
451 "Intel-IOMMU: disable batched IOTLB flush\n");
452 intel_iommu_strict = 1;
453 } else if (!strncmp(str, "sp_off", 6)) {
455 "Intel-IOMMU: disable supported super page\n");
456 intel_iommu_superpage = 0;
459 str += strcspn(str, ",");
465 __setup("intel_iommu=", intel_iommu_setup);
467 static struct kmem_cache *iommu_domain_cache;
468 static struct kmem_cache *iommu_devinfo_cache;
469 static struct kmem_cache *iommu_iova_cache;
471 static inline void *alloc_pgtable_page(int node)
476 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
478 vaddr = page_address(page);
482 static inline void free_pgtable_page(void *vaddr)
484 free_page((unsigned long)vaddr);
487 static inline void *alloc_domain_mem(void)
489 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
492 static void free_domain_mem(void *vaddr)
494 kmem_cache_free(iommu_domain_cache, vaddr);
497 static inline void * alloc_devinfo_mem(void)
499 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
502 static inline void free_devinfo_mem(void *vaddr)
504 kmem_cache_free(iommu_devinfo_cache, vaddr);
507 struct iova *alloc_iova_mem(void)
509 return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
512 void free_iova_mem(struct iova *iova)
514 kmem_cache_free(iommu_iova_cache, iova);
518 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
523 sagaw = cap_sagaw(iommu->cap);
524 for (agaw = width_to_agaw(max_gaw);
526 if (test_bit(agaw, &sagaw))
534 * Calculate max SAGAW for each iommu.
536 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
538 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
542 * Calculate agaw for each iommu.
543 * "SAGAW" may be different across iommus; use a default agaw and fall
544 * back to a smaller supported agaw for iommus that don't support the default.
546 int iommu_calculate_agaw(struct intel_iommu *iommu)
548 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
551 /* This function only returns a single iommu in a domain */
552 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
556 /* si_domain and vm domain should not get here. */
557 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
558 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
560 iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
561 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
564 return g_iommus[iommu_id];
567 static void domain_update_iommu_coherency(struct dmar_domain *domain)
571 i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
573 domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;
575 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
576 if (!ecap_coherent(g_iommus[i]->ecap)) {
577 domain->iommu_coherency = 0;
583 static void domain_update_iommu_snooping(struct dmar_domain *domain)
587 domain->iommu_snooping = 1;
589 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
590 if (!ecap_sc_support(g_iommus[i]->ecap)) {
591 domain->iommu_snooping = 0;
597 static void domain_update_iommu_superpage(struct dmar_domain *domain)
599 struct dmar_drhd_unit *drhd;
600 struct intel_iommu *iommu = NULL;
603 if (!intel_iommu_superpage) {
604 domain->iommu_superpage = 0;
608 /* set iommu_superpage to the smallest common denominator */
609 for_each_active_iommu(iommu, drhd) {
610 mask &= cap_super_page_val(iommu->cap);
615 domain->iommu_superpage = fls(mask);
618 /* Some capabilities may be different across iommus */
619 static void domain_update_iommu_cap(struct dmar_domain *domain)
621 domain_update_iommu_coherency(domain);
622 domain_update_iommu_snooping(domain);
623 domain_update_iommu_superpage(domain);
626 static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
628 struct dmar_drhd_unit *drhd = NULL;
631 for_each_active_drhd_unit(drhd) {
632 if (segment != drhd->segment)
635 for (i = 0; i < drhd->devices_cnt; i++) {
636 if (drhd->devices[i] &&
637 drhd->devices[i]->bus->number == bus &&
638 drhd->devices[i]->devfn == devfn)
640 if (drhd->devices[i] &&
641 drhd->devices[i]->subordinate &&
642 drhd->devices[i]->subordinate->number <= bus &&
643 drhd->devices[i]->subordinate->busn_res.end >= bus)
647 if (drhd->include_all)
654 static void domain_flush_cache(struct dmar_domain *domain,
655 void *addr, int size)
657 if (!domain->iommu_coherency)
658 clflush_cache_range(addr, size);
661 /* Gets context entry for a given bus and devfn */
662 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
665 struct root_entry *root;
666 struct context_entry *context;
667 unsigned long phy_addr;
670 spin_lock_irqsave(&iommu->lock, flags);
671 root = &iommu->root_entry[bus];
672 context = get_context_addr_from_root(root);
674 context = (struct context_entry *)
675 alloc_pgtable_page(iommu->node);
677 spin_unlock_irqrestore(&iommu->lock, flags);
680 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
681 phy_addr = virt_to_phys((void *)context);
682 set_root_value(root, phy_addr);
683 set_root_present(root);
684 __iommu_flush_cache(iommu, root, sizeof(*root));
686 spin_unlock_irqrestore(&iommu->lock, flags);
687 return &context[devfn];
690 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
692 struct root_entry *root;
693 struct context_entry *context;
697 spin_lock_irqsave(&iommu->lock, flags);
698 root = &iommu->root_entry[bus];
699 context = get_context_addr_from_root(root);
704 ret = context_present(&context[devfn]);
706 spin_unlock_irqrestore(&iommu->lock, flags);
710 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
712 struct root_entry *root;
713 struct context_entry *context;
716 spin_lock_irqsave(&iommu->lock, flags);
717 root = &iommu->root_entry[bus];
718 context = get_context_addr_from_root(root);
720 context_clear_entry(&context[devfn]);
721 __iommu_flush_cache(iommu, &context[devfn], \
724 spin_unlock_irqrestore(&iommu->lock, flags);
727 static void free_context_table(struct intel_iommu *iommu)
729 struct root_entry *root;
732 struct context_entry *context;
734 spin_lock_irqsave(&iommu->lock, flags);
735 if (!iommu->root_entry) {
738 for (i = 0; i < ROOT_ENTRY_NR; i++) {
739 root = &iommu->root_entry[i];
740 context = get_context_addr_from_root(root);
742 free_pgtable_page(context);
744 free_pgtable_page(iommu->root_entry);
745 iommu->root_entry = NULL;
747 spin_unlock_irqrestore(&iommu->lock, flags);
750 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
751 unsigned long pfn, int target_level)
753 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
754 struct dma_pte *parent, *pte = NULL;
755 int level = agaw_to_level(domain->agaw);
758 BUG_ON(!domain->pgd);
760 if (addr_width < BITS_PER_LONG && pfn >> addr_width)
761 /* Address beyond IOMMU's addressing capabilities. */
764 parent = domain->pgd;
769 offset = pfn_level_offset(pfn, level);
770 pte = &parent[offset];
771 if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
773 if (level == target_level)
776 if (!dma_pte_present(pte)) {
779 tmp_page = alloc_pgtable_page(domain->nid);
784 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
785 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
786 if (cmpxchg64(&pte->val, 0ULL, pteval)) {
787 /* Someone else set it while we were thinking; use theirs. */
788 free_pgtable_page(tmp_page);
791 domain_flush_cache(domain, pte, sizeof(*pte));
794 parent = phys_to_virt(dma_pte_addr(pte));
802 /* return the pte for an address at a specific level */
803 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
805 int level, int *large_page)
807 struct dma_pte *parent, *pte = NULL;
808 int total = agaw_to_level(domain->agaw);
811 parent = domain->pgd;
812 while (level <= total) {
813 offset = pfn_level_offset(pfn, total);
814 pte = &parent[offset];
818 if (!dma_pte_present(pte)) {
823 if (pte->val & DMA_PTE_LARGE_PAGE) {
828 parent = phys_to_virt(dma_pte_addr(pte));
834 /* clear last level pte; a tlb flush should follow */
835 static int dma_pte_clear_range(struct dmar_domain *domain,
836 unsigned long start_pfn,
837 unsigned long last_pfn)
839 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
840 unsigned int large_page = 1;
841 struct dma_pte *first_pte, *pte;
843 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
844 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
845 BUG_ON(start_pfn > last_pfn);
847 /* we don't need lock here; nobody else touches the iova range */
850 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
852 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
857 start_pfn += lvl_to_nr_pages(large_page);
859 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
861 domain_flush_cache(domain, first_pte,
862 (void *)pte - (void *)first_pte);
864 } while (start_pfn && start_pfn <= last_pfn);
866 return min_t(int, (large_page - 1) * 9, MAX_AGAW_PFN_WIDTH);
869 static void dma_pte_free_level(struct dmar_domain *domain, int level,
870 struct dma_pte *pte, unsigned long pfn,
871 unsigned long start_pfn, unsigned long last_pfn)
873 pfn = max(start_pfn, pfn);
874 pte = &pte[pfn_level_offset(pfn, level)];
877 unsigned long level_pfn;
878 struct dma_pte *level_pte;
880 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
883 level_pfn = pfn & level_mask(level - 1);
884 level_pte = phys_to_virt(dma_pte_addr(pte));
887 dma_pte_free_level(domain, level - 1, level_pte,
888 level_pfn, start_pfn, last_pfn);
890 /* If range covers entire pagetable, free it */
891 if (!(start_pfn > level_pfn ||
892 last_pfn < level_pfn + level_size(level))) {
894 domain_flush_cache(domain, pte, sizeof(*pte));
895 free_pgtable_page(level_pte);
898 pfn += level_size(level);
899 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
902 /* free page table pages. last level pte should already be cleared */
903 static void dma_pte_free_pagetable(struct dmar_domain *domain,
904 unsigned long start_pfn,
905 unsigned long last_pfn)
907 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
909 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
910 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
911 BUG_ON(start_pfn > last_pfn);
913 /* We don't need lock here; nobody else touches the iova range */
914 dma_pte_free_level(domain, agaw_to_level(domain->agaw),
915 domain->pgd, 0, start_pfn, last_pfn);
918 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
919 free_pgtable_page(domain->pgd);
925 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
927 struct root_entry *root;
930 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
934 __iommu_flush_cache(iommu, root, ROOT_SIZE);
936 spin_lock_irqsave(&iommu->lock, flags);
937 iommu->root_entry = root;
938 spin_unlock_irqrestore(&iommu->lock, flags);
943 static void iommu_set_root_entry(struct intel_iommu *iommu)
949 addr = iommu->root_entry;
951 raw_spin_lock_irqsave(&iommu->register_lock, flag);
952 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
954 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
956 /* Make sure hardware completes it */
957 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
958 readl, (sts & DMA_GSTS_RTPS), sts);
960 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
963 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
968 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
971 raw_spin_lock_irqsave(&iommu->register_lock, flag);
972 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
974 /* Make sure hardware completes it */
975 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
976 readl, (!(val & DMA_GSTS_WBFS)), val);
978 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
981 /* return value determines whether we need a write buffer flush */
982 static void __iommu_flush_context(struct intel_iommu *iommu,
983 u16 did, u16 source_id, u8 function_mask,
990 case DMA_CCMD_GLOBAL_INVL:
991 val = DMA_CCMD_GLOBAL_INVL;
993 case DMA_CCMD_DOMAIN_INVL:
994 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
996 case DMA_CCMD_DEVICE_INVL:
997 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
998 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1003 val |= DMA_CCMD_ICC;
1005 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1006 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1008 /* Make sure hardware completes it */
1009 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1010 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1012 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1015 /* return value determines whether we need a write buffer flush */
1016 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1017 u64 addr, unsigned int size_order, u64 type)
1019 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1020 u64 val = 0, val_iva = 0;
1024 case DMA_TLB_GLOBAL_FLUSH:
1025 /* global flush doesn't need to set IVA_REG */
1026 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1028 case DMA_TLB_DSI_FLUSH:
1029 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1031 case DMA_TLB_PSI_FLUSH:
1032 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1033 /* Note: always flush non-leaf currently */
1034 val_iva = size_order | addr;
1039 /* Note: set drain read/write */
1042 * This is probably meant to be extra safe; it looks like we can
1043 * ignore it without any impact.
1045 if (cap_read_drain(iommu->cap))
1046 val |= DMA_TLB_READ_DRAIN;
1048 if (cap_write_drain(iommu->cap))
1049 val |= DMA_TLB_WRITE_DRAIN;
1051 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1052 /* Note: Only uses first TLB reg currently */
1054 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1055 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1057 /* Make sure hardware completes it */
1058 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1059 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1061 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1063 /* check IOTLB invalidation granularity */
1064 if (DMA_TLB_IAIG(val) == 0)
1065 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
1066 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1067 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
1068 (unsigned long long)DMA_TLB_IIRG(type),
1069 (unsigned long long)DMA_TLB_IAIG(val));
1072 static struct device_domain_info *iommu_support_dev_iotlb(
1073 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
1076 unsigned long flags;
1077 struct device_domain_info *info;
1078 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
1080 if (!ecap_dev_iotlb_support(iommu->ecap))
1086 spin_lock_irqsave(&device_domain_lock, flags);
1087 list_for_each_entry(info, &domain->devices, link)
1088 if (info->bus == bus && info->devfn == devfn) {
1092 spin_unlock_irqrestore(&device_domain_lock, flags);
1094 if (!found || !info->dev)
1097 if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
1100 if (!dmar_find_matched_atsr_unit(info->dev))
1103 info->iommu = iommu;
1108 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1113 pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
1116 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1118 if (!info->dev || !pci_ats_enabled(info->dev))
1121 pci_disable_ats(info->dev);
1124 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1125 u64 addr, unsigned mask)
1128 unsigned long flags;
1129 struct device_domain_info *info;
1131 spin_lock_irqsave(&device_domain_lock, flags);
1132 list_for_each_entry(info, &domain->devices, link) {
1133 if (!info->dev || !pci_ats_enabled(info->dev))
1136 sid = info->bus << 8 | info->devfn;
1137 qdep = pci_ats_queue_depth(info->dev);
1138 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1140 spin_unlock_irqrestore(&device_domain_lock, flags);
1143 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1144 unsigned long pfn, unsigned int pages, int map)
1146 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1147 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
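	/*
	 * Example: a 3-page request rounds up to mask == 2, i.e. the hardware
	 * is asked to invalidate a naturally aligned 4-page region.
	 */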
1152 * Fallback to domain selective flush if no PSI support or the size is too big.
1154 * PSI requires page size to be 2 ^ x, and the base address is naturally
1155 * aligned to the size.
1157 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1158 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1161 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1165 * In caching mode, changes of pages from non-present to present require
1166 * flush. However, device IOTLB doesn't need to be flushed in this case.
1168 if (!cap_caching_mode(iommu->cap) || !map)
1169 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
1172 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1175 unsigned long flags;
1177 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1178 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1179 pmen &= ~DMA_PMEN_EPM;
1180 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1182 /* wait for the protected region status bit to clear */
1183 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1184 readl, !(pmen & DMA_PMEN_PRS), pmen);
1186 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1189 static int iommu_enable_translation(struct intel_iommu *iommu)
1192 unsigned long flags;
1194 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1195 iommu->gcmd |= DMA_GCMD_TE;
1196 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1198 /* Make sure hardware completes it */
1199 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1200 readl, (sts & DMA_GSTS_TES), sts);
1202 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1206 static int iommu_disable_translation(struct intel_iommu *iommu)
1211 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1212 iommu->gcmd &= ~DMA_GCMD_TE;
1213 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1215 /* Make sure hardware completes it */
1216 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1217 readl, (!(sts & DMA_GSTS_TES)), sts);
1219 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1224 static int iommu_init_domains(struct intel_iommu *iommu)
1226 unsigned long ndomains;
1227 unsigned long nlongs;
1229 ndomains = cap_ndoms(iommu->cap);
1230 pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
1231 iommu->seq_id, ndomains);
1232 nlongs = BITS_TO_LONGS(ndomains);
1234 spin_lock_init(&iommu->lock);
1236 /* TBD: there might be 64K domains,
1237 * consider other allocation for future chips
1239 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1240 if (!iommu->domain_ids) {
1241 pr_err("IOMMU%d: allocating domain id array failed\n",
1245 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1247 if (!iommu->domains) {
1248 pr_err("IOMMU%d: allocating domain array failed\n",
1250 kfree(iommu->domain_ids);
1251 iommu->domain_ids = NULL;
1256 * If caching mode is set, then invalid translations are tagged
1257 * with domain id 0. Hence we need to pre-allocate it.
1259 if (cap_caching_mode(iommu->cap))
1260 set_bit(0, iommu->domain_ids);
1265 static void domain_exit(struct dmar_domain *domain);
1266 static void vm_domain_exit(struct dmar_domain *domain);
1268 static void free_dmar_iommu(struct intel_iommu *iommu)
1270 struct dmar_domain *domain;
1272 unsigned long flags;
1274 if ((iommu->domains) && (iommu->domain_ids)) {
1275 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
1276 domain = iommu->domains[i];
1277 clear_bit(i, iommu->domain_ids);
1279 spin_lock_irqsave(&domain->iommu_lock, flags);
1280 if (--domain->iommu_count == 0) {
1281 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1282 vm_domain_exit(domain);
1284 domain_exit(domain);
1286 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1290 if (iommu->gcmd & DMA_GCMD_TE)
1291 iommu_disable_translation(iommu);
1293 kfree(iommu->domains);
1294 kfree(iommu->domain_ids);
1295 iommu->domains = NULL;
1296 iommu->domain_ids = NULL;
1298 g_iommus[iommu->seq_id] = NULL;
1300 /* if all iommus are freed, free g_iommus */
1301 for (i = 0; i < g_num_of_iommus; i++) {
1306 if (i == g_num_of_iommus)
1309 /* free context mapping */
1310 free_context_table(iommu);
1313 static struct dmar_domain *alloc_domain(void)
1315 struct dmar_domain *domain;
1317 domain = alloc_domain_mem();
1322 memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
1328 static int iommu_attach_domain(struct dmar_domain *domain,
1329 struct intel_iommu *iommu)
1332 unsigned long ndomains;
1333 unsigned long flags;
1335 ndomains = cap_ndoms(iommu->cap);
1337 spin_lock_irqsave(&iommu->lock, flags);
1339 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1340 if (num >= ndomains) {
1341 spin_unlock_irqrestore(&iommu->lock, flags);
1342 printk(KERN_ERR "IOMMU: no free domain ids\n");
1347 set_bit(num, iommu->domain_ids);
1348 set_bit(iommu->seq_id, domain->iommu_bmp);
1349 iommu->domains[num] = domain;
1350 spin_unlock_irqrestore(&iommu->lock, flags);
1355 static void iommu_detach_domain(struct dmar_domain *domain,
1356 struct intel_iommu *iommu)
1358 unsigned long flags;
1362 spin_lock_irqsave(&iommu->lock, flags);
1363 ndomains = cap_ndoms(iommu->cap);
1364 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1365 if (iommu->domains[num] == domain) {
1372 clear_bit(num, iommu->domain_ids);
1373 clear_bit(iommu->seq_id, domain->iommu_bmp);
1374 iommu->domains[num] = NULL;
1376 spin_unlock_irqrestore(&iommu->lock, flags);
1379 static struct iova_domain reserved_iova_list;
1380 static struct lock_class_key reserved_rbtree_key;
1382 static int dmar_init_reserved_ranges(void)
1384 struct pci_dev *pdev = NULL;
1388 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1390 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1391 &reserved_rbtree_key);
1393 /* IOAPIC ranges shouldn't be accessed by DMA */
1394 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1395 IOVA_PFN(IOAPIC_RANGE_END));
1397 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1401 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1402 for_each_pci_dev(pdev) {
1405 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1406 r = &pdev->resource[i];
1407 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1409 iova = reserve_iova(&reserved_iova_list,
1413 printk(KERN_ERR "Reserve iova failed\n");
1421 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1423 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
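/*
 * Round the guest address width up so that (gaw - 12) is a whole number of
 * 9-bit page-table levels; e.g. a 40-bit guest width is adjusted to 48 bits.
 */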
1426 static inline int guestwidth_to_adjustwidth(int gaw)
1429 int r = (gaw - 12) % 9;
1440 static int domain_init(struct dmar_domain *domain, int guest_width)
1442 struct intel_iommu *iommu;
1443 int adjust_width, agaw;
1444 unsigned long sagaw;
1446 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1447 spin_lock_init(&domain->iommu_lock);
1449 domain_reserve_special_ranges(domain);
1451 /* calculate AGAW */
1452 iommu = domain_get_iommu(domain);
1453 if (guest_width > cap_mgaw(iommu->cap))
1454 guest_width = cap_mgaw(iommu->cap);
1455 domain->gaw = guest_width;
1456 adjust_width = guestwidth_to_adjustwidth(guest_width);
1457 agaw = width_to_agaw(adjust_width);
1458 sagaw = cap_sagaw(iommu->cap);
1459 if (!test_bit(agaw, &sagaw)) {
1460 /* hardware doesn't support it, choose a bigger one */
1461 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1462 agaw = find_next_bit(&sagaw, 5, agaw);
1466 domain->agaw = agaw;
1467 INIT_LIST_HEAD(&domain->devices);
1469 if (ecap_coherent(iommu->ecap))
1470 domain->iommu_coherency = 1;
1472 domain->iommu_coherency = 0;
1474 if (ecap_sc_support(iommu->ecap))
1475 domain->iommu_snooping = 1;
1477 domain->iommu_snooping = 0;
1479 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1480 domain->iommu_count = 1;
1481 domain->nid = iommu->node;
1483 /* always allocate the top pgd */
1484 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1487 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1491 static void domain_exit(struct dmar_domain *domain)
1493 struct dmar_drhd_unit *drhd;
1494 struct intel_iommu *iommu;
1496 /* Domain 0 is reserved, so don't process it */
1500 /* Flush any lazy unmaps that may reference this domain */
1501 if (!intel_iommu_strict)
1502 flush_unmaps_timeout(0);
1504 domain_remove_dev_info(domain);
1506 put_iova_domain(&domain->iovad);
1509 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1511 /* free page tables */
1512 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1514 for_each_active_iommu(iommu, drhd)
1515 if (test_bit(iommu->seq_id, domain->iommu_bmp))
1516 iommu_detach_domain(domain, iommu);
1518 free_domain_mem(domain);
1521 static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1522 u8 bus, u8 devfn, int translation)
1524 struct context_entry *context;
1525 unsigned long flags;
1526 struct intel_iommu *iommu;
1527 struct dma_pte *pgd;
1529 unsigned long ndomains;
1532 struct device_domain_info *info = NULL;
1534 pr_debug("Set context mapping for %02x:%02x.%d\n",
1535 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1537 BUG_ON(!domain->pgd);
1538 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1539 translation != CONTEXT_TT_MULTI_LEVEL);
1541 iommu = device_to_iommu(segment, bus, devfn);
1545 context = device_to_context_entry(iommu, bus, devfn);
1548 spin_lock_irqsave(&iommu->lock, flags);
1549 if (context_present(context)) {
1550 spin_unlock_irqrestore(&iommu->lock, flags);
1557 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1558 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
1561 /* find an available domain id for this device in iommu */
1562 ndomains = cap_ndoms(iommu->cap);
1563 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1564 if (iommu->domains[num] == domain) {
1572 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1573 if (num >= ndomains) {
1574 spin_unlock_irqrestore(&iommu->lock, flags);
1575 printk(KERN_ERR "IOMMU: no free domain ids\n");
1579 set_bit(num, iommu->domain_ids);
1580 iommu->domains[num] = domain;
1584 /* Skip top levels of page tables for
1585 * iommus which have less agaw than the default.
1586 * Unnecessary for PT mode.
1588 if (translation != CONTEXT_TT_PASS_THROUGH) {
1589 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1590 pgd = phys_to_virt(dma_pte_addr(pgd));
1591 if (!dma_pte_present(pgd)) {
1592 spin_unlock_irqrestore(&iommu->lock, flags);
1599 context_set_domain_id(context, id);
1601 if (translation != CONTEXT_TT_PASS_THROUGH) {
1602 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1603 translation = info ? CONTEXT_TT_DEV_IOTLB :
1604 CONTEXT_TT_MULTI_LEVEL;
1607 * In pass through mode, AW must be programmed to indicate the largest
1608 * AGAW value supported by hardware. And ASR is ignored by hardware.
1610 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1611 context_set_address_width(context, iommu->msagaw);
1613 context_set_address_root(context, virt_to_phys(pgd));
1614 context_set_address_width(context, iommu->agaw);
1617 context_set_translation_type(context, translation);
1618 context_set_fault_enable(context);
1619 context_set_present(context);
1620 domain_flush_cache(domain, context, sizeof(*context));
1623 * It's a non-present to present mapping. If hardware doesn't cache
1624 * non-present entries we only need to flush the write-buffer. If it
1625 * _does_ cache non-present entries, then it does so in the special
1626 * domain #0, which we have to flush:
1628 if (cap_caching_mode(iommu->cap)) {
1629 iommu->flush.flush_context(iommu, 0,
1630 (((u16)bus) << 8) | devfn,
1631 DMA_CCMD_MASK_NOBIT,
1632 DMA_CCMD_DEVICE_INVL);
1633 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
1635 iommu_flush_write_buffer(iommu);
1637 iommu_enable_dev_iotlb(info);
1638 spin_unlock_irqrestore(&iommu->lock, flags);
1640 spin_lock_irqsave(&domain->iommu_lock, flags);
1641 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1642 domain->iommu_count++;
1643 if (domain->iommu_count == 1)
1644 domain->nid = iommu->node;
1645 domain_update_iommu_cap(domain);
1647 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1652 domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1656 struct pci_dev *tmp, *parent;
1658 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
1659 pdev->bus->number, pdev->devfn,
1664 /* dependent device mapping */
1665 tmp = pci_find_upstream_pcie_bridge(pdev);
1668 /* Secondary interface's bus number and devfn 0 */
1669 parent = pdev->bus->self;
1670 while (parent != tmp) {
1671 ret = domain_context_mapping_one(domain,
1672 pci_domain_nr(parent->bus),
1673 parent->bus->number,
1674 parent->devfn, translation);
1677 parent = parent->bus->self;
1679 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
1680 return domain_context_mapping_one(domain,
1681 pci_domain_nr(tmp->subordinate),
1682 tmp->subordinate->number, 0,
1684 else /* this is a legacy PCI bridge */
1685 return domain_context_mapping_one(domain,
1686 pci_domain_nr(tmp->bus),
1692 static int domain_context_mapped(struct pci_dev *pdev)
1695 struct pci_dev *tmp, *parent;
1696 struct intel_iommu *iommu;
1698 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1703 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
1706 /* dependent device mapping */
1707 tmp = pci_find_upstream_pcie_bridge(pdev);
1710 /* Secondary interface's bus number and devfn 0 */
1711 parent = pdev->bus->self;
1712 while (parent != tmp) {
1713 ret = device_context_mapped(iommu, parent->bus->number,
1717 parent = parent->bus->self;
1719 if (pci_is_pcie(tmp))
1720 return device_context_mapped(iommu, tmp->subordinate->number,
1723 return device_context_mapped(iommu, tmp->bus->number,
1727 /* Returns a number of VTD pages, but aligned to MM page size */
1728 static inline unsigned long aligned_nrpages(unsigned long host_addr,
1731 host_addr &= ~PAGE_MASK;
1732 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
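/*
 * Example (4KiB MM pages): host_addr == 0x1234 and size == 0x2000 leaves an
 * in-page offset of 0x234, so PAGE_ALIGN(0x2234) == 0x3000 and the mapping
 * needs three 4KiB VT-d pages.
 */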
1735 /* Return largest possible superpage level for a given mapping */
1736 static inline int hardware_largepage_caps(struct dmar_domain *domain,
1737 unsigned long iov_pfn,
1738 unsigned long phy_pfn,
1739 unsigned long pages)
1741 int support, level = 1;
1742 unsigned long pfnmerge;
1744 support = domain->iommu_superpage;
1746 /* To use a large page, the virtual *and* physical addresses
1747 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1748 of them will mean we have to use smaller pages. So just
1749 merge them and check both at once. */
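	/*
	 * Example: with iov_pfn and phy_pfn both 2MiB aligned (low 9 bits of
	 * the merged value clear) and at least 512 pages to map, the loop
	 * below settles on level 2, i.e. a 2MiB superpage, provided
	 * domain->iommu_superpage allows it.
	 */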
1750 pfnmerge = iov_pfn | phy_pfn;
1752 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1753 pages >>= VTD_STRIDE_SHIFT;
1756 pfnmerge >>= VTD_STRIDE_SHIFT;
1763 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1764 struct scatterlist *sg, unsigned long phys_pfn,
1765 unsigned long nr_pages, int prot)
1767 struct dma_pte *first_pte = NULL, *pte = NULL;
1768 phys_addr_t uninitialized_var(pteval);
1769 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1770 unsigned long sg_res;
1771 unsigned int largepage_lvl = 0;
1772 unsigned long lvl_pages = 0;
1774 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1776 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1779 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1784 sg_res = nr_pages + 1;
1785 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1788 while (nr_pages > 0) {
1792 sg_res = aligned_nrpages(sg->offset, sg->length);
1793 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1794 sg->dma_length = sg->length;
1795 pteval = page_to_phys(sg_page(sg)) | prot;
1796 phys_pfn = pteval >> VTD_PAGE_SHIFT;
1800 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1802 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
1805 /* It is a large page */
1806 if (largepage_lvl > 1) {
1807 pteval |= DMA_PTE_LARGE_PAGE;
1808 /* Ensure that old small page tables are removed to make room
1809 for superpage, if they exist. */
1810 dma_pte_clear_range(domain, iov_pfn,
1811 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1812 dma_pte_free_pagetable(domain, iov_pfn,
1813 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1815 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
1819 /* We don't need lock here, nobody else
1820 * touches the iova range
1822 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
1824 static int dumps = 5;
1825 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1826 iov_pfn, tmp, (unsigned long long)pteval);
1829 debug_dma_dump_mappings(NULL);
1834 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1836 BUG_ON(nr_pages < lvl_pages);
1837 BUG_ON(sg_res < lvl_pages);
1839 nr_pages -= lvl_pages;
1840 iov_pfn += lvl_pages;
1841 phys_pfn += lvl_pages;
1842 pteval += lvl_pages * VTD_PAGE_SIZE;
1843 sg_res -= lvl_pages;
1845 /* If the next PTE would be the first in a new page, then we
1846 need to flush the cache on the entries we've just written.
1847 And then we'll need to recalculate 'pte', so clear it and
1848 let it get set again in the if (!pte) block above.
1850 If we're done (!nr_pages) we need to flush the cache too.
1852 Also if we've been setting superpages, we may need to
1853 recalculate 'pte' and switch back to smaller pages for the
1854 end of the mapping, if the trailing size is not enough to
1855 use another superpage (i.e. sg_res < lvl_pages). */
1857 if (!nr_pages || first_pte_in_page(pte) ||
1858 (largepage_lvl > 1 && sg_res < lvl_pages)) {
1859 domain_flush_cache(domain, first_pte,
1860 (void *)pte - (void *)first_pte);
1864 if (!sg_res && nr_pages)
1870 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1871 struct scatterlist *sg, unsigned long nr_pages,
1874 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1877 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1878 unsigned long phys_pfn, unsigned long nr_pages,
1881 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
1884 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1889 clear_context_table(iommu, bus, devfn);
1890 iommu->flush.flush_context(iommu, 0, 0, 0,
1891 DMA_CCMD_GLOBAL_INVL);
1892 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1895 static inline void unlink_domain_info(struct device_domain_info *info)
1897 assert_spin_locked(&device_domain_lock);
1898 list_del(&info->link);
1899 list_del(&info->global);
1901 info->dev->dev.archdata.iommu = NULL;
1904 static void domain_remove_dev_info(struct dmar_domain *domain)
1906 struct device_domain_info *info;
1907 unsigned long flags;
1908 struct intel_iommu *iommu;
1910 spin_lock_irqsave(&device_domain_lock, flags);
1911 while (!list_empty(&domain->devices)) {
1912 info = list_entry(domain->devices.next,
1913 struct device_domain_info, link);
1914 unlink_domain_info(info);
1915 spin_unlock_irqrestore(&device_domain_lock, flags);
1917 iommu_disable_dev_iotlb(info);
1918 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
1919 iommu_detach_dev(iommu, info->bus, info->devfn);
1920 free_devinfo_mem(info);
1922 spin_lock_irqsave(&device_domain_lock, flags);
1924 spin_unlock_irqrestore(&device_domain_lock, flags);
1929 * Note: struct pci_dev->dev.archdata.iommu stores the info
1931 static struct dmar_domain *
1932 find_domain(struct pci_dev *pdev)
1934 struct device_domain_info *info;
1936 /* No lock here, assumes no domain exit in normal case */
1937 info = pdev->dev.archdata.iommu;
1939 return info->domain;
1943 /* domain is initialized */
1944 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1946 struct dmar_domain *domain, *found = NULL;
1947 struct intel_iommu *iommu;
1948 struct dmar_drhd_unit *drhd;
1949 struct device_domain_info *info, *tmp;
1950 struct pci_dev *dev_tmp;
1951 unsigned long flags;
1952 int bus = 0, devfn = 0;
1956 domain = find_domain(pdev);
1960 segment = pci_domain_nr(pdev->bus);
1962 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1964 if (pci_is_pcie(dev_tmp)) {
1965 bus = dev_tmp->subordinate->number;
1968 bus = dev_tmp->bus->number;
1969 devfn = dev_tmp->devfn;
1971 spin_lock_irqsave(&device_domain_lock, flags);
1972 list_for_each_entry(info, &device_domain_list, global) {
1973 if (info->segment == segment &&
1974 info->bus == bus && info->devfn == devfn) {
1975 found = info->domain;
1979 spin_unlock_irqrestore(&device_domain_lock, flags);
1980 /* pcie-pci bridge already has a domain, use it */
1987 domain = alloc_domain();
1991 /* Allocate new domain for the device */
1992 drhd = dmar_find_matched_drhd_unit(pdev);
1994 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1996 free_domain_mem(domain);
1999 iommu = drhd->iommu;
2001 ret = iommu_attach_domain(domain, iommu);
2003 free_domain_mem(domain);
2007 if (domain_init(domain, gaw)) {
2008 domain_exit(domain);
2012 /* register pcie-to-pci device */
2014 info = alloc_devinfo_mem();
2016 domain_exit(domain);
2019 info->segment = segment;
2021 info->devfn = devfn;
2023 info->domain = domain;
2024 /* This domain is shared by devices under p2p bridge */
2025 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
2027 /* pcie-to-pci bridge already has a domain, use it */
2029 spin_lock_irqsave(&device_domain_lock, flags);
2030 list_for_each_entry(tmp, &device_domain_list, global) {
2031 if (tmp->segment == segment &&
2032 tmp->bus == bus && tmp->devfn == devfn) {
2033 found = tmp->domain;
2038 spin_unlock_irqrestore(&device_domain_lock, flags);
2039 free_devinfo_mem(info);
2040 domain_exit(domain);
2043 list_add(&info->link, &domain->devices);
2044 list_add(&info->global, &device_domain_list);
2045 spin_unlock_irqrestore(&device_domain_lock, flags);
2050 info = alloc_devinfo_mem();
2053 info->segment = segment;
2054 info->bus = pdev->bus->number;
2055 info->devfn = pdev->devfn;
2057 info->domain = domain;
2058 spin_lock_irqsave(&device_domain_lock, flags);
2059 /* somebody is fast */
2060 found = find_domain(pdev);
2061 if (found != NULL) {
2062 spin_unlock_irqrestore(&device_domain_lock, flags);
2063 if (found != domain) {
2064 domain_exit(domain);
2067 free_devinfo_mem(info);
2070 list_add(&info->link, &domain->devices);
2071 list_add(&info->global, &device_domain_list);
2072 pdev->dev.archdata.iommu = info;
2073 spin_unlock_irqrestore(&device_domain_lock, flags);
2076 /* recheck it here, maybe others set it */
2077 return find_domain(pdev);
2080 static int iommu_identity_mapping;
2081 #define IDENTMAP_ALL 1
2082 #define IDENTMAP_GFX 2
2083 #define IDENTMAP_AZALIA 4
2085 static int iommu_domain_identity_map(struct dmar_domain *domain,
2086 unsigned long long start,
2087 unsigned long long end)
2089 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2090 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2092 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2093 dma_to_mm_pfn(last_vpfn))) {
2094 printk(KERN_ERR "IOMMU: reserve iova failed\n");
2098 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2099 start, end, domain->id);
2101 * RMRR range might have overlap with physical memory range, clear it first.
2104 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2106 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2107 last_vpfn - first_vpfn + 1,
2108 DMA_PTE_READ|DMA_PTE_WRITE);
2111 static int iommu_prepare_identity_map(struct pci_dev *pdev,
2112 unsigned long long start,
2113 unsigned long long end)
2115 struct dmar_domain *domain;
2118 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2122 /* For _hardware_ passthrough, don't bother. But for software
2123 passthrough, we do it anyway -- it may indicate a memory
2124 range which is reserved in E820, so which didn't get set
2125 up to start with in si_domain */
2126 if (domain == si_domain && hw_pass_through) {
2127 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2128 pci_name(pdev), start, end);
2133 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2134 pci_name(pdev), start, end);
2137 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2138 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2139 dmi_get_system_info(DMI_BIOS_VENDOR),
2140 dmi_get_system_info(DMI_BIOS_VERSION),
2141 dmi_get_system_info(DMI_PRODUCT_VERSION));
2146 if (end >> agaw_to_width(domain->agaw)) {
2147 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2148 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2149 agaw_to_width(domain->agaw),
2150 dmi_get_system_info(DMI_BIOS_VENDOR),
2151 dmi_get_system_info(DMI_BIOS_VERSION),
2152 dmi_get_system_info(DMI_PRODUCT_VERSION));
2157 ret = iommu_domain_identity_map(domain, start, end);
2161 /* context entry init */
2162 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
2169 domain_exit(domain);
2173 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2174 struct pci_dev *pdev)
2176 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2178 return iommu_prepare_identity_map(pdev, rmrr->base_address,
2182 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2183 static inline void iommu_prepare_isa(void)
2185 struct pci_dev *pdev;
2188 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2192 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2193 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
2196 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2197 "floppy might not work\n");
2201 static inline void iommu_prepare_isa(void)
2205 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2207 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2209 static int __init si_domain_init(int hw)
2211 struct dmar_drhd_unit *drhd;
2212 struct intel_iommu *iommu;
2215 si_domain = alloc_domain();
2219 for_each_active_iommu(iommu, drhd) {
2220 ret = iommu_attach_domain(si_domain, iommu);
2222 domain_exit(si_domain);
2227 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2228 domain_exit(si_domain);
2232 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2233 pr_debug("IOMMU: identity mapping domain is domain %d\n",
2239 for_each_online_node(nid) {
2240 unsigned long start_pfn, end_pfn;
2243 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2244 ret = iommu_domain_identity_map(si_domain,
2245 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2254 static void domain_remove_one_dev_info(struct dmar_domain *domain,
2255 struct pci_dev *pdev);
2256 static int identity_mapping(struct pci_dev *pdev)
2258 struct device_domain_info *info;
2260 if (likely(!iommu_identity_mapping))
2263 info = pdev->dev.archdata.iommu;
2264 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2265 return (info->domain == si_domain);
2270 static int domain_add_dev_info(struct dmar_domain *domain,
2271 struct pci_dev *pdev,
2274 struct device_domain_info *info;
2275 unsigned long flags;
2278 info = alloc_devinfo_mem();
2282 info->segment = pci_domain_nr(pdev->bus);
2283 info->bus = pdev->bus->number;
2284 info->devfn = pdev->devfn;
2286 info->domain = domain;
2288 spin_lock_irqsave(&device_domain_lock, flags);
2289 list_add(&info->link, &domain->devices);
2290 list_add(&info->global, &device_domain_list);
2291 pdev->dev.archdata.iommu = info;
2292 spin_unlock_irqrestore(&device_domain_lock, flags);
2294 ret = domain_context_mapping(domain, pdev, translation);
2296 spin_lock_irqsave(&device_domain_lock, flags);
2297 unlink_domain_info(info);
2298 spin_unlock_irqrestore(&device_domain_lock, flags);
2299 free_devinfo_mem(info);
2306 static bool device_has_rmrr(struct pci_dev *dev)
2308 struct dmar_rmrr_unit *rmrr;
2311 for_each_rmrr_units(rmrr) {
2312 for (i = 0; i < rmrr->devices_cnt; i++) {
2314 * Return TRUE if this RMRR contains the device that we are looking for.
2317 if (rmrr->devices[i] == dev)
2324 static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2328 * We want to prevent any device associated with an RMRR from
2329 * getting placed into the SI Domain. This is done because
2330 * problems exist when devices are moved in and out of domains
2331 * and their respective RMRR info is lost. We exempt USB devices
2332 * from this process due to their usage of RMRRs that are known
2333 * to not be needed after BIOS hand-off to OS.
2335 if (device_has_rmrr(pdev) &&
2336 (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
2339 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2342 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2345 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2349 * We want to start off with all devices in the 1:1 domain, and
2350 * take them out later if we find they can't access all of memory.
2352 * However, we can't do this for PCI devices behind bridges,
2353 * because all PCI devices behind the same bridge will end up
2354 * with the same source-id on their transactions.
2356 * Practically speaking, we can't change things around for these
2357 * devices at run-time, because we can't be sure there'll be no
2358 * DMA transactions in flight for any of their siblings.
2360 * So PCI devices (unless they're on the root bus) as well as
2361 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2362 * the 1:1 domain, just in _case_ one of their siblings turns out
2363 * not to be able to map all of memory.
2365 if (!pci_is_pcie(pdev)) {
2366 if (!pci_is_root_bus(pdev->bus))
2368 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2370 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2374 * At boot time, we don't yet know if devices will be 64-bit capable.
2375 * Assume that they will -- if they turn out not to be, then we can
2376 * take them out of the 1:1 domain later.
2380 * If the device's dma_mask is less than the system's memory
2381 * size then this is not a candidate for identity mapping.
2383 u64 dma_mask = pdev->dma_mask;
2385 if (pdev->dev.coherent_dma_mask &&
2386 pdev->dev.coherent_dma_mask < dma_mask)
2387 dma_mask = pdev->dev.coherent_dma_mask;
2389 return dma_mask >= dma_get_required_mask(&pdev->dev);
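/*
 * Worked example (illustrative, assuming RAM tops out just under 8GiB):
 * dma_get_required_mask() would report roughly DMA_BIT_MASK(33) =
 * 0x1ffffffff.  A device advertising only DMA_BIT_MASK(32) = 0xffffffff
 * then fails the check above and is kept out of (or later dropped from)
 * the 1:1 domain, while a 64-bit capable device passes and stays
 * identity mapped.
 */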
2395 static int __init iommu_prepare_static_identity_mapping(int hw)
2397 struct pci_dev *pdev = NULL;
2400 ret = si_domain_init(hw);
2404 for_each_pci_dev(pdev) {
2405 if (iommu_should_identity_map(pdev, 1)) {
2406 ret = domain_add_dev_info(si_domain, pdev,
2407 hw ? CONTEXT_TT_PASS_THROUGH :
2408 CONTEXT_TT_MULTI_LEVEL);
2410 /* device not associated with an iommu */
2415 pr_info("IOMMU: %s identity mapping for device %s\n",
2416 hw ? "hardware" : "software", pci_name(pdev));
2423 static int __init init_dmars(void)
2425 struct dmar_drhd_unit *drhd;
2426 struct dmar_rmrr_unit *rmrr;
2427 struct pci_dev *pdev;
2428 struct intel_iommu *iommu;
2434 * initialize and program root entry to not present
2437 for_each_drhd_unit(drhd) {
2439 * lock not needed as this is only incremented in the single
2440 * threaded kernel __init code path; all other accesses are read-only
2443 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2447 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2448 IOMMU_UNITS_SUPPORTED);
2451 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2454 printk(KERN_ERR "Allocating global iommu array failed\n");
2459 deferred_flush = kzalloc(g_num_of_iommus *
2460 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2461 if (!deferred_flush) {
2466 for_each_active_iommu(iommu, drhd) {
2467 g_iommus[iommu->seq_id] = iommu;
2469 ret = iommu_init_domains(iommu);
2475 * we could share the same root & context tables
2476 * among all IOMMUs. Need to split it later.
2478 ret = iommu_alloc_root_entry(iommu);
2480 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2483 if (!ecap_pass_through(iommu->ecap))
2484 hw_pass_through = 0;
2488 * Start from a sane iommu hardware state.
2490 for_each_active_iommu(iommu, drhd) {
2492 * If the queued invalidation was already initialized by us
2493 * (for example, while enabling interrupt-remapping) then
2494 * things are already rolling from a sane state.
2500 * Clear any previous faults.
2502 dmar_fault(-1, iommu);
2504 * Disable queued invalidation if supported and already enabled
2505 * before OS handover.
2507 dmar_disable_qi(iommu);
2510 for_each_active_iommu(iommu, drhd) {
2511 if (dmar_enable_qi(iommu)) {
2513 * Queued Invalidation not enabled, use Register Based Invalidation
2516 iommu->flush.flush_context = __iommu_flush_context;
2517 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2518 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
2521 (unsigned long long)drhd->reg_base_addr);
2523 iommu->flush.flush_context = qi_flush_context;
2524 iommu->flush.flush_iotlb = qi_flush_iotlb;
2525 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
2528 (unsigned long long)drhd->reg_base_addr);
2532 if (iommu_pass_through)
2533 iommu_identity_mapping |= IDENTMAP_ALL;
2535 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
2536 iommu_identity_mapping |= IDENTMAP_GFX;
2539 check_tylersburg_isoch();
2542 * If pass through is not set or not enabled, set up context entries for
2543 * identity mappings for rmrr, gfx and isa devices, possibly falling back
2544 * to static identity mapping if iommu_identity_mapping is set.
2546 if (iommu_identity_mapping) {
2547 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2549 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2555 * for each dev attached to rmrr
2557 * locate drhd for dev, alloc domain for dev
2558 * allocate free domain
2559 * allocate page table entries for rmrr
2560 * if context not allocated for bus
2561 * allocate and init context
2562 * set present in root table for this bus
2563 * init context with domain, translation etc
2567 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2568 for_each_rmrr_units(rmrr) {
2569 for (i = 0; i < rmrr->devices_cnt; i++) {
2570 pdev = rmrr->devices[i];
2572 * some BIOSes list non-existent devices in the DMAR table
2577 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2580 "IOMMU: mapping reserved region failed\n");
2584 iommu_prepare_isa();
2589 * global invalidate context cache
2590 * global invalidate iotlb
2591 * enable translation
2593 for_each_iommu(iommu, drhd) {
2594 if (drhd->ignored) {
2596 * we always have to disable PMRs or DMA may fail on this device
2600 iommu_disable_protect_mem_regions(iommu);
2604 iommu_flush_write_buffer(iommu);
2606 ret = dmar_set_interrupt(iommu);
2610 iommu_set_root_entry(iommu);
2612 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2613 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2615 ret = iommu_enable_translation(iommu);
2619 iommu_disable_protect_mem_regions(iommu);
2624 for_each_active_iommu(iommu, drhd)
2625 free_dmar_iommu(iommu);
2630 /* This takes a number of _MM_ pages, not VTD pages */
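/*
 * Illustrative note: "MM pages" here are units of the CPU's PAGE_SIZE,
 * while the IOMMU page tables always use VTD_PAGE_SIZE (4KiB).  The
 * mm_to_dma_pfn()/dma_to_mm_pfn() helpers convert between the two units;
 * on x86, where PAGE_SIZE is also 4KiB, the conversion is a no-op shift.
 */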
2631 static struct iova *intel_alloc_iova(struct device *dev,
2632 struct dmar_domain *domain,
2633 unsigned long nrpages, uint64_t dma_mask)
2635 struct pci_dev *pdev = to_pci_dev(dev);
2636 struct iova *iova = NULL;
2638 /* Restrict dma_mask to the width that the iommu can handle */
2639 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2641 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2643 * First try to allocate an io virtual address in
2644 * DMA_BIT_MASK(32) and if that fails then try allocating from the higher range
2647 iova = alloc_iova(&domain->iovad, nrpages,
2648 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2652 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2653 if (unlikely(!iova)) {
2654 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2655 nrpages, pci_name(pdev));
2662 static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
2664 struct dmar_domain *domain;
2667 domain = get_domain_for_dev(pdev,
2668 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2671 "Allocating domain for %s failed", pci_name(pdev));
2675 /* make sure context mapping is ok */
2676 if (unlikely(!domain_context_mapped(pdev))) {
2677 ret = domain_context_mapping(domain, pdev,
2678 CONTEXT_TT_MULTI_LEVEL);
2681 "Domain context map for %s failed",
2690 static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2692 struct device_domain_info *info;
2694 /* No lock here, assumes no domain exit in normal case */
2695 info = dev->dev.archdata.iommu;
2697 return info->domain;
2699 return __get_valid_domain_for_dev(dev);
2702 static int iommu_dummy(struct pci_dev *pdev)
2704 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2707 /* Check if the pdev needs to go through the non-identity map and unmap process. */
2708 static int iommu_no_mapping(struct device *dev)
2710 struct pci_dev *pdev;
2713 if (unlikely(!dev_is_pci(dev)))
2716 pdev = to_pci_dev(dev);
2717 if (iommu_dummy(pdev))
2720 if (!iommu_identity_mapping)
2723 found = identity_mapping(pdev);
2725 if (iommu_should_identity_map(pdev, 0))
2729 * The 32-bit DMA device is removed from si_domain and falls back
2730 * to non-identity mapping.
2732 domain_remove_one_dev_info(si_domain, pdev);
2733 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2739 * When a 64-bit DMA-capable device is detached from a VM, the device
2740 * is put into si_domain for identity mapping.
2742 if (iommu_should_identity_map(pdev, 0)) {
2744 ret = domain_add_dev_info(si_domain, pdev,
2746 CONTEXT_TT_PASS_THROUGH :
2747 CONTEXT_TT_MULTI_LEVEL);
2749 printk(KERN_INFO "64bit %s uses identity mapping\n",
2759 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2760 size_t size, int dir, u64 dma_mask)
2762 struct pci_dev *pdev = to_pci_dev(hwdev);
2763 struct dmar_domain *domain;
2764 phys_addr_t start_paddr;
2768 struct intel_iommu *iommu;
2769 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
2771 BUG_ON(dir == DMA_NONE);
2773 if (iommu_no_mapping(hwdev))
2776 domain = get_valid_domain_for_dev(pdev);
2780 iommu = domain_get_iommu(domain);
2781 size = aligned_nrpages(paddr, size);
2783 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
2788 * Check if DMAR supports zero-length reads on write-only mappings
2791 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2792 !cap_zlr(iommu->cap))
2793 prot |= DMA_PTE_READ;
2794 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2795 prot |= DMA_PTE_WRITE;
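/*
 * Illustrative note: for a DMA_FROM_DEVICE mapping on hardware without
 * zero-length-read support (cap_zlr() == 0) the logic above sets
 * prot = DMA_PTE_READ | DMA_PTE_WRITE; with ZLR support it sets only
 * DMA_PTE_WRITE, so a device read of the region would fault.
 */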
2797 * The range paddr .. (paddr + size) may cover partial pages, so we map whole
2798 * pages. Note: if two parts of one page are mapped separately, we
2799 * might end up with two guest addresses mapping to the same host paddr, but this
2800 * is not a big problem
2802 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2803 mm_to_dma_pfn(paddr_pfn), size, prot);
2807 /* it's a non-present to present mapping. Only flush if caching mode */
2808 if (cap_caching_mode(iommu->cap))
2809 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
2811 iommu_flush_write_buffer(iommu);
2813 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2814 start_paddr += paddr & ~PAGE_MASK;
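/*
 * Worked example (illustrative): only whole pages are remapped, and the
 * sub-page offset of paddr is preserved in the returned handle.  For
 * paddr = 0x12345678 the in-page offset is 0x678; if the allocator
 * returned iova->pfn_lo = 0xffffe, the DMA address handed back to the
 * driver would be 0xffffe678.
 */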
2819 __free_iova(&domain->iovad, iova);
2820 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
2821 pci_name(pdev), size, (unsigned long long)paddr, dir);
2825 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2826 unsigned long offset, size_t size,
2827 enum dma_data_direction dir,
2828 struct dma_attrs *attrs)
2830 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2831 dir, to_pci_dev(dev)->dma_mask);
2834 static void flush_unmaps(void)
2840 /* just flush them all */
2841 for (i = 0; i < g_num_of_iommus; i++) {
2842 struct intel_iommu *iommu = g_iommus[i];
2846 if (!deferred_flush[i].next)
2849 /* In caching mode, global flushes make emulation expensive */
2850 if (!cap_caching_mode(iommu->cap))
2851 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2852 DMA_TLB_GLOBAL_FLUSH);
2853 for (j = 0; j < deferred_flush[i].next; j++) {
2855 struct iova *iova = deferred_flush[i].iova[j];
2856 struct dmar_domain *domain = deferred_flush[i].domain[j];
2858 /* On real hardware multiple invalidations are expensive */
2859 if (cap_caching_mode(iommu->cap))
2860 iommu_flush_iotlb_psi(iommu, domain->id,
2861 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2863 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2864 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2865 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2867 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2869 deferred_flush[i].next = 0;
2875 static void flush_unmaps_timeout(unsigned long data)
2877 unsigned long flags;
2879 spin_lock_irqsave(&async_umap_flush_lock, flags);
2881 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2884 static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2886 unsigned long flags;
2888 struct intel_iommu *iommu;
2890 spin_lock_irqsave(&async_umap_flush_lock, flags);
2891 if (list_size == HIGH_WATER_MARK)
2894 iommu = domain_get_iommu(dom);
2895 iommu_id = iommu->seq_id;
2897 next = deferred_flush[iommu_id].next;
2898 deferred_flush[iommu_id].domain[next] = dom;
2899 deferred_flush[iommu_id].iova[next] = iova;
2900 deferred_flush[iommu_id].next++;
2903 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2907 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
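/*
 * Illustrative note: in non-strict mode unmaps are batched per IOMMU
 * rather than flushed one by one.  add_unmap() queues the (domain, iova)
 * pair in deferred_flush[] and arms a 10ms timer; flush_unmaps() then
 * performs a single IOTLB flush (global, or per-domain PSI in caching
 * mode) and frees every queued IOVA.  Hitting HIGH_WATER_MARK forces an
 * immediate flush so the tables cannot overflow.
 */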
2910 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2911 size_t size, enum dma_data_direction dir,
2912 struct dma_attrs *attrs)
2914 struct pci_dev *pdev = to_pci_dev(dev);
2915 struct dmar_domain *domain;
2916 unsigned long start_pfn, last_pfn;
2918 struct intel_iommu *iommu;
2920 if (iommu_no_mapping(dev))
2923 domain = find_domain(pdev);
2926 iommu = domain_get_iommu(domain);
2928 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2929 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2930 (unsigned long long)dev_addr))
2933 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2934 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2936 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2937 pci_name(pdev), start_pfn, last_pfn);
2939 /* clear the whole page */
2940 dma_pte_clear_range(domain, start_pfn, last_pfn);
2942 /* free page tables */
2943 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2945 if (intel_iommu_strict) {
2946 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2947 last_pfn - start_pfn + 1, 0);
2949 __free_iova(&domain->iovad, iova);
2951 add_unmap(domain, iova);
2953 * queue up the release of the unmap to save roughly 1/6th of the
2954 * cpu time used up by the iotlb flush operation...
2959 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2960 dma_addr_t *dma_handle, gfp_t flags,
2961 struct dma_attrs *attrs)
2966 size = PAGE_ALIGN(size);
2967 order = get_order(size);
2969 if (!iommu_no_mapping(hwdev))
2970 flags &= ~(GFP_DMA | GFP_DMA32);
2971 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
2972 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
2978 vaddr = (void *)__get_free_pages(flags, order);
2981 memset(vaddr, 0, size);
2983 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2985 hwdev->coherent_dma_mask);
2988 free_pages((unsigned long)vaddr, order);
2992 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2993 dma_addr_t dma_handle, struct dma_attrs *attrs)
2997 size = PAGE_ALIGN(size);
2998 order = get_order(size);
3000 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
3001 free_pages((unsigned long)vaddr, order);
3004 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
3005 int nelems, enum dma_data_direction dir,
3006 struct dma_attrs *attrs)
3008 struct pci_dev *pdev = to_pci_dev(hwdev);
3009 struct dmar_domain *domain;
3010 unsigned long start_pfn, last_pfn;
3012 struct intel_iommu *iommu;
3014 if (iommu_no_mapping(hwdev))
3017 domain = find_domain(pdev);
3020 iommu = domain_get_iommu(domain);
3022 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
3023 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3024 (unsigned long long)sglist[0].dma_address))
3027 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3028 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3030 /* clear the whole page */
3031 dma_pte_clear_range(domain, start_pfn, last_pfn);
3033 /* free page tables */
3034 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
3036 if (intel_iommu_strict) {
3037 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3038 last_pfn - start_pfn + 1, 0);
3040 __free_iova(&domain->iovad, iova);
3042 add_unmap(domain, iova);
3044 * queue up the release of the unmap to save roughly 1/6th of the
3045 * cpu time used up by the iotlb flush operation...
3050 static int intel_nontranslate_map_sg(struct device *hddev,
3051 struct scatterlist *sglist, int nelems, int dir)
3054 struct scatterlist *sg;
3056 for_each_sg(sglist, sg, nelems, i) {
3057 BUG_ON(!sg_page(sg));
3058 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3059 sg->dma_length = sg->length;
3064 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3065 enum dma_data_direction dir, struct dma_attrs *attrs)
3068 struct pci_dev *pdev = to_pci_dev(hwdev);
3069 struct dmar_domain *domain;
3072 struct iova *iova = NULL;
3074 struct scatterlist *sg;
3075 unsigned long start_vpfn;
3076 struct intel_iommu *iommu;
3078 BUG_ON(dir == DMA_NONE);
3079 if (iommu_no_mapping(hwdev))
3080 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
3082 domain = get_valid_domain_for_dev(pdev);
3086 iommu = domain_get_iommu(domain);
3088 for_each_sg(sglist, sg, nelems, i)
3089 size += aligned_nrpages(sg->offset, sg->length);
3091 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3094 sglist->dma_length = 0;
3099 * Check if DMAR supports zero-length reads on write-only mappings
3102 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3103 !cap_zlr(iommu->cap))
3104 prot |= DMA_PTE_READ;
3105 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3106 prot |= DMA_PTE_WRITE;
3108 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3110 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3111 if (unlikely(ret)) {
3112 /* clear the page */
3113 dma_pte_clear_range(domain, start_vpfn,
3114 start_vpfn + size - 1);
3115 /* free page tables */
3116 dma_pte_free_pagetable(domain, start_vpfn,
3117 start_vpfn + size - 1);
3119 __free_iova(&domain->iovad, iova);
3123 /* it's a non-present to present mapping. Only flush if caching mode */
3124 if (cap_caching_mode(iommu->cap))
3125 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
3127 iommu_flush_write_buffer(iommu);
3132 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3137 struct dma_map_ops intel_dma_ops = {
3138 .alloc = intel_alloc_coherent,
3139 .free = intel_free_coherent,
3140 .map_sg = intel_map_sg,
3141 .unmap_sg = intel_unmap_sg,
3142 .map_page = intel_map_page,
3143 .unmap_page = intel_unmap_page,
3144 .mapping_error = intel_mapping_error,
3147 static inline int iommu_domain_cache_init(void)
3151 iommu_domain_cache = kmem_cache_create("iommu_domain",
3152 sizeof(struct dmar_domain),
3157 if (!iommu_domain_cache) {
3158 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3165 static inline int iommu_devinfo_cache_init(void)
3169 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3170 sizeof(struct device_domain_info),
3174 if (!iommu_devinfo_cache) {
3175 printk(KERN_ERR "Couldn't create devinfo cache\n");
3182 static inline int iommu_iova_cache_init(void)
3186 iommu_iova_cache = kmem_cache_create("iommu_iova",
3187 sizeof(struct iova),
3191 if (!iommu_iova_cache) {
3192 printk(KERN_ERR "Couldn't create iova cache\n");
3199 static int __init iommu_init_mempool(void)
3202 ret = iommu_iova_cache_init();
3206 ret = iommu_domain_cache_init();
3210 ret = iommu_devinfo_cache_init();
3214 kmem_cache_destroy(iommu_domain_cache);
3216 kmem_cache_destroy(iommu_iova_cache);
3221 static void __init iommu_exit_mempool(void)
3223 kmem_cache_destroy(iommu_devinfo_cache);
3224 kmem_cache_destroy(iommu_domain_cache);
3225 kmem_cache_destroy(iommu_iova_cache);
3229 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3231 struct dmar_drhd_unit *drhd;
3235 /* We know that this device on this chipset has its own IOMMU.
3236 * If we find it under a different IOMMU, then the BIOS is lying
3237 * to us. Hope that the IOMMU for this device is actually
3238 * disabled, and it needs no translation...
3240 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3242 /* "can't" happen */
3243 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3246 vtbar &= 0xffff0000;
3248 /* we know that this iommu should be at offset 0xa000 from vtbar */
3249 drhd = dmar_find_matched_drhd_unit(pdev);
3250 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3251 TAINT_FIRMWARE_WORKAROUND,
3252 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3253 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3255 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
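/*
 * Worked example (illustrative): the quirk reads the chipset's VTBAR from
 * config offset 0xb0 of device 00.0 on the same bus and masks off the low
 * 16 bits.  If that register reads back as, say, 0xfed90000, the DRHD
 * matched for this IOAT device must report reg_base_addr ==
 * 0xfed90000 + 0xa000 = 0xfed9a000; any other value means the BIOS pointed
 * the device at the wrong VT-d unit, and the device is marked
 * DUMMY_DEVICE_DOMAIN_INFO so it bypasses translation entirely.
 */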
3257 static void __init init_no_remapping_devices(void)
3259 struct dmar_drhd_unit *drhd;
3261 for_each_drhd_unit(drhd) {
3262 if (!drhd->include_all) {
3264 for (i = 0; i < drhd->devices_cnt; i++)
3265 if (drhd->devices[i] != NULL)
3267 /* ignore DMAR unit if no pci devices exist */
3268 if (i == drhd->devices_cnt)
3273 for_each_active_drhd_unit(drhd) {
3275 if (drhd->include_all)
3278 for (i = 0; i < drhd->devices_cnt; i++)
3279 if (drhd->devices[i] &&
3280 !IS_GFX_DEVICE(drhd->devices[i]))
3283 if (i < drhd->devices_cnt)
3286 /* This IOMMU has *only* gfx devices. Either bypass it or
3287 set the gfx_mapped flag, as appropriate */
3289 intel_iommu_gfx_mapped = 1;
3292 for (i = 0; i < drhd->devices_cnt; i++) {
3293 if (!drhd->devices[i])
3295 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3301 #ifdef CONFIG_SUSPEND
3302 static int init_iommu_hw(void)
3304 struct dmar_drhd_unit *drhd;
3305 struct intel_iommu *iommu = NULL;
3307 for_each_active_iommu(iommu, drhd)
3309 dmar_reenable_qi(iommu);
3311 for_each_iommu(iommu, drhd) {
3312 if (drhd->ignored) {
3314 * we always have to disable PMRs or DMA may fail on this device
3318 iommu_disable_protect_mem_regions(iommu);
3322 iommu_flush_write_buffer(iommu);
3324 iommu_set_root_entry(iommu);
3326 iommu->flush.flush_context(iommu, 0, 0, 0,
3327 DMA_CCMD_GLOBAL_INVL);
3328 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3329 DMA_TLB_GLOBAL_FLUSH);
3330 if (iommu_enable_translation(iommu))
3332 iommu_disable_protect_mem_regions(iommu);
3338 static void iommu_flush_all(void)
3340 struct dmar_drhd_unit *drhd;
3341 struct intel_iommu *iommu;
3343 for_each_active_iommu(iommu, drhd) {
3344 iommu->flush.flush_context(iommu, 0, 0, 0,
3345 DMA_CCMD_GLOBAL_INVL);
3346 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3347 DMA_TLB_GLOBAL_FLUSH);
3351 static int iommu_suspend(void)
3353 struct dmar_drhd_unit *drhd;
3354 struct intel_iommu *iommu = NULL;
3357 for_each_active_iommu(iommu, drhd) {
3358 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3360 if (!iommu->iommu_state)
3366 for_each_active_iommu(iommu, drhd) {
3367 iommu_disable_translation(iommu);
3369 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3371 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3372 readl(iommu->reg + DMAR_FECTL_REG);
3373 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3374 readl(iommu->reg + DMAR_FEDATA_REG);
3375 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3376 readl(iommu->reg + DMAR_FEADDR_REG);
3377 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3378 readl(iommu->reg + DMAR_FEUADDR_REG);
3380 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3385 for_each_active_iommu(iommu, drhd)
3386 kfree(iommu->iommu_state);
3391 static void iommu_resume(void)
3393 struct dmar_drhd_unit *drhd;
3394 struct intel_iommu *iommu = NULL;
3397 if (init_iommu_hw()) {
3399 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3401 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3405 for_each_active_iommu(iommu, drhd) {
3407 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3409 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3410 iommu->reg + DMAR_FECTL_REG);
3411 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3412 iommu->reg + DMAR_FEDATA_REG);
3413 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3414 iommu->reg + DMAR_FEADDR_REG);
3415 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3416 iommu->reg + DMAR_FEUADDR_REG);
3418 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3421 for_each_active_iommu(iommu, drhd)
3422 kfree(iommu->iommu_state);
3425 static struct syscore_ops iommu_syscore_ops = {
3426 .resume = iommu_resume,
3427 .suspend = iommu_suspend,
3430 static void __init init_iommu_pm_ops(void)
3432 register_syscore_ops(&iommu_syscore_ops);
3436 static inline void init_iommu_pm_ops(void) {}
3437 #endif /* CONFIG_PM */
3439 LIST_HEAD(dmar_rmrr_units);
3441 static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
3443 list_add(&rmrr->list, &dmar_rmrr_units);
3447 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3449 struct acpi_dmar_reserved_memory *rmrr;
3450 struct dmar_rmrr_unit *rmrru;
3452 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3456 rmrru->hdr = header;
3457 rmrr = (struct acpi_dmar_reserved_memory *)header;
3458 rmrru->base_address = rmrr->base_address;
3459 rmrru->end_address = rmrr->end_address;
3461 dmar_register_rmrr_unit(rmrru);
3466 rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
3468 struct acpi_dmar_reserved_memory *rmrr;
3471 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
3472 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
3473 ((void *)rmrr) + rmrr->header.length,
3474 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
3476 if (ret || (rmrru->devices_cnt == 0)) {
3477 list_del(&rmrru->list);
3483 static LIST_HEAD(dmar_atsr_units);
3485 int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3487 struct acpi_dmar_atsr *atsr;
3488 struct dmar_atsr_unit *atsru;
3490 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3491 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3496 atsru->include_all = atsr->flags & 0x1;
3498 list_add(&atsru->list, &dmar_atsr_units);
3503 static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
3506 struct acpi_dmar_atsr *atsr;
3508 if (atsru->include_all)
3511 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3512 rc = dmar_parse_dev_scope((void *)(atsr + 1),
3513 (void *)atsr + atsr->header.length,
3514 &atsru->devices_cnt, &atsru->devices,
3516 if (rc || !atsru->devices_cnt) {
3517 list_del(&atsru->list);
3524 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3527 struct pci_bus *bus;
3528 struct acpi_dmar_atsr *atsr;
3529 struct dmar_atsr_unit *atsru;
3531 dev = pci_physfn(dev);
3533 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3534 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3535 if (atsr->segment == pci_domain_nr(dev->bus))
3542 for (bus = dev->bus; bus; bus = bus->parent) {
3543 struct pci_dev *bridge = bus->self;
3545 if (!bridge || !pci_is_pcie(bridge) ||
3546 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
3549 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
3550 for (i = 0; i < atsru->devices_cnt; i++)
3551 if (atsru->devices[i] == bridge)
3557 if (atsru->include_all)
3563 int __init dmar_parse_rmrr_atsr_dev(void)
3565 struct dmar_rmrr_unit *rmrr, *rmrr_n;
3566 struct dmar_atsr_unit *atsr, *atsr_n;
3569 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
3570 ret = rmrr_parse_dev(rmrr);
3575 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
3576 ret = atsr_parse_dev(atsr);
3585 * Here we only respond to the action of a device being unbound from its driver.
3587 * A newly added device is not attached to its DMAR domain here yet. That will happen
3588 * when the device is first mapped to an iova.
3590 static int device_notifier(struct notifier_block *nb,
3591 unsigned long action, void *data)
3593 struct device *dev = data;
3594 struct pci_dev *pdev = to_pci_dev(dev);
3595 struct dmar_domain *domain;
3597 if (iommu_no_mapping(dev))
3600 domain = find_domain(pdev);
3604 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
3605 domain_remove_one_dev_info(domain, pdev);
3607 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3608 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3609 list_empty(&domain->devices))
3610 domain_exit(domain);
3616 static struct notifier_block device_nb = {
3617 .notifier_call = device_notifier,
3620 int __init intel_iommu_init(void)
3623 struct dmar_drhd_unit *drhd;
3624 struct intel_iommu *iommu;
3626 /* VT-d is required for a TXT/tboot launch, so enforce that */
3627 force_on = tboot_force_iommu();
3629 if (dmar_table_init()) {
3631 panic("tboot: Failed to initialize DMAR table\n");
3636 * Disable translation if already enabled prior to OS handover.
3638 for_each_active_iommu(iommu, drhd)
3639 if (iommu->gcmd & DMA_GCMD_TE)
3640 iommu_disable_translation(iommu);
3642 if (dmar_dev_scope_init() < 0) {
3644 panic("tboot: Failed to initialize DMAR device scope\n");
3648 if (no_iommu || dmar_disabled)
3651 if (iommu_init_mempool()) {
3653 panic("tboot: Failed to initialize iommu memory\n");
3657 if (list_empty(&dmar_rmrr_units))
3658 printk(KERN_INFO "DMAR: No RMRR found\n");
3660 if (list_empty(&dmar_atsr_units))
3661 printk(KERN_INFO "DMAR: No ATSR found\n");
3663 if (dmar_init_reserved_ranges()) {
3665 panic("tboot: Failed to reserve iommu ranges\n");
3669 init_no_remapping_devices();
3674 panic("tboot: Failed to initialize DMARs\n");
3675 printk(KERN_ERR "IOMMU: dmar init failed\n");
3676 put_iova_domain(&reserved_iova_list);
3677 iommu_exit_mempool();
3681 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3683 init_timer(&unmap_timer);
3684 #ifdef CONFIG_SWIOTLB
3687 dma_ops = &intel_dma_ops;
3689 init_iommu_pm_ops();
3691 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
3693 bus_register_notifier(&pci_bus_type, &device_nb);
3695 intel_iommu_enabled = 1;
3700 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3701 struct pci_dev *pdev)
3703 struct pci_dev *tmp, *parent;
3705 if (!iommu || !pdev)
3708 /* dependent device detach */
3709 tmp = pci_find_upstream_pcie_bridge(pdev);
3710 /* Secondary interface's bus number and devfn 0 */
3712 parent = pdev->bus->self;
3713 while (parent != tmp) {
3714 iommu_detach_dev(iommu, parent->bus->number,
3716 parent = parent->bus->self;
3718 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
3719 iommu_detach_dev(iommu,
3720 tmp->subordinate->number, 0);
3721 else /* this is a legacy PCI bridge */
3722 iommu_detach_dev(iommu, tmp->bus->number,
3727 static void domain_remove_one_dev_info(struct dmar_domain *domain,
3728 struct pci_dev *pdev)
3730 struct device_domain_info *info, *tmp;
3731 struct intel_iommu *iommu;
3732 unsigned long flags;
3735 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3740 spin_lock_irqsave(&device_domain_lock, flags);
3741 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
3742 if (info->segment == pci_domain_nr(pdev->bus) &&
3743 info->bus == pdev->bus->number &&
3744 info->devfn == pdev->devfn) {
3745 unlink_domain_info(info);
3746 spin_unlock_irqrestore(&device_domain_lock, flags);
3748 iommu_disable_dev_iotlb(info);
3749 iommu_detach_dev(iommu, info->bus, info->devfn);
3750 iommu_detach_dependent_devices(iommu, pdev);
3751 free_devinfo_mem(info);
3753 spin_lock_irqsave(&device_domain_lock, flags);
3761 /* if there are no other devices under the same iommu
3762 * owned by this domain, clear this iommu from iommu_bmp and
3763 * update the iommu count and coherency
3765 if (iommu == device_to_iommu(info->segment, info->bus,
3770 spin_unlock_irqrestore(&device_domain_lock, flags);
3773 unsigned long tmp_flags;
3774 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3775 clear_bit(iommu->seq_id, domain->iommu_bmp);
3776 domain->iommu_count--;
3777 domain_update_iommu_cap(domain);
3778 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3780 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3781 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3782 spin_lock_irqsave(&iommu->lock, tmp_flags);
3783 clear_bit(domain->id, iommu->domain_ids);
3784 iommu->domains[domain->id] = NULL;
3785 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3790 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3792 struct device_domain_info *info;
3793 struct intel_iommu *iommu;
3794 unsigned long flags1, flags2;
3796 spin_lock_irqsave(&device_domain_lock, flags1);
3797 while (!list_empty(&domain->devices)) {
3798 info = list_entry(domain->devices.next,
3799 struct device_domain_info, link);
3800 unlink_domain_info(info);
3801 spin_unlock_irqrestore(&device_domain_lock, flags1);
3803 iommu_disable_dev_iotlb(info);
3804 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
3805 iommu_detach_dev(iommu, info->bus, info->devfn);
3806 iommu_detach_dependent_devices(iommu, info->dev);
3808 /* clear this iommu in iommu_bmp, update iommu count and coherency
3811 spin_lock_irqsave(&domain->iommu_lock, flags2);
3812 if (test_and_clear_bit(iommu->seq_id,
3813 domain->iommu_bmp)) {
3814 domain->iommu_count--;
3815 domain_update_iommu_cap(domain);
3817 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3819 free_devinfo_mem(info);
3820 spin_lock_irqsave(&device_domain_lock, flags1);
3822 spin_unlock_irqrestore(&device_domain_lock, flags1);
3825 /* domain id for a virtual machine; it won't be set in a context entry */
3826 static atomic_t vm_domid = ATOMIC_INIT(0);
3828 static struct dmar_domain *iommu_alloc_vm_domain(void)
3830 struct dmar_domain *domain;
3832 domain = alloc_domain_mem();
3836 domain->id = atomic_inc_return(&vm_domid);
3838 memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
3839 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3844 static int md_domain_init(struct dmar_domain *domain, int guest_width)
3848 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3849 spin_lock_init(&domain->iommu_lock);
3851 domain_reserve_special_ranges(domain);
3853 /* calculate AGAW */
3854 domain->gaw = guest_width;
3855 adjust_width = guestwidth_to_adjustwidth(guest_width);
3856 domain->agaw = width_to_agaw(adjust_width);
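/*
 * Worked example (assuming the usual VT-d geometry of 4KiB pages and
 * 9-bit levels): for the default guest_width of 48, (48 - 12) is already
 * a multiple of LEVEL_STRIDE, so adjust_width stays 48 and
 * width_to_agaw(48) yields agaw = (48 - 30) / 9 = 2, i.e. a 4-level page
 * table covering 30 + 2 * 9 = 48 bits of IOVA space.
 */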
3858 INIT_LIST_HEAD(&domain->devices);
3860 domain->iommu_count = 0;
3861 domain->iommu_coherency = 0;
3862 domain->iommu_snooping = 0;
3863 domain->iommu_superpage = 0;
3864 domain->max_addr = 0;
3867 /* always allocate the top pgd */
3868 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
3871 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3875 static void iommu_free_vm_domain(struct dmar_domain *domain)
3877 unsigned long flags;
3878 struct dmar_drhd_unit *drhd;
3879 struct intel_iommu *iommu;
3881 unsigned long ndomains;
3883 for_each_active_iommu(iommu, drhd) {
3884 ndomains = cap_ndoms(iommu->cap);
3885 for_each_set_bit(i, iommu->domain_ids, ndomains) {
3886 if (iommu->domains[i] == domain) {
3887 spin_lock_irqsave(&iommu->lock, flags);
3888 clear_bit(i, iommu->domain_ids);
3889 iommu->domains[i] = NULL;
3890 spin_unlock_irqrestore(&iommu->lock, flags);
3897 static void vm_domain_exit(struct dmar_domain *domain)
3899 /* Domain 0 is reserved, so don't process it */
3903 vm_domain_remove_all_dev_info(domain);
3905 put_iova_domain(&domain->iovad);
3908 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3910 /* free page tables */
3911 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3913 iommu_free_vm_domain(domain);
3914 free_domain_mem(domain);
3917 static int intel_iommu_domain_init(struct iommu_domain *domain)
3919 struct dmar_domain *dmar_domain;
3921 dmar_domain = iommu_alloc_vm_domain();
3924 "intel_iommu_domain_init: dmar_domain == NULL\n");
3927 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3929 "intel_iommu_domain_init() failed\n");
3930 vm_domain_exit(dmar_domain);
3933 domain_update_iommu_cap(dmar_domain);
3934 domain->priv = dmar_domain;
3936 domain->geometry.aperture_start = 0;
3937 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
3938 domain->geometry.force_aperture = true;
3943 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
3945 struct dmar_domain *dmar_domain = domain->priv;
3947 domain->priv = NULL;
3948 vm_domain_exit(dmar_domain);
3951 static int intel_iommu_attach_device(struct iommu_domain *domain,
3954 struct dmar_domain *dmar_domain = domain->priv;
3955 struct pci_dev *pdev = to_pci_dev(dev);
3956 struct intel_iommu *iommu;
3959 /* normally pdev is not mapped */
3960 if (unlikely(domain_context_mapped(pdev))) {
3961 struct dmar_domain *old_domain;
3963 old_domain = find_domain(pdev);
3965 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3966 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3967 domain_remove_one_dev_info(old_domain, pdev);
3969 domain_remove_dev_info(old_domain);
3973 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3978 /* check if this iommu agaw is sufficient for max mapped address */
3979 addr_width = agaw_to_width(iommu->agaw);
3980 if (addr_width > cap_mgaw(iommu->cap))
3981 addr_width = cap_mgaw(iommu->cap);
3983 if (dmar_domain->max_addr > (1LL << addr_width)) {
3984 printk(KERN_ERR "%s: iommu width (%d) is not "
3985 "sufficient for the mapped address (%llx)\n",
3986 __func__, addr_width, dmar_domain->max_addr);
3989 dmar_domain->gaw = addr_width;
3992 * Knock out extra levels of page tables if necessary
3994 while (iommu->agaw < dmar_domain->agaw) {
3995 struct dma_pte *pte;
3997 pte = dmar_domain->pgd;
3998 if (dma_pte_present(pte)) {
3999 dmar_domain->pgd = (struct dma_pte *)
4000 phys_to_virt(dma_pte_addr(pte));
4001 free_pgtable_page(pte);
4003 dmar_domain->agaw--;
4006 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
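/*
 * Illustrative note: if a domain was built with a 48-bit, 4-level page
 * table but the IOMMU it is being attached to only supports a 39-bit,
 * 3-level table, the loop above runs once: the old top-level page is
 * freed and the page referenced by its first (and, after the gaw clamp,
 * only usable) entry becomes the new pgd, leaving a 3-level table that
 * still covers the low 512GiB of IOVA space.
 */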
4009 static void intel_iommu_detach_device(struct iommu_domain *domain,
4012 struct dmar_domain *dmar_domain = domain->priv;
4013 struct pci_dev *pdev = to_pci_dev(dev);
4015 domain_remove_one_dev_info(dmar_domain, pdev);
4018 static int intel_iommu_map(struct iommu_domain *domain,
4019 unsigned long iova, phys_addr_t hpa,
4020 size_t size, int iommu_prot)
4022 struct dmar_domain *dmar_domain = domain->priv;
4027 if (iommu_prot & IOMMU_READ)
4028 prot |= DMA_PTE_READ;
4029 if (iommu_prot & IOMMU_WRITE)
4030 prot |= DMA_PTE_WRITE;
4031 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4032 prot |= DMA_PTE_SNP;
4034 max_addr = iova + size;
4035 if (dmar_domain->max_addr < max_addr) {
4038 /* check if minimum agaw is sufficient for mapped address */
4039 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
4040 if (end < max_addr) {
4041 printk(KERN_ERR "%s: iommu width (%d) is not "
4042 "sufficient for the mapped address (%llx)\n",
4043 __func__, dmar_domain->gaw, max_addr);
4046 dmar_domain->max_addr = max_addr;
4048 /* Round up size to next multiple of PAGE_SIZE, if it and
4049 the low bits of hpa would take us onto the next page */
4050 size = aligned_nrpages(hpa, size);
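/*
 * Worked example (assuming 4KiB pages): aligned_nrpages() counts the
 * VT-d pages the range actually touches.  For hpa = 0x12340ffc and
 * size = 8, the last bytes spill into the next page, so size becomes
 * 2 pages; for hpa = 0x12340000 and size = 8 it is just 1 page.
 */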
4051 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4052 hpa >> VTD_PAGE_SHIFT, size, prot);
4056 static size_t intel_iommu_unmap(struct iommu_domain *domain,
4057 unsigned long iova, size_t size)
4059 struct dmar_domain *dmar_domain = domain->priv;
4062 order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
4063 (iova + size - 1) >> VTD_PAGE_SHIFT);
4065 if (dmar_domain->max_addr == iova + size)
4066 dmar_domain->max_addr = iova;
4068 return PAGE_SIZE << order;
4071 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4074 struct dmar_domain *dmar_domain = domain->priv;
4075 struct dma_pte *pte;
4078 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
4080 phys = dma_pte_addr(pte);
4085 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4088 struct dmar_domain *dmar_domain = domain->priv;
4090 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4091 return dmar_domain->iommu_snooping;
4092 if (cap == IOMMU_CAP_INTR_REMAP)
4093 return irq_remapping_enabled;
4098 #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
4100 static int intel_iommu_add_device(struct device *dev)
4102 struct pci_dev *pdev = to_pci_dev(dev);
4103 struct pci_dev *bridge, *dma_pdev = NULL;
4104 struct iommu_group *group;
4107 if (!device_to_iommu(pci_domain_nr(pdev->bus),
4108 pdev->bus->number, pdev->devfn))
4111 bridge = pci_find_upstream_pcie_bridge(pdev);
4113 if (pci_is_pcie(bridge))
4114 dma_pdev = pci_get_domain_bus_and_slot(
4115 pci_domain_nr(pdev->bus),
4116 bridge->subordinate->number, 0);
4118 dma_pdev = pci_dev_get(bridge);
4120 dma_pdev = pci_dev_get(pdev);
4122 /* Account for quirked devices */
4123 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
4126 * If it's a multifunction device that does not support our
4127 * required ACS flags, add it to the same group as the lowest-numbered
4128 * function that also does not support the required ACS flags.
4130 if (dma_pdev->multifunction &&
4131 !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
4132 u8 i, slot = PCI_SLOT(dma_pdev->devfn);
4134 for (i = 0; i < 8; i++) {
4135 struct pci_dev *tmp;
4137 tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
4141 if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
4142 swap_pci_ref(&dma_pdev, tmp);
4150 * Devices on the root bus go through the iommu. If that's not us,
4151 * find the next upstream device and test ACS up to the root bus.
4152 * Finding the next device may require skipping virtual buses.
4154 while (!pci_is_root_bus(dma_pdev->bus)) {
4155 struct pci_bus *bus = dma_pdev->bus;
4157 while (!bus->self) {
4158 if (!pci_is_root_bus(bus))
4164 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
4167 swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
4171 group = iommu_group_get(&dma_pdev->dev);
4172 pci_dev_put(dma_pdev);
4174 group = iommu_group_alloc();
4176 return PTR_ERR(group);
4179 ret = iommu_group_add_device(group, dev);
4181 iommu_group_put(group);
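/*
 * Illustrative scenario: consider a two-function adapter at
 * 0000:01:00.0 / 0000:01:00.1 that does not expose ACS.  When function 1
 * is added, the loop over the slot swaps dma_pdev for function 0 (the
 * lowest-numbered non-ACS function); iommu_group_get() then finds the
 * group already created for function 0, so both functions share one
 * iommu_group.  Devices behind a non-ACS PCIe switch are merged the same
 * way by walking bus->self upward until the path has ACS enabled.
 */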
4185 static void intel_iommu_remove_device(struct device *dev)
4187 iommu_group_remove_device(dev);
4190 static struct iommu_ops intel_iommu_ops = {
4191 .domain_init = intel_iommu_domain_init,
4192 .domain_destroy = intel_iommu_domain_destroy,
4193 .attach_dev = intel_iommu_attach_device,
4194 .detach_dev = intel_iommu_detach_device,
4195 .map = intel_iommu_map,
4196 .unmap = intel_iommu_unmap,
4197 .iova_to_phys = intel_iommu_iova_to_phys,
4198 .domain_has_cap = intel_iommu_domain_has_cap,
4199 .add_device = intel_iommu_add_device,
4200 .remove_device = intel_iommu_remove_device,
4201 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
4204 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4206 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4207 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4211 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4212 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4213 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4214 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4215 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4216 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4217 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4219 static void quirk_iommu_rwbf(struct pci_dev *dev)
4222 * Mobile 4 Series Chipset neglects to set RWBF capability,
4223 * but needs it. Same seems to hold for the desktop versions.
4225 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4229 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4230 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4231 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4232 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4233 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4234 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4235 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
4238 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
4239 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4240 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
4241 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
4242 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4243 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4244 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4245 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4247 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4251 if (pci_read_config_word(dev, GGC, &ggc))
4254 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
4255 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4257 } else if (dmar_map_gfx) {
4258 /* we have to ensure the gfx device is idle before we flush */
4259 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4260 intel_iommu_strict = 1;
4263 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4264 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4265 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4266 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4268 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4269 ISOCH DMAR unit for the Azalia sound device, but not give it any
4270 TLB entries, which causes it to deadlock. Check for that. We do
4271 this in a function called from init_dmars(), instead of in a PCI
4272 quirk, because we don't want to print the obnoxious "BIOS broken"
4273 message if VT-d is actually disabled.
4275 static void __init check_tylersburg_isoch(void)
4277 struct pci_dev *pdev;
4278 uint32_t vtisochctrl;
4280 /* If there's no Azalia in the system anyway, forget it. */
4281 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4286 /* System Management Registers. Might be hidden, in which case
4287 we can't do the sanity check. But that's OK, because the
4288 known-broken BIOSes _don't_ actually hide it, so far. */
4289 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4293 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4300 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4301 if (vtisochctrl & 1)
4304 /* Drop all bits other than the number of TLB entries */
4305 vtisochctrl &= 0x1c;
4307 /* If we have the recommended number of TLB entries (16), fine. */
4308 if (vtisochctrl == 0x10)
4311 /* Zero TLB entries? You get to ride the short bus to school. */
4313 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4314 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4315 dmi_get_system_info(DMI_BIOS_VENDOR),
4316 dmi_get_system_info(DMI_BIOS_VERSION),
4317 dmi_get_system_info(DMI_PRODUCT_VERSION));
4318 iommu_identity_mapping |= IDENTMAP_AZALIA;
4322 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",