2 * Copyright © 2006-2014 Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * Authors: David Woodhouse <dwmw2@infradead.org>,
14 * Ashok Raj <ashok.raj@intel.com>,
15 * Shaohua Li <shaohua.li@intel.com>,
16 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17 * Fenghua Yu <fenghua.yu@intel.com>
18 * Joerg Roedel <jroedel@suse.de>
21 #define pr_fmt(fmt) "DMAR: " fmt
23 #include <linux/init.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/export.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/memory.h>
36 #include <linux/cpu.h>
37 #include <linux/timer.h>
39 #include <linux/iova.h>
40 #include <linux/iommu.h>
41 #include <linux/intel-iommu.h>
42 #include <linux/syscore_ops.h>
43 #include <linux/tboot.h>
44 #include <linux/dmi.h>
45 #include <linux/pci-ats.h>
46 #include <linux/memblock.h>
47 #include <linux/dma-contiguous.h>
48 #include <linux/crash_dump.h>
49 #include <asm/irq_remapping.h>
50 #include <asm/cacheflush.h>
51 #include <asm/iommu.h>
53 #include "irq_remapping.h"
55 #define ROOT_SIZE VTD_PAGE_SIZE
56 #define CONTEXT_SIZE VTD_PAGE_SIZE
58 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
59 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
60 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
61 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
63 #define IOAPIC_RANGE_START (0xfee00000)
64 #define IOAPIC_RANGE_END (0xfeefffff)
65 #define IOVA_START_ADDR (0x1000)
67 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
69 #define MAX_AGAW_WIDTH 64
70 #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
72 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
73 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
75 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
76 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
77 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
78 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
79 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
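/*
 * Worked example, assuming VTD_PAGE_SHIFT == 12: for the default 48-bit
 * guest address width, __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1 ==
 * 0xfffffffff, so DOMAIN_MAX_ADDR(48) == 0xfffffffff000. On a 64-bit
 * kernel the min_t() against (unsigned long)-1 changes nothing; on a
 * 32-bit kernel it clamps DOMAIN_MAX_PFN to 0xffffffff.
 */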
81 /* IO virtual address start page frame number */
82 #define IOVA_START_PFN (1)
84 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
85 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
86 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
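/*
 * For example, with 4KiB pages (PAGE_SHIFT == 12), DMA_32BIT_PFN is
 * IOVA_PFN(0xffffffff) == 0xfffff, i.e. the last page frame that still
 * lies entirely below 4GiB, and DMA_64BIT_PFN is (1ULL << 52) - 1.
 */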
88 /* page table handling */
89 #define LEVEL_STRIDE (9)
90 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
93 * This bitmap is used to advertise the page sizes our hardware supports
94 * to the IOMMU core, which will then use this information to split
95 * physically contiguous memory regions it is mapping into page sizes that we support.
98 * Traditionally the IOMMU core just handed us the mappings directly,
99 * after making sure the size is a power-of-two multiple of 4KiB and that the
100 * mapping has natural alignment.
102 * To retain this behavior, we currently advertise that we support
103 * all page sizes that are a power-of-two multiple of 4KiB.
105 * If at some point we'd like to utilize the IOMMU core's new behavior,
106 * we could change this to advertise the real page sizes we support.
108 #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
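/*
 * ~0xFFFUL clears bits 0-11, so the advertised set is every power-of-two
 * size from 4KiB (bit 12) upwards; e.g. 4KiB, 8KiB, 2MiB and 1GiB are all
 * reported as supported, matching the behaviour described above.
 */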
110 static inline int agaw_to_level(int agaw)
115 static inline int agaw_to_width(int agaw)
117 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
120 static inline int width_to_agaw(int width)
122 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
125 static inline unsigned int level_to_offset_bits(int level)
127 return (level - 1) * LEVEL_STRIDE;
130 static inline int pfn_level_offset(unsigned long pfn, int level)
132 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
135 static inline unsigned long level_mask(int level)
137 return -1UL << level_to_offset_bits(level);
140 static inline unsigned long level_size(int level)
142 return 1UL << level_to_offset_bits(level);
145 static inline unsigned long align_to_level(unsigned long pfn, int level)
147 return (pfn + level_size(level) - 1) & level_mask(level);
150 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
152 return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
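/*
 * Worked example with LEVEL_STRIDE == 9 and 4KiB VT-d pages: the default
 * 48-bit width maps to agaw 2 (width_to_agaw(48) == DIV_ROUND_UP(18, 9)),
 * and agaw_to_width(2) == min(30 + 18, 64) == 48 round-trips it. At level
 * 2, level_to_offset_bits() == 9, so pfn_level_offset(pfn, 2) ==
 * (pfn >> 9) & 0x1ff and level_size(2) == 512, i.e. one level-2 entry
 * spans 512 * 4KiB == 2MiB.
 */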
155 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
156 are never going to work. */
157 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
159 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
162 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
164 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
166 static inline unsigned long page_to_dma_pfn(struct page *pg)
168 return mm_to_dma_pfn(page_to_pfn(pg));
170 static inline unsigned long virt_to_dma_pfn(void *p)
172 return page_to_dma_pfn(virt_to_page(p));
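/*
 * With 4KiB MM pages (PAGE_SHIFT == VTD_PAGE_SHIFT == 12) these helpers
 * are identity conversions. On configurations with larger MM pages, e.g.
 * a 64KiB PAGE_SIZE, one MM pfn corresponds to 16 VT-d pfns, so
 * mm_to_dma_pfn(n) == n << 4 and dma_to_mm_pfn() shifts back down.
 */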
175 /* global iommu list, set NULL for ignored DMAR units */
176 static struct intel_iommu **g_iommus;
178 static void __init check_tylersburg_isoch(void);
179 static int rwbf_quirk;
182 * set to 1 to panic the kernel if VT-d cannot be successfully enabled
183 * (used when the kernel is launched with TXT)
185 static int force_on = 0;
186 int intel_iommu_tboot_noforce;
191 * 12-63: Context Ptr (12 - (haw-1))
198 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
201 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
204 static phys_addr_t root_entry_lctp(struct root_entry *re)
209 return re->lo & VTD_PAGE_MASK;
213 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
216 static phys_addr_t root_entry_uctp(struct root_entry *re)
221 return re->hi & VTD_PAGE_MASK;
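/*
 * A root_entry is two 64-bit words (lo/hi), so ROOT_ENTRY_NR above is
 * 4096 / 16 == 256: one entry per PCI bus number. root_entry_lctp() and
 * root_entry_uctp() return the 4KiB-aligned physical addresses of the
 * lower and upper context tables, which (in extended/ECS mode) cover
 * devfns 0x00-0x7f and 0x80-0xff respectively.
 */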
226 * 1: fault processing disable
227 * 2-3: translation type
228 * 12-63: address space root
234 struct context_entry {
239 static inline void context_clear_pasid_enable(struct context_entry *context)
241 context->lo &= ~(1ULL << 11);
244 static inline bool context_pasid_enabled(struct context_entry *context)
246 return !!(context->lo & (1ULL << 11));
249 static inline void context_set_copied(struct context_entry *context)
251 context->hi |= (1ull << 3);
254 static inline bool context_copied(struct context_entry *context)
256 return !!(context->hi & (1ULL << 3));
259 static inline bool __context_present(struct context_entry *context)
261 return (context->lo & 1);
264 static inline bool context_present(struct context_entry *context)
266 return context_pasid_enabled(context) ?
267 __context_present(context) :
268 __context_present(context) && !context_copied(context);
271 static inline void context_set_present(struct context_entry *context)
276 static inline void context_set_fault_enable(struct context_entry *context)
278 context->lo &= (((u64)-1) << 2) | 1;
281 static inline void context_set_translation_type(struct context_entry *context,
284 context->lo &= (((u64)-1) << 4) | 3;
285 context->lo |= (value & 3) << 2;
288 static inline void context_set_address_root(struct context_entry *context,
291 context->lo &= ~VTD_PAGE_MASK;
292 context->lo |= value & VTD_PAGE_MASK;
295 static inline void context_set_address_width(struct context_entry *context,
298 context->hi |= value & 7;
301 static inline void context_set_domain_id(struct context_entry *context,
304 context->hi |= (value & ((1 << 16) - 1)) << 8;
307 static inline int context_domain_id(struct context_entry *c)
309 return((c->hi >> 8) & 0xffff);
312 static inline void context_clear_entry(struct context_entry *context)
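/*
 * Putting the helpers above together (bit positions per those helpers:
 * lo[0] present, lo[1] fault-processing disable, lo[3:2] translation type,
 * lo[63:12] address space root; hi[2:0] address width, hi[23:8] domain id),
 * domain_context_mapping_one() later builds a present entry roughly as:
 *
 *	context_clear_entry(context);
 *	context_set_domain_id(context, did);
 *	context_set_address_root(context, virt_to_phys(pgd));
 *	context_set_address_width(context, iommu->agaw);
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 */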
325 * 12-63: Host physical address
331 static inline void dma_clear_pte(struct dma_pte *pte)
336 static inline u64 dma_pte_addr(struct dma_pte *pte)
339 return pte->val & VTD_PAGE_MASK;
341 /* Must have a full atomic 64-bit read */
342 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
346 static inline bool dma_pte_present(struct dma_pte *pte)
348 return (pte->val & 3) != 0;
351 static inline bool dma_pte_superpage(struct dma_pte *pte)
353 return (pte->val & DMA_PTE_LARGE_PAGE);
356 static inline int first_pte_in_page(struct dma_pte *pte)
358 return !((unsigned long)pte & ~VTD_PAGE_MASK);
362 * This domain is a statically identity-mapped domain.
363 * 1. This domain creates a static 1:1 mapping of all usable memory.
364 * 2. It maps to each iommu if successful.
365 * 3. Each iommu maps to this domain if successful.
367 static struct dmar_domain *si_domain;
368 static int hw_pass_through = 1;
371 * Domain represents a virtual machine; more than one device
372 * across iommus may be owned by one domain, e.g. a kvm guest.
374 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
376 /* si_domain contains multiple devices */
377 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
379 #define for_each_domain_iommu(idx, domain) \
380 for (idx = 0; idx < g_num_of_iommus; idx++) \
381 if (domain->iommu_refcnt[idx])
384 int nid; /* node id */
386 unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
387 /* Refcount of devices per iommu */
390 u16 iommu_did[DMAR_UNITS_SUPPORTED];
391 /* Domain ids per IOMMU. Use u16 since
392 * domain ids are 16 bit wide according
393 * to VT-d spec, section 9.3 */
395 bool has_iotlb_device;
396 struct list_head devices; /* all devices' list */
397 struct iova_domain iovad; /* iova's that belong to this domain */
399 struct dma_pte *pgd; /* virtual address */
400 int gaw; /* max guest address width */
402 /* adjusted guest address width, 0 is level 2 30-bit */
405 int flags; /* flags to find out type of domain */
407 int iommu_coherency;/* indicate coherency of iommu access */
408 int iommu_snooping; /* indicate snooping control feature*/
409 int iommu_count; /* reference count of iommu */
410 int iommu_superpage;/* Level of superpages supported:
411 0 == 4KiB (no superpages), 1 == 2MiB,
412 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
413 u64 max_addr; /* maximum mapped address */
415 struct iommu_domain domain; /* generic domain data structure for
419 /* PCI domain-device relationship */
420 struct device_domain_info {
421 struct list_head link; /* link to domain siblings */
422 struct list_head global; /* link to global list */
423 u8 bus; /* PCI bus number */
424 u8 devfn; /* PCI devfn number */
425 u8 pasid_supported:3;
432 struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
433 struct intel_iommu *iommu; /* IOMMU used by this device */
434 struct dmar_domain *domain; /* pointer to domain */
437 struct dmar_rmrr_unit {
438 struct list_head list; /* list of rmrr units */
439 struct acpi_dmar_header *hdr; /* ACPI header */
440 u64 base_address; /* reserved base address*/
441 u64 end_address; /* reserved end address */
442 struct dmar_dev_scope *devices; /* target devices */
443 int devices_cnt; /* target device count */
444 struct iommu_resv_region *resv; /* reserved region handle */
447 struct dmar_atsr_unit {
448 struct list_head list; /* list of ATSR units */
449 struct acpi_dmar_header *hdr; /* ACPI header */
450 struct dmar_dev_scope *devices; /* target devices */
451 int devices_cnt; /* target device count */
452 u8 include_all:1; /* include all ports */
455 static LIST_HEAD(dmar_atsr_units);
456 static LIST_HEAD(dmar_rmrr_units);
458 #define for_each_rmrr_units(rmrr) \
459 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
461 static void flush_unmaps_timeout(unsigned long data);
463 struct deferred_flush_entry {
464 unsigned long iova_pfn;
465 unsigned long nrpages;
466 struct dmar_domain *domain;
467 struct page *freelist;
470 #define HIGH_WATER_MARK 250
471 struct deferred_flush_table {
473 struct deferred_flush_entry entries[HIGH_WATER_MARK];
476 struct deferred_flush_data {
479 struct timer_list timer;
481 struct deferred_flush_table *tables;
484 static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
486 /* number of registered IOMMUs; used to size and index g_iommus */
487 static int g_num_of_iommus;
489 static void domain_exit(struct dmar_domain *domain);
490 static void domain_remove_dev_info(struct dmar_domain *domain);
491 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
493 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
494 static void domain_context_clear(struct intel_iommu *iommu,
496 static int domain_detach_iommu(struct dmar_domain *domain,
497 struct intel_iommu *iommu);
499 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
500 int dmar_disabled = 0;
502 int dmar_disabled = 1;
503 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
505 int intel_iommu_enabled = 0;
506 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
508 static int dmar_map_gfx = 1;
509 static int dmar_forcedac;
510 static int intel_iommu_strict;
511 static int intel_iommu_superpage = 1;
512 static int intel_iommu_ecs = 1;
513 static int intel_iommu_pasid28;
514 static int iommu_identity_mapping;
516 #define IDENTMAP_ALL 1
517 #define IDENTMAP_GFX 2
518 #define IDENTMAP_AZALIA 4
520 /* Broadwell and Skylake have broken ECS support — normal so-called "second
521 * level" translation of DMA requests-without-PASID doesn't actually happen
522 * unless you also set the NESTE bit in an extended context-entry. Which of
523 * course means that SVM doesn't work because it's trying to do nested
524 * translation of the physical addresses it finds in the process page tables,
525 * through the IOVA->phys mapping found in the "second level" page tables.
527 * The VT-d specification was retroactively changed to change the definition
528 * of the capability bits and pretend that Broadwell/Skylake never happened...
529 * but unfortunately the wrong bit was changed. It's ECS which is broken, but
530 * for some reason it was the PASID capability bit which was redefined (from
531 * bit 28 on BDW/SKL to bit 40 in future).
533 * So our test for ECS needs to eschew those implementations which set the old
534 * PASID capability bit 28, since those are the ones on which ECS is broken.
535 * Unless we are working around the 'pasid28' limitations, that is, by putting
536 * the device into passthrough mode for normal DMA and thus masking the bug.
538 #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
539 (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
540 /* PASID support is thus enabled if ECS is enabled and *either* of the old
541 * or new capability bits are set. */
542 #define pasid_enabled(iommu) (ecs_enabled(iommu) && \
543 (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
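/*
 * The net effect, as a quick truth table for ecs_enabled():
 *
 *   intel_iommu_ecs  ecap_ecs  ecap_broken_pasid  pasid28   ecs_enabled
 *         1             1             0              -          yes
 *         1             1             1              0          no   (BDW/SKL)
 *         1             1             1              1          yes  (workaround)
 *         0             -             -              -          no
 *
 * pasid_enabled() then additionally requires either the old (bit 28) or
 * the new PASID capability bit to be set.
 */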
545 int intel_iommu_gfx_mapped;
546 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
548 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
549 static DEFINE_SPINLOCK(device_domain_lock);
550 static LIST_HEAD(device_domain_list);
552 const struct iommu_ops intel_iommu_ops;
554 static bool translation_pre_enabled(struct intel_iommu *iommu)
556 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
559 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
561 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
564 static void init_translation_status(struct intel_iommu *iommu)
568 gsts = readl(iommu->reg + DMAR_GSTS_REG);
569 if (gsts & DMA_GSTS_TES)
570 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
573 /* Convert a generic 'struct iommu_domain' to a private 'struct dmar_domain' */
574 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
576 return container_of(dom, struct dmar_domain, domain);
579 static int __init intel_iommu_setup(char *str)
584 if (!strncmp(str, "on", 2)) {
586 pr_info("IOMMU enabled\n");
587 } else if (!strncmp(str, "off", 3)) {
589 pr_info("IOMMU disabled\n");
590 } else if (!strncmp(str, "igfx_off", 8)) {
592 pr_info("Disable GFX device mapping\n");
593 } else if (!strncmp(str, "forcedac", 8)) {
594 pr_info("Forcing DAC for PCI devices\n");
596 } else if (!strncmp(str, "strict", 6)) {
597 pr_info("Disable batched IOTLB flush\n");
598 intel_iommu_strict = 1;
599 } else if (!strncmp(str, "sp_off", 6)) {
600 pr_info("Disable supported super page\n");
601 intel_iommu_superpage = 0;
602 } else if (!strncmp(str, "ecs_off", 7)) {
604 "Intel-IOMMU: disable extended context table support\n");
606 } else if (!strncmp(str, "pasid28", 7)) {
608 "Intel-IOMMU: enable pre-production PASID support\n");
609 intel_iommu_pasid28 = 1;
610 iommu_identity_mapping |= IDENTMAP_GFX;
611 } else if (!strncmp(str, "tboot_noforce", 13)) {
613 "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
614 intel_iommu_tboot_noforce = 1;
617 str += strcspn(str, ",");
623 __setup("intel_iommu=", intel_iommu_setup);
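/*
 * The options above combine on the kernel command line, separated by
 * commas, e.g.:
 *
 *	intel_iommu=on
 *	intel_iommu=on,strict,sp_off
 *	intel_iommu=on,tboot_noforce
 *	intel_iommu=off
 */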
625 static struct kmem_cache *iommu_domain_cache;
626 static struct kmem_cache *iommu_devinfo_cache;
628 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
630 struct dmar_domain **domains;
633 domains = iommu->domains[idx];
637 return domains[did & 0xff];
640 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
641 struct dmar_domain *domain)
643 struct dmar_domain **domains;
646 if (!iommu->domains[idx]) {
647 size_t size = 256 * sizeof(struct dmar_domain *);
648 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
651 domains = iommu->domains[idx];
652 if (WARN_ON(!domains))
655 domains[did & 0xff] = domain;
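/*
 * Domain-id lookup is thus a two-level radix: the first level is indexed
 * by the high byte of the 16-bit domain id (did >> 8 in the full source,
 * not visible in this excerpt) and the second level, 256 pointers
 * allocated on demand above, by the low byte. An IOMMU advertising the
 * full 65536 domain ids therefore needs many small on-demand allocations
 * rather than one contiguous 512KiB pointer array.
 */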
658 static inline void *alloc_pgtable_page(int node)
663 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
665 vaddr = page_address(page);
669 static inline void free_pgtable_page(void *vaddr)
671 free_page((unsigned long)vaddr);
674 static inline void *alloc_domain_mem(void)
676 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
679 static void free_domain_mem(void *vaddr)
681 kmem_cache_free(iommu_domain_cache, vaddr);
684 static inline void * alloc_devinfo_mem(void)
686 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
689 static inline void free_devinfo_mem(void *vaddr)
691 kmem_cache_free(iommu_devinfo_cache, vaddr);
694 static inline int domain_type_is_vm(struct dmar_domain *domain)
696 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
699 static inline int domain_type_is_si(struct dmar_domain *domain)
701 return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
704 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
706 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
707 DOMAIN_FLAG_STATIC_IDENTITY);
710 static inline int domain_pfn_supported(struct dmar_domain *domain,
713 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
715 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
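/*
 * For example, a domain with agaw == 2 has agaw_to_width() == 48, so
 * addr_width == 36 and any DMA pfn at or above (1UL << 36), i.e. any
 * address at or above 256TiB, is rejected as unsupported.
 */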
718 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
723 sagaw = cap_sagaw(iommu->cap);
724 for (agaw = width_to_agaw(max_gaw);
726 if (test_bit(agaw, &sagaw))
734 * Calculate max SAGAW for each iommu.
736 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
738 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
742 * calculate agaw for each iommu.
743 * "SAGAW" may be different across iommus; use a default agaw, and
744 * get a smaller supported agaw for iommus that don't support the default agaw.
746 int iommu_calculate_agaw(struct intel_iommu *iommu)
748 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
751 /* This function only returns a single iommu in a domain */
752 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
756 /* si_domain and vm domain should not get here. */
757 BUG_ON(domain_type_is_vm_or_si(domain));
758 for_each_domain_iommu(iommu_id, domain)
761 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
764 return g_iommus[iommu_id];
767 static void domain_update_iommu_coherency(struct dmar_domain *domain)
769 struct dmar_drhd_unit *drhd;
770 struct intel_iommu *iommu;
774 domain->iommu_coherency = 1;
776 for_each_domain_iommu(i, domain) {
778 if (!ecap_coherent(g_iommus[i]->ecap)) {
779 domain->iommu_coherency = 0;
786 /* No hardware attached; use lowest common denominator */
788 for_each_active_iommu(iommu, drhd) {
789 if (!ecap_coherent(iommu->ecap)) {
790 domain->iommu_coherency = 0;
797 static int domain_update_iommu_snooping(struct intel_iommu *skip)
799 struct dmar_drhd_unit *drhd;
800 struct intel_iommu *iommu;
804 for_each_active_iommu(iommu, drhd) {
806 if (!ecap_sc_support(iommu->ecap)) {
817 static int domain_update_iommu_superpage(struct intel_iommu *skip)
819 struct dmar_drhd_unit *drhd;
820 struct intel_iommu *iommu;
823 if (!intel_iommu_superpage) {
827 /* set iommu_superpage to the smallest common denominator */
829 for_each_active_iommu(iommu, drhd) {
831 mask &= cap_super_page_val(iommu->cap);
841 /* Some capabilities may be different across iommus */
842 static void domain_update_iommu_cap(struct dmar_domain *domain)
844 domain_update_iommu_coherency(domain);
845 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
846 domain->iommu_superpage = domain_update_iommu_superpage(NULL);
849 static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
850 u8 bus, u8 devfn, int alloc)
852 struct root_entry *root = &iommu->root_entry[bus];
853 struct context_entry *context;
857 if (ecs_enabled(iommu)) {
865 context = phys_to_virt(*entry & VTD_PAGE_MASK);
867 unsigned long phy_addr;
871 context = alloc_pgtable_page(iommu->node);
875 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
876 phy_addr = virt_to_phys((void *)context);
877 *entry = phy_addr | 1;
878 __iommu_flush_cache(iommu, entry, sizeof(*entry));
880 return &context[devfn];
883 static int iommu_dummy(struct device *dev)
885 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
888 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
890 struct dmar_drhd_unit *drhd = NULL;
891 struct intel_iommu *iommu;
893 struct pci_dev *ptmp, *pdev = NULL;
897 if (iommu_dummy(dev))
900 if (dev_is_pci(dev)) {
901 struct pci_dev *pf_pdev;
903 pdev = to_pci_dev(dev);
904 /* VFs aren't listed in scope tables; we need to look up
905 * the PF instead to find the IOMMU. */
906 pf_pdev = pci_physfn(pdev);
908 segment = pci_domain_nr(pdev->bus);
909 } else if (has_acpi_companion(dev))
910 dev = &ACPI_COMPANION(dev)->dev;
913 for_each_active_iommu(iommu, drhd) {
914 if (pdev && segment != drhd->segment)
917 for_each_active_dev_scope(drhd->devices,
918 drhd->devices_cnt, i, tmp) {
920 /* For a VF use its original BDF# not that of the PF
921 * which we used for the IOMMU lookup. Strictly speaking
922 * we could do this for all PCI devices; we only need to
923 * get the BDF# from the scope table for ACPI matches. */
924 if (pdev && pdev->is_virtfn)
927 *bus = drhd->devices[i].bus;
928 *devfn = drhd->devices[i].devfn;
932 if (!pdev || !dev_is_pci(tmp))
935 ptmp = to_pci_dev(tmp);
936 if (ptmp->subordinate &&
937 ptmp->subordinate->number <= pdev->bus->number &&
938 ptmp->subordinate->busn_res.end >= pdev->bus->number)
942 if (pdev && drhd->include_all) {
944 *bus = pdev->bus->number;
945 *devfn = pdev->devfn;
956 static void domain_flush_cache(struct dmar_domain *domain,
957 void *addr, int size)
959 if (!domain->iommu_coherency)
960 clflush_cache_range(addr, size);
963 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
965 struct context_entry *context;
969 spin_lock_irqsave(&iommu->lock, flags);
970 context = iommu_context_addr(iommu, bus, devfn, 0);
972 ret = context_present(context);
973 spin_unlock_irqrestore(&iommu->lock, flags);
977 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
979 struct context_entry *context;
982 spin_lock_irqsave(&iommu->lock, flags);
983 context = iommu_context_addr(iommu, bus, devfn, 0);
985 context_clear_entry(context);
986 __iommu_flush_cache(iommu, context, sizeof(*context));
988 spin_unlock_irqrestore(&iommu->lock, flags);
991 static void free_context_table(struct intel_iommu *iommu)
995 struct context_entry *context;
997 spin_lock_irqsave(&iommu->lock, flags);
998 if (!iommu->root_entry) {
1001 for (i = 0; i < ROOT_ENTRY_NR; i++) {
1002 context = iommu_context_addr(iommu, i, 0, 0);
1004 free_pgtable_page(context);
1006 if (!ecs_enabled(iommu))
1009 context = iommu_context_addr(iommu, i, 0x80, 0);
1011 free_pgtable_page(context);
1014 free_pgtable_page(iommu->root_entry);
1015 iommu->root_entry = NULL;
1017 spin_unlock_irqrestore(&iommu->lock, flags);
1020 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
1021 unsigned long pfn, int *target_level)
1023 struct dma_pte *parent, *pte = NULL;
1024 int level = agaw_to_level(domain->agaw);
1027 BUG_ON(!domain->pgd);
1029 if (!domain_pfn_supported(domain, pfn))
1030 /* Address beyond IOMMU's addressing capabilities. */
1033 parent = domain->pgd;
1038 offset = pfn_level_offset(pfn, level);
1039 pte = &parent[offset];
1040 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
1042 if (level == *target_level)
1045 if (!dma_pte_present(pte)) {
1048 tmp_page = alloc_pgtable_page(domain->nid);
1053 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
1054 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
1055 if (cmpxchg64(&pte->val, 0ULL, pteval))
1056 /* Someone else set it while we were thinking; use theirs. */
1057 free_pgtable_page(tmp_page);
1059 domain_flush_cache(domain, pte, sizeof(*pte));
1064 parent = phys_to_virt(dma_pte_addr(pte));
1069 *target_level = level;
1075 /* return the pte of an address at a specific level */
1076 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1078 int level, int *large_page)
1080 struct dma_pte *parent, *pte = NULL;
1081 int total = agaw_to_level(domain->agaw);
1084 parent = domain->pgd;
1085 while (level <= total) {
1086 offset = pfn_level_offset(pfn, total);
1087 pte = &parent[offset];
1091 if (!dma_pte_present(pte)) {
1092 *large_page = total;
1096 if (dma_pte_superpage(pte)) {
1097 *large_page = total;
1101 parent = phys_to_virt(dma_pte_addr(pte));
1107 /* clear last level pte; a tlb flush should follow */
1108 static void dma_pte_clear_range(struct dmar_domain *domain,
1109 unsigned long start_pfn,
1110 unsigned long last_pfn)
1112 unsigned int large_page = 1;
1113 struct dma_pte *first_pte, *pte;
1115 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1116 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1117 BUG_ON(start_pfn > last_pfn);
1119 /* we don't need lock here; nobody else touches the iova range */
1122 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
1124 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
1129 start_pfn += lvl_to_nr_pages(large_page);
1131 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1133 domain_flush_cache(domain, first_pte,
1134 (void *)pte - (void *)first_pte);
1136 } while (start_pfn && start_pfn <= last_pfn);
1139 static void dma_pte_free_level(struct dmar_domain *domain, int level,
1140 struct dma_pte *pte, unsigned long pfn,
1141 unsigned long start_pfn, unsigned long last_pfn)
1143 pfn = max(start_pfn, pfn);
1144 pte = &pte[pfn_level_offset(pfn, level)];
1147 unsigned long level_pfn;
1148 struct dma_pte *level_pte;
1150 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1153 level_pfn = pfn & level_mask(level);
1154 level_pte = phys_to_virt(dma_pte_addr(pte));
1157 dma_pte_free_level(domain, level - 1, level_pte,
1158 level_pfn, start_pfn, last_pfn);
1160 /* If range covers entire pagetable, free it */
1161 if (!(start_pfn > level_pfn ||
1162 last_pfn < level_pfn + level_size(level) - 1)) {
1164 domain_flush_cache(domain, pte, sizeof(*pte));
1165 free_pgtable_page(level_pte);
1168 pfn += level_size(level);
1169 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1172 /* clear last level (leaf) ptes and free page table pages. */
1173 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1174 unsigned long start_pfn,
1175 unsigned long last_pfn)
1177 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1178 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1179 BUG_ON(start_pfn > last_pfn);
1181 dma_pte_clear_range(domain, start_pfn, last_pfn);
1183 /* We don't need lock here; nobody else touches the iova range */
1184 dma_pte_free_level(domain, agaw_to_level(domain->agaw),
1185 domain->pgd, 0, start_pfn, last_pfn);
1188 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1189 free_pgtable_page(domain->pgd);
1194 /* When a page at a given level is being unlinked from its parent, we don't
1195 need to *modify* it at all. All we need to do is make a list of all the
1196 pages which can be freed just as soon as we've flushed the IOTLB and we
1197 know the hardware page-walk will no longer touch them.
1198 The 'pte' argument is the *parent* PTE, pointing to the page that is to be freed.
1200 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1201 int level, struct dma_pte *pte,
1202 struct page *freelist)
1206 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1207 pg->freelist = freelist;
1213 pte = page_address(pg);
1215 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1216 freelist = dma_pte_list_pagetables(domain, level - 1,
1219 } while (!first_pte_in_page(pte));
1224 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1225 struct dma_pte *pte, unsigned long pfn,
1226 unsigned long start_pfn,
1227 unsigned long last_pfn,
1228 struct page *freelist)
1230 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1232 pfn = max(start_pfn, pfn);
1233 pte = &pte[pfn_level_offset(pfn, level)];
1236 unsigned long level_pfn;
1238 if (!dma_pte_present(pte))
1241 level_pfn = pfn & level_mask(level);
1243 /* If range covers entire pagetable, free it */
1244 if (start_pfn <= level_pfn &&
1245 last_pfn >= level_pfn + level_size(level) - 1) {
1246 /* These subordinate page tables are going away entirely. Don't
1247 bother to clear them; we're just going to *free* them. */
1248 if (level > 1 && !dma_pte_superpage(pte))
1249 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1255 } else if (level > 1) {
1256 /* Recurse down into a level that isn't *entirely* obsolete */
1257 freelist = dma_pte_clear_level(domain, level - 1,
1258 phys_to_virt(dma_pte_addr(pte)),
1259 level_pfn, start_pfn, last_pfn,
1263 pfn += level_size(level);
1264 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1267 domain_flush_cache(domain, first_pte,
1268 (void *)++last_pte - (void *)first_pte);
1273 /* We can't just free the pages because the IOMMU may still be walking
1274 the page tables, and may have cached the intermediate levels. The
1275 pages can only be freed after the IOTLB flush has been done. */
1276 static struct page *domain_unmap(struct dmar_domain *domain,
1277 unsigned long start_pfn,
1278 unsigned long last_pfn)
1280 struct page *freelist = NULL;
1282 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1283 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1284 BUG_ON(start_pfn > last_pfn);
1286 /* we don't need lock here; nobody else touches the iova range */
1287 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1288 domain->pgd, 0, start_pfn, last_pfn, NULL);
1291 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1292 struct page *pgd_page = virt_to_page(domain->pgd);
1293 pgd_page->freelist = freelist;
1294 freelist = pgd_page;
1302 static void dma_free_pagelist(struct page *freelist)
1306 while ((pg = freelist)) {
1307 freelist = pg->freelist;
1308 free_pgtable_page(page_address(pg));
1312 /* iommu handling */
1313 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1315 struct root_entry *root;
1316 unsigned long flags;
1318 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1320 pr_err("Allocating root entry for %s failed\n",
1325 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1327 spin_lock_irqsave(&iommu->lock, flags);
1328 iommu->root_entry = root;
1329 spin_unlock_irqrestore(&iommu->lock, flags);
1334 static void iommu_set_root_entry(struct intel_iommu *iommu)
1340 addr = virt_to_phys(iommu->root_entry);
1341 if (ecs_enabled(iommu))
1342 addr |= DMA_RTADDR_RTT;
1344 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1345 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1347 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1349 /* Make sure hardware complete it */
1350 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1351 readl, (sts & DMA_GSTS_RTPS), sts);
1353 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1356 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1361 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1364 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1365 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1367 /* Make sure hardware complete it */
1368 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1369 readl, (!(val & DMA_GSTS_WBFS)), val);
1371 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1374 /* return value determines if we need a write buffer flush */
1375 static void __iommu_flush_context(struct intel_iommu *iommu,
1376 u16 did, u16 source_id, u8 function_mask,
1383 case DMA_CCMD_GLOBAL_INVL:
1384 val = DMA_CCMD_GLOBAL_INVL;
1386 case DMA_CCMD_DOMAIN_INVL:
1387 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1389 case DMA_CCMD_DEVICE_INVL:
1390 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1391 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1396 val |= DMA_CCMD_ICC;
1398 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1399 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1401 /* Make sure hardware complete it */
1402 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1403 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1405 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1408 /* return value determines if we need a write buffer flush */
1409 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1410 u64 addr, unsigned int size_order, u64 type)
1412 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1413 u64 val = 0, val_iva = 0;
1417 case DMA_TLB_GLOBAL_FLUSH:
1418 /* global flush doesn't need to set IVA_REG */
1419 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1421 case DMA_TLB_DSI_FLUSH:
1422 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1424 case DMA_TLB_PSI_FLUSH:
1425 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1426 /* IH bit is passed in as part of address */
1427 val_iva = size_order | addr;
1432 /* Note: set drain read/write */
1435 * This is probably meant to be extra safe. It looks like we can
1436 * ignore it without any impact.
1438 if (cap_read_drain(iommu->cap))
1439 val |= DMA_TLB_READ_DRAIN;
1441 if (cap_write_drain(iommu->cap))
1442 val |= DMA_TLB_WRITE_DRAIN;
1444 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1445 /* Note: Only uses first TLB reg currently */
1447 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1448 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1450 /* Make sure hardware complete it */
1451 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1452 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1454 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1456 /* check IOTLB invalidation granularity */
1457 if (DMA_TLB_IAIG(val) == 0)
1458 pr_err("Flush IOTLB failed\n");
1459 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1460 pr_debug("TLB flush request %Lx, actual %Lx\n",
1461 (unsigned long long)DMA_TLB_IIRG(type),
1462 (unsigned long long)DMA_TLB_IAIG(val));
1465 static struct device_domain_info *
1466 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1469 struct device_domain_info *info;
1471 assert_spin_locked(&device_domain_lock);
1476 list_for_each_entry(info, &domain->devices, link)
1477 if (info->iommu == iommu && info->bus == bus &&
1478 info->devfn == devfn) {
1479 if (info->ats_supported && info->dev)
1487 static void domain_update_iotlb(struct dmar_domain *domain)
1489 struct device_domain_info *info;
1490 bool has_iotlb_device = false;
1492 assert_spin_locked(&device_domain_lock);
1494 list_for_each_entry(info, &domain->devices, link) {
1495 struct pci_dev *pdev;
1497 if (!info->dev || !dev_is_pci(info->dev))
1500 pdev = to_pci_dev(info->dev);
1501 if (pdev->ats_enabled) {
1502 has_iotlb_device = true;
1507 domain->has_iotlb_device = has_iotlb_device;
1510 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1512 struct pci_dev *pdev;
1514 assert_spin_locked(&device_domain_lock);
1516 if (!info || !dev_is_pci(info->dev))
1519 pdev = to_pci_dev(info->dev);
1521 #ifdef CONFIG_INTEL_IOMMU_SVM
1522 /* The PCIe spec, in its wisdom, declares that the behaviour of
1523 the device if you enable PASID support after ATS support is
1524 undefined. So always enable PASID support on devices which
1525 have it, even if we can't yet know if we're ever going to use it.
1527 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1528 info->pasid_enabled = 1;
1530 if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1531 info->pri_enabled = 1;
1533 if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1534 info->ats_enabled = 1;
1535 domain_update_iotlb(info->domain);
1536 info->ats_qdep = pci_ats_queue_depth(pdev);
1540 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1542 struct pci_dev *pdev;
1544 assert_spin_locked(&device_domain_lock);
1546 if (!dev_is_pci(info->dev))
1549 pdev = to_pci_dev(info->dev);
1551 if (info->ats_enabled) {
1552 pci_disable_ats(pdev);
1553 info->ats_enabled = 0;
1554 domain_update_iotlb(info->domain);
1556 #ifdef CONFIG_INTEL_IOMMU_SVM
1557 if (info->pri_enabled) {
1558 pci_disable_pri(pdev);
1559 info->pri_enabled = 0;
1561 if (info->pasid_enabled) {
1562 pci_disable_pasid(pdev);
1563 info->pasid_enabled = 0;
1568 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1569 u64 addr, unsigned mask)
1572 unsigned long flags;
1573 struct device_domain_info *info;
1575 if (!domain->has_iotlb_device)
1578 spin_lock_irqsave(&device_domain_lock, flags);
1579 list_for_each_entry(info, &domain->devices, link) {
1580 if (!info->ats_enabled)
1583 sid = info->bus << 8 | info->devfn;
1584 qdep = info->ats_qdep;
1585 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1587 spin_unlock_irqrestore(&device_domain_lock, flags);
1590 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1591 struct dmar_domain *domain,
1592 unsigned long pfn, unsigned int pages,
1595 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1596 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1597 u16 did = domain->iommu_did[iommu->seq_id];
1604 * Fall back to domain-selective flush if there is no PSI support or the size is too big.
1606 * PSI requires the flushed region to span a power-of-two number of pages, and the
1607 * base address to be naturally aligned to that size.
1609 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1610 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1613 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1617 * In caching mode, changes of pages from non-present to present require
1618 * a flush. However, device IOTLB doesn't need to be flushed in this case.
1620 if (!cap_caching_mode(iommu->cap) || !map)
1621 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
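/*
 * Example: flushing 3 pages at pfn 0x1000 gives mask ==
 * ilog2(__roundup_pow_of_two(3)) == 2, so the PSI covers a naturally
 * aligned 4-page range starting at 0x1000 << VTD_PAGE_SHIFT. Had mask
 * exceeded cap_max_amask_val(), the code above would instead have fallen
 * back to a domain-selective flush.
 */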
1625 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1628 unsigned long flags;
1630 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1631 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1632 pmen &= ~DMA_PMEN_EPM;
1633 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1635 /* wait for the protected region status bit to clear */
1636 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1637 readl, !(pmen & DMA_PMEN_PRS), pmen);
1639 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1642 static void iommu_enable_translation(struct intel_iommu *iommu)
1645 unsigned long flags;
1647 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1648 iommu->gcmd |= DMA_GCMD_TE;
1649 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1651 /* Make sure hardware complete it */
1652 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1653 readl, (sts & DMA_GSTS_TES), sts);
1655 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1658 static void iommu_disable_translation(struct intel_iommu *iommu)
1663 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1664 iommu->gcmd &= ~DMA_GCMD_TE;
1665 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1667 /* Make sure hardware complete it */
1668 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1669 readl, (!(sts & DMA_GSTS_TES)), sts);
1671 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1675 static int iommu_init_domains(struct intel_iommu *iommu)
1677 u32 ndomains, nlongs;
1680 ndomains = cap_ndoms(iommu->cap);
1681 pr_debug("%s: Number of Domains supported <%d>\n",
1682 iommu->name, ndomains);
1683 nlongs = BITS_TO_LONGS(ndomains);
1685 spin_lock_init(&iommu->lock);
1687 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1688 if (!iommu->domain_ids) {
1689 pr_err("%s: Allocating domain id array failed\n",
1694 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
1695 iommu->domains = kzalloc(size, GFP_KERNEL);
1697 if (iommu->domains) {
1698 size = 256 * sizeof(struct dmar_domain *);
1699 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1702 if (!iommu->domains || !iommu->domains[0]) {
1703 pr_err("%s: Allocating domain array failed\n",
1705 kfree(iommu->domain_ids);
1706 kfree(iommu->domains);
1707 iommu->domain_ids = NULL;
1708 iommu->domains = NULL;
1715 * If Caching mode is set, then invalid translations are tagged
1716 * with domain-id 0, hence we need to pre-allocate it. We also
1717 * use domain-id 0 as a marker for non-allocated domain-id, so
1718 * make sure it is not used for a real domain.
1720 set_bit(0, iommu->domain_ids);
1725 static void disable_dmar_iommu(struct intel_iommu *iommu)
1727 struct device_domain_info *info, *tmp;
1728 unsigned long flags;
1730 if (!iommu->domains || !iommu->domain_ids)
1734 spin_lock_irqsave(&device_domain_lock, flags);
1735 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1736 struct dmar_domain *domain;
1738 if (info->iommu != iommu)
1741 if (!info->dev || !info->domain)
1744 domain = info->domain;
1746 __dmar_remove_one_dev_info(info);
1748 if (!domain_type_is_vm_or_si(domain)) {
1750 * The domain_exit() function can't be called under
1751 * device_domain_lock, as it takes this lock itself.
1752 * So release the lock here and re-run the loop afterwards.
1755 spin_unlock_irqrestore(&device_domain_lock, flags);
1756 domain_exit(domain);
1760 spin_unlock_irqrestore(&device_domain_lock, flags);
1762 if (iommu->gcmd & DMA_GCMD_TE)
1763 iommu_disable_translation(iommu);
1766 static void free_dmar_iommu(struct intel_iommu *iommu)
1768 if ((iommu->domains) && (iommu->domain_ids)) {
1769 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
1772 for (i = 0; i < elems; i++)
1773 kfree(iommu->domains[i]);
1774 kfree(iommu->domains);
1775 kfree(iommu->domain_ids);
1776 iommu->domains = NULL;
1777 iommu->domain_ids = NULL;
1780 g_iommus[iommu->seq_id] = NULL;
1782 /* free context mapping */
1783 free_context_table(iommu);
1785 #ifdef CONFIG_INTEL_IOMMU_SVM
1786 if (pasid_enabled(iommu)) {
1787 if (ecap_prs(iommu->ecap))
1788 intel_svm_finish_prq(iommu);
1789 intel_svm_free_pasid_tables(iommu);
1794 static struct dmar_domain *alloc_domain(int flags)
1796 struct dmar_domain *domain;
1798 domain = alloc_domain_mem();
1802 memset(domain, 0, sizeof(*domain));
1804 domain->flags = flags;
1805 domain->has_iotlb_device = false;
1806 INIT_LIST_HEAD(&domain->devices);
1811 /* Must be called with iommu->lock */
1812 static int domain_attach_iommu(struct dmar_domain *domain,
1813 struct intel_iommu *iommu)
1815 unsigned long ndomains;
1818 assert_spin_locked(&device_domain_lock);
1819 assert_spin_locked(&iommu->lock);
1821 domain->iommu_refcnt[iommu->seq_id] += 1;
1822 domain->iommu_count += 1;
1823 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1824 ndomains = cap_ndoms(iommu->cap);
1825 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1827 if (num >= ndomains) {
1828 pr_err("%s: No free domain ids\n", iommu->name);
1829 domain->iommu_refcnt[iommu->seq_id] -= 1;
1830 domain->iommu_count -= 1;
1834 set_bit(num, iommu->domain_ids);
1835 set_iommu_domain(iommu, num, domain);
1837 domain->iommu_did[iommu->seq_id] = num;
1838 domain->nid = iommu->node;
1840 domain_update_iommu_cap(domain);
1846 static int domain_detach_iommu(struct dmar_domain *domain,
1847 struct intel_iommu *iommu)
1849 int num, count = INT_MAX;
1851 assert_spin_locked(&device_domain_lock);
1852 assert_spin_locked(&iommu->lock);
1854 domain->iommu_refcnt[iommu->seq_id] -= 1;
1855 count = --domain->iommu_count;
1856 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1857 num = domain->iommu_did[iommu->seq_id];
1858 clear_bit(num, iommu->domain_ids);
1859 set_iommu_domain(iommu, num, NULL);
1861 domain_update_iommu_cap(domain);
1862 domain->iommu_did[iommu->seq_id] = 0;
1868 static struct iova_domain reserved_iova_list;
1869 static struct lock_class_key reserved_rbtree_key;
1871 static int dmar_init_reserved_ranges(void)
1873 struct pci_dev *pdev = NULL;
1877 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1880 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1881 &reserved_rbtree_key);
1883 /* IOAPIC ranges shouldn't be accessed by DMA */
1884 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1885 IOVA_PFN(IOAPIC_RANGE_END));
1887 pr_err("Reserve IOAPIC range failed\n");
1891 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1892 for_each_pci_dev(pdev) {
1895 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1896 r = &pdev->resource[i];
1897 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1899 iova = reserve_iova(&reserved_iova_list,
1903 pr_err("Reserve iova failed\n");
1911 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1913 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1916 static inline int guestwidth_to_adjustwidth(int gaw)
1919 int r = (gaw - 12) % 9;
1930 static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1933 int adjust_width, agaw;
1934 unsigned long sagaw;
1936 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1938 domain_reserve_special_ranges(domain);
1940 /* calculate AGAW */
1941 if (guest_width > cap_mgaw(iommu->cap))
1942 guest_width = cap_mgaw(iommu->cap);
1943 domain->gaw = guest_width;
1944 adjust_width = guestwidth_to_adjustwidth(guest_width);
1945 agaw = width_to_agaw(adjust_width);
1946 sagaw = cap_sagaw(iommu->cap);
1947 if (!test_bit(agaw, &sagaw)) {
1948 /* hardware doesn't support it, choose a bigger one */
1949 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1950 agaw = find_next_bit(&sagaw, 5, agaw);
1954 domain->agaw = agaw;
1956 if (ecap_coherent(iommu->ecap))
1957 domain->iommu_coherency = 1;
1959 domain->iommu_coherency = 0;
1961 if (ecap_sc_support(iommu->ecap))
1962 domain->iommu_snooping = 1;
1964 domain->iommu_snooping = 0;
1966 if (intel_iommu_superpage)
1967 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1969 domain->iommu_superpage = 0;
1971 domain->nid = iommu->node;
1973 /* always allocate the top pgd */
1974 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1977 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1981 static void domain_exit(struct dmar_domain *domain)
1983 struct page *freelist = NULL;
1985 /* Domain 0 is reserved, so don't process it */
1989 /* Flush any lazy unmaps that may reference this domain */
1990 if (!intel_iommu_strict) {
1993 for_each_possible_cpu(cpu)
1994 flush_unmaps_timeout(cpu);
1997 /* Remove associated devices and clear attached or cached domains */
1999 domain_remove_dev_info(domain);
2003 put_iova_domain(&domain->iovad);
2005 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
2007 dma_free_pagelist(freelist);
2009 free_domain_mem(domain);
2012 static int domain_context_mapping_one(struct dmar_domain *domain,
2013 struct intel_iommu *iommu,
2016 u16 did = domain->iommu_did[iommu->seq_id];
2017 int translation = CONTEXT_TT_MULTI_LEVEL;
2018 struct device_domain_info *info = NULL;
2019 struct context_entry *context;
2020 unsigned long flags;
2021 struct dma_pte *pgd;
2026 if (hw_pass_through && domain_type_is_si(domain))
2027 translation = CONTEXT_TT_PASS_THROUGH;
2029 pr_debug("Set context mapping for %02x:%02x.%d\n",
2030 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
2032 BUG_ON(!domain->pgd);
2034 spin_lock_irqsave(&device_domain_lock, flags);
2035 spin_lock(&iommu->lock);
2038 context = iommu_context_addr(iommu, bus, devfn, 1);
2043 if (context_present(context))
2047 * For kdump cases, old valid entries may be cached due to the
2048 * in-flight DMA and copied pgtable, but there is no unmapping
2049 * behaviour for them, thus we need an explicit cache flush for
2050 * the newly-mapped device. For kdump, at this point, the device
2051 * is supposed to finish reset at its driver probe stage, so no
2052 * in-flight DMA will exist, and we don't need to worry about it hereafter.
2055 if (context_copied(context)) {
2056 u16 did_old = context_domain_id(context);
2058 if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
2059 iommu->flush.flush_context(iommu, did_old,
2060 (((u16)bus) << 8) | devfn,
2061 DMA_CCMD_MASK_NOBIT,
2062 DMA_CCMD_DEVICE_INVL);
2063 iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2070 context_clear_entry(context);
2071 context_set_domain_id(context, did);
2074 * Skip top levels of page tables for an iommu which has a smaller agaw
2075 * than the default. Unnecessary for PT mode.
2077 if (translation != CONTEXT_TT_PASS_THROUGH) {
2078 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
2080 pgd = phys_to_virt(dma_pte_addr(pgd));
2081 if (!dma_pte_present(pgd))
2085 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2086 if (info && info->ats_supported)
2087 translation = CONTEXT_TT_DEV_IOTLB;
2089 translation = CONTEXT_TT_MULTI_LEVEL;
2091 context_set_address_root(context, virt_to_phys(pgd));
2092 context_set_address_width(context, iommu->agaw);
2095 * In pass through mode, AW must be programmed to
2096 * indicate the largest AGAW value supported by
2097 * hardware. And ASR is ignored by hardware.
2099 context_set_address_width(context, iommu->msagaw);
2102 context_set_translation_type(context, translation);
2103 context_set_fault_enable(context);
2104 context_set_present(context);
2105 domain_flush_cache(domain, context, sizeof(*context));
2108 * It's a non-present to present mapping. If hardware doesn't cache
2109 * non-present entries we only need to flush the write-buffer. If it
2110 * _does_ cache non-present entries, then it does so in the special
2111 * domain #0, which we have to flush:
2113 if (cap_caching_mode(iommu->cap)) {
2114 iommu->flush.flush_context(iommu, 0,
2115 (((u16)bus) << 8) | devfn,
2116 DMA_CCMD_MASK_NOBIT,
2117 DMA_CCMD_DEVICE_INVL);
2118 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
2120 iommu_flush_write_buffer(iommu);
2122 iommu_enable_dev_iotlb(info);
2127 spin_unlock(&iommu->lock);
2128 spin_unlock_irqrestore(&device_domain_lock, flags);
2133 struct domain_context_mapping_data {
2134 struct dmar_domain *domain;
2135 struct intel_iommu *iommu;
2138 static int domain_context_mapping_cb(struct pci_dev *pdev,
2139 u16 alias, void *opaque)
2141 struct domain_context_mapping_data *data = opaque;
2143 return domain_context_mapping_one(data->domain, data->iommu,
2144 PCI_BUS_NUM(alias), alias & 0xff);
2148 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2150 struct intel_iommu *iommu;
2152 struct domain_context_mapping_data data;
2154 iommu = device_to_iommu(dev, &bus, &devfn);
2158 if (!dev_is_pci(dev))
2159 return domain_context_mapping_one(domain, iommu, bus, devfn);
2161 data.domain = domain;
2164 return pci_for_each_dma_alias(to_pci_dev(dev),
2165 &domain_context_mapping_cb, &data);
2168 static int domain_context_mapped_cb(struct pci_dev *pdev,
2169 u16 alias, void *opaque)
2171 struct intel_iommu *iommu = opaque;
2173 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2176 static int domain_context_mapped(struct device *dev)
2178 struct intel_iommu *iommu;
2181 iommu = device_to_iommu(dev, &bus, &devfn);
2185 if (!dev_is_pci(dev))
2186 return device_context_mapped(iommu, bus, devfn);
2188 return !pci_for_each_dma_alias(to_pci_dev(dev),
2189 domain_context_mapped_cb, iommu);
2192 /* Returns a number of VTD pages, but aligned to MM page size */
2193 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2196 host_addr &= ~PAGE_MASK;
2197 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
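/*
 * Example, assuming 4KiB MM pages: host_addr == 0x1234 and size == 0x2000
 * gives an in-page offset of 0x234, PAGE_ALIGN(0x234 + 0x2000) == 0x3000,
 * so the buffer needs 3 VT-d pages even though it is only two pages long.
 */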
2200 /* Return largest possible superpage level for a given mapping */
2201 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2202 unsigned long iov_pfn,
2203 unsigned long phy_pfn,
2204 unsigned long pages)
2206 int support, level = 1;
2207 unsigned long pfnmerge;
2209 support = domain->iommu_superpage;
2211 /* To use a large page, the virtual *and* physical addresses
2212 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2213 of them will mean we have to use smaller pages. So just
2214 merge them and check both at once. */
2215 pfnmerge = iov_pfn | phy_pfn;
2217 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2218 pages >>= VTD_STRIDE_SHIFT;
2221 pfnmerge >>= VTD_STRIDE_SHIFT;
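/*
 * For instance, a request whose iov_pfn and phy_pfn both have their low
 * 9 bits clear (2MiB aligned) and which spans at least 512 pages can be
 * mapped with a level-2 (2MiB) superpage, provided domain->iommu_superpage
 * is at least 1; with 1GiB alignment, at least 262144 pages and superpage
 * support of 2 or more, level 3 is possible.
 */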
2228 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2229 struct scatterlist *sg, unsigned long phys_pfn,
2230 unsigned long nr_pages, int prot)
2232 struct dma_pte *first_pte = NULL, *pte = NULL;
2233 phys_addr_t uninitialized_var(pteval);
2234 unsigned long sg_res = 0;
2235 unsigned int largepage_lvl = 0;
2236 unsigned long lvl_pages = 0;
2238 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2240 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2243 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2247 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2250 while (nr_pages > 0) {
2254 sg_res = aligned_nrpages(sg->offset, sg->length);
2255 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2256 sg->dma_length = sg->length;
2257 pteval = page_to_phys(sg_page(sg)) | prot;
2258 phys_pfn = pteval >> VTD_PAGE_SHIFT;
2262 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2264 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2267 /* It is a large page */
2268 if (largepage_lvl > 1) {
2269 unsigned long nr_superpages, end_pfn;
2271 pteval |= DMA_PTE_LARGE_PAGE;
2272 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2274 nr_superpages = sg_res / lvl_pages;
2275 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2278 * Ensure that old small page tables are
2279 * removed to make room for superpage(s).
2281 dma_pte_free_pagetable(domain, iov_pfn, end_pfn);
2283 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2287 /* We don't need lock here, nobody else
2288 * touches the iova range
2290 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2292 static int dumps = 5;
2293 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2294 iov_pfn, tmp, (unsigned long long)pteval);
2297 debug_dma_dump_mappings(NULL);
2302 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2304 BUG_ON(nr_pages < lvl_pages);
2305 BUG_ON(sg_res < lvl_pages);
2307 nr_pages -= lvl_pages;
2308 iov_pfn += lvl_pages;
2309 phys_pfn += lvl_pages;
2310 pteval += lvl_pages * VTD_PAGE_SIZE;
2311 sg_res -= lvl_pages;
2313 /* If the next PTE would be the first in a new page, then we
2314 need to flush the cache on the entries we've just written.
2315 And then we'll need to recalculate 'pte', so clear it and
2316 let it get set again in the if (!pte) block above.
2318 If we're done (!nr_pages) we need to flush the cache too.
2320 Also if we've been setting superpages, we may need to
2321 recalculate 'pte' and switch back to smaller pages for the
2322 end of the mapping, if the trailing size is not enough to
2323 use another superpage (i.e. sg_res < lvl_pages). */
2325 if (!nr_pages || first_pte_in_page(pte) ||
2326 (largepage_lvl > 1 && sg_res < lvl_pages)) {
2327 domain_flush_cache(domain, first_pte,
2328 (void *)pte - (void *)first_pte);
2332 if (!sg_res && nr_pages)
2338 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2339 struct scatterlist *sg, unsigned long nr_pages,
2342 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2345 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2346 unsigned long phys_pfn, unsigned long nr_pages,
2349 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2352 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2357 clear_context_table(iommu, bus, devfn);
2358 iommu->flush.flush_context(iommu, 0, 0, 0,
2359 DMA_CCMD_GLOBAL_INVL);
2360 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2363 static inline void unlink_domain_info(struct device_domain_info *info)
2365 assert_spin_locked(&device_domain_lock);
2366 list_del(&info->link);
2367 list_del(&info->global);
2369 info->dev->archdata.iommu = NULL;
2372 static void domain_remove_dev_info(struct dmar_domain *domain)
2374 struct device_domain_info *info, *tmp;
2375 unsigned long flags;
2377 spin_lock_irqsave(&device_domain_lock, flags);
2378 list_for_each_entry_safe(info, tmp, &domain->devices, link)
2379 __dmar_remove_one_dev_info(info);
2380 spin_unlock_irqrestore(&device_domain_lock, flags);
2385 * Note: we use struct device->archdata.iommu to store the per-device info
2387 static struct dmar_domain *find_domain(struct device *dev)
2389 struct device_domain_info *info;
2391 /* No lock here, assumes no domain exit in normal case */
2392 info = dev->archdata.iommu;
2394 return info->domain;
2398 static inline struct device_domain_info *
2399 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2401 struct device_domain_info *info;
2403 list_for_each_entry(info, &device_domain_list, global)
2404 if (info->iommu->segment == segment && info->bus == bus &&
2405 info->devfn == devfn)
2411 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2414 struct dmar_domain *domain)
2416 struct dmar_domain *found = NULL;
2417 struct device_domain_info *info;
2418 unsigned long flags;
2421 info = alloc_devinfo_mem();
2426 info->devfn = devfn;
2427 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2428 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2431 info->domain = domain;
2432 info->iommu = iommu;
2434 if (dev && dev_is_pci(dev)) {
2435 struct pci_dev *pdev = to_pci_dev(info->dev);
2437 if (ecap_dev_iotlb_support(iommu->ecap) &&
2438 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2439 dmar_find_matched_atsr_unit(pdev))
2440 info->ats_supported = 1;
2442 if (ecs_enabled(iommu)) {
2443 if (pasid_enabled(iommu)) {
2444 int features = pci_pasid_features(pdev);
2446 info->pasid_supported = features | 1;
2449 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2450 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2451 info->pri_supported = 1;
2455 spin_lock_irqsave(&device_domain_lock, flags);
2457 found = find_domain(dev);
2460 struct device_domain_info *info2;
2461 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2463 found = info2->domain;
2469 spin_unlock_irqrestore(&device_domain_lock, flags);
2470 free_devinfo_mem(info);
2471 /* Caller must free the original domain */
2475 spin_lock(&iommu->lock);
2476 ret = domain_attach_iommu(domain, iommu);
2477 spin_unlock(&iommu->lock);
2480 spin_unlock_irqrestore(&device_domain_lock, flags);
2481 free_devinfo_mem(info);
2485 list_add(&info->link, &domain->devices);
2486 list_add(&info->global, &device_domain_list);
2488 dev->archdata.iommu = info;
2489 spin_unlock_irqrestore(&device_domain_lock, flags);
2491 if (dev && domain_context_mapping(domain, dev)) {
2492 pr_err("Domain context map for %s failed\n", dev_name(dev));
2493 dmar_remove_one_dev_info(domain, dev);
2500 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2502 *(u16 *)opaque = alias;
2506 static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2508 struct device_domain_info *info = NULL;
2509 struct dmar_domain *domain = NULL;
2510 struct intel_iommu *iommu;
2511 u16 req_id, dma_alias;
2512 unsigned long flags;
2515 iommu = device_to_iommu(dev, &bus, &devfn);
2519 req_id = ((u16)bus << 8) | devfn;
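/*
 * req_id is the 16-bit PCI requester ID (bus in the high byte,
 * devfn in the low byte); e.g. bus 0x03, device 2, function 0
 * (devfn 0x10) yields req_id 0x0310 (illustrative values).
 */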
2521 if (dev_is_pci(dev)) {
2522 struct pci_dev *pdev = to_pci_dev(dev);
2524 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2526 spin_lock_irqsave(&device_domain_lock, flags);
2527 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2528 PCI_BUS_NUM(dma_alias),
2531 iommu = info->iommu;
2532 domain = info->domain;
2534 spin_unlock_irqrestore(&device_domain_lock, flags);
2536 /* DMA alias already has a domain, use it */
2541 /* Allocate and initialize new domain for the device */
2542 domain = alloc_domain(0);
2545 if (domain_init(domain, iommu, gaw)) {
2546 domain_exit(domain);
2555 static struct dmar_domain *set_domain_for_dev(struct device *dev,
2556 struct dmar_domain *domain)
2558 struct intel_iommu *iommu;
2559 struct dmar_domain *tmp;
2560 u16 req_id, dma_alias;
2563 iommu = device_to_iommu(dev, &bus, &devfn);
2567 req_id = ((u16)bus << 8) | devfn;
2569 if (dev_is_pci(dev)) {
2570 struct pci_dev *pdev = to_pci_dev(dev);
2572 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2574 /* register PCI DMA alias device */
2575 if (req_id != dma_alias) {
2576 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2577 dma_alias & 0xff, NULL, domain);
2579 if (!tmp || tmp != domain)
2584 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2585 if (!tmp || tmp != domain)
2591 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2593 struct dmar_domain *domain, *tmp;
2595 domain = find_domain(dev);
2599 domain = find_or_alloc_domain(dev, gaw);
2603 tmp = set_domain_for_dev(dev, domain);
2604 if (!tmp || domain != tmp) {
2605 domain_exit(domain);
2614 static int iommu_domain_identity_map(struct dmar_domain *domain,
2615 unsigned long long start,
2616 unsigned long long end)
2618 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2619 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2621 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2622 dma_to_mm_pfn(last_vpfn))) {
2623 pr_err("Reserving iova failed\n");
2627 pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2629 * RMRR range might overlap with the physical memory range; clear any existing mapping first
2632 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
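/*
 * Identity mapping: the same PFN is used as both the IOVA and the
 * physical frame below, so DMA addresses equal physical addresses
 * throughout the reserved range.
 */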
2634 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2635 last_vpfn - first_vpfn + 1,
2636 DMA_PTE_READ|DMA_PTE_WRITE);
2639 static int domain_prepare_identity_map(struct device *dev,
2640 struct dmar_domain *domain,
2641 unsigned long long start,
2642 unsigned long long end)
2644 /* For _hardware_ passthrough, don't bother. But for software
2645 passthrough, we do it anyway -- it may indicate a memory
2646 range which is reserved in E820, and so didn't get set
2647 up to start with in si_domain */
2648 if (domain == si_domain && hw_pass_through) {
2649 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2650 dev_name(dev), start, end);
2654 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2655 dev_name(dev), start, end);
2658 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2659 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2660 dmi_get_system_info(DMI_BIOS_VENDOR),
2661 dmi_get_system_info(DMI_BIOS_VERSION),
2662 dmi_get_system_info(DMI_PRODUCT_VERSION));
2666 if (end >> agaw_to_width(domain->agaw)) {
2667 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2668 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2669 agaw_to_width(domain->agaw),
2670 dmi_get_system_info(DMI_BIOS_VENDOR),
2671 dmi_get_system_info(DMI_BIOS_VERSION),
2672 dmi_get_system_info(DMI_PRODUCT_VERSION));
2676 return iommu_domain_identity_map(domain, start, end);
2679 static int iommu_prepare_identity_map(struct device *dev,
2680 unsigned long long start,
2681 unsigned long long end)
2683 struct dmar_domain *domain;
2686 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2690 ret = domain_prepare_identity_map(dev, domain, start, end);
2692 domain_exit(domain);
2697 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2700 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2702 return iommu_prepare_identity_map(dev, rmrr->base_address,
2706 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2707 static inline void iommu_prepare_isa(void)
2709 struct pci_dev *pdev;
2712 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2716 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
2717 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2720 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
2725 static inline void iommu_prepare_isa(void)
2729 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2731 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2733 static int __init si_domain_init(int hw)
2737 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2741 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2742 domain_exit(si_domain);
2746 pr_debug("Identity mapping domain allocated\n");
2751 for_each_online_node(nid) {
2752 unsigned long start_pfn, end_pfn;
2755 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2756 ret = iommu_domain_identity_map(si_domain,
2757 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2766 static int identity_mapping(struct device *dev)
2768 struct device_domain_info *info;
2770 if (likely(!iommu_identity_mapping))
2773 info = dev->archdata.iommu;
2774 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2775 return (info->domain == si_domain);
2780 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2782 struct dmar_domain *ndomain;
2783 struct intel_iommu *iommu;
2786 iommu = device_to_iommu(dev, &bus, &devfn);
2790 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2791 if (ndomain != domain)
2797 static bool device_has_rmrr(struct device *dev)
2799 struct dmar_rmrr_unit *rmrr;
2804 for_each_rmrr_units(rmrr) {
2806 * Return TRUE if this RMRR contains the device that is passed in.
2809 for_each_active_dev_scope(rmrr->devices,
2810 rmrr->devices_cnt, i, tmp)
2821 * There are a couple cases where we need to restrict the functionality of
2822 * devices associated with RMRRs. The first is when evaluating a device for
2823 * identity mapping because problems exist when devices are moved in and out
2824 * of domains and their respective RMRR information is lost. This means that
2825 * a device with associated RMRRs will never be in a "passthrough" domain.
2826 * The second is use of the device through the IOMMU API. This interface
2827 * expects to have full control of the IOVA space for the device. We cannot
2828 * satisfy both the requirement that RMRR access is maintained and have an
2829 * unencumbered IOVA space. We also have no ability to quiesce the device's
2830 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2831 * We therefore prevent devices associated with an RMRR from participating in
2832 * the IOMMU API, which eliminates them from device assignment.
2834 * In both cases we assume that PCI USB devices with RMRRs have them largely
2835 * for historical reasons and that the RMRR space is not actively used post
2836 * boot. This exclusion may change if vendors begin to abuse it.
2838 * The same exception is made for graphics devices, with the requirement that
2839 * any use of the RMRR regions will be torn down before assigning the device
2842 static bool device_is_rmrr_locked(struct device *dev)
2844 if (!device_has_rmrr(dev))
2847 if (dev_is_pci(dev)) {
2848 struct pci_dev *pdev = to_pci_dev(dev);
2850 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2857 static int iommu_should_identity_map(struct device *dev, int startup)
2860 if (dev_is_pci(dev)) {
2861 struct pci_dev *pdev = to_pci_dev(dev);
2863 if (device_is_rmrr_locked(dev))
2866 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2869 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2872 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2876 * We want to start off with all devices in the 1:1 domain, and
2877 * take them out later if we find they can't access all of memory.
2879 * However, we can't do this for PCI devices behind bridges,
2880 * because all PCI devices behind the same bridge will end up
2881 * with the same source-id on their transactions.
2883 * Practically speaking, we can't change things around for these
2884 * devices at run-time, because we can't be sure there'll be no
2885 * DMA transactions in flight for any of their siblings.
2887 * So PCI devices (unless they're on the root bus) as well as
2888 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2889 * the 1:1 domain, just in _case_ one of their siblings turns out
2890 * not to be able to map all of memory.
2892 if (!pci_is_pcie(pdev)) {
2893 if (!pci_is_root_bus(pdev->bus))
2895 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2897 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2900 if (device_has_rmrr(dev))
2905 * At boot time, we don't yet know if devices will be 64-bit capable.
2906 * Assume that they will — if they turn out not to be, then we can
2907 * take them out of the 1:1 domain later.
2911 * If the device's dma_mask is less than the system's memory
2912 * size then this is not a candidate for identity mapping.
2914 u64 dma_mask = *dev->dma_mask;
2916 if (dev->coherent_dma_mask &&
2917 dev->coherent_dma_mask < dma_mask)
2918 dma_mask = dev->coherent_dma_mask;
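/*
 * dma_get_required_mask() reflects the highest physical address in
 * the system; a device whose (coherent) DMA mask cannot cover it
 * (e.g. a 32-bit-only device on a machine with RAM above 4GiB) fails
 * this check and is left out of the 1:1 domain, so it gets translated
 * instead.
 */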
2920 return dma_mask >= dma_get_required_mask(dev);
2926 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2930 if (!iommu_should_identity_map(dev, 1))
2933 ret = domain_add_dev_info(si_domain, dev);
2935 pr_info("%s identity mapping for device %s\n",
2936 hw ? "Hardware" : "Software", dev_name(dev));
2937 else if (ret == -ENODEV)
2938 /* device not associated with an iommu */
2945 static int __init iommu_prepare_static_identity_mapping(int hw)
2947 struct pci_dev *pdev = NULL;
2948 struct dmar_drhd_unit *drhd;
2949 struct intel_iommu *iommu;
2954 for_each_pci_dev(pdev) {
2955 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2960 for_each_active_iommu(iommu, drhd)
2961 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2962 struct acpi_device_physical_node *pn;
2963 struct acpi_device *adev;
2965 if (dev->bus != &acpi_bus_type)
2968 adev = to_acpi_device(dev);
2969 mutex_lock(&adev->physical_node_lock);
2970 list_for_each_entry(pn, &adev->physical_node_list, node) {
2971 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2975 mutex_unlock(&adev->physical_node_lock);
2983 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2986 * Start from a sane iommu hardware state.
2987 * If the queued invalidation is already initialized by us
2988 * (for example, while enabling interrupt-remapping) then
2989 * we already have things rolling from a sane state.
2993 * Clear any previous faults.
2995 dmar_fault(-1, iommu);
2997 * Disable queued invalidation if supported and already enabled
2998 * before OS handover.
3000 dmar_disable_qi(iommu);
3003 if (dmar_enable_qi(iommu)) {
3005 * Queued Invalidate not enabled, use Register Based Invalidate
3007 iommu->flush.flush_context = __iommu_flush_context;
3008 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
3009 pr_info("%s: Using Register based invalidation\n",
3012 iommu->flush.flush_context = qi_flush_context;
3013 iommu->flush.flush_iotlb = qi_flush_iotlb;
3014 pr_info("%s: Using Queued invalidation\n", iommu->name);
3018 static int copy_context_table(struct intel_iommu *iommu,
3019 struct root_entry *old_re,
3020 struct context_entry **tbl,
3023 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
3024 struct context_entry *new_ce = NULL, ce;
3025 struct context_entry *old_ce = NULL;
3026 struct root_entry re;
3027 phys_addr_t old_ce_phys;
3029 tbl_idx = ext ? bus * 2 : bus;
3030 memcpy(&re, old_re, sizeof(re));
3032 for (devfn = 0; devfn < 256; devfn++) {
3033 /* First calculate the correct index */
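/*
 * Extended context entries are twice the size of legacy ones, so a
 * single 4KiB context table only covers half of the 256 devfns; the
 * "* 2 ... % 256" arithmetic (together with tbl_idx/pos) splits each
 * bus across two tables in the extended case.
 */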
3034 idx = (ext ? devfn * 2 : devfn) % 256;
3037 /* First save what we may have and clean up */
3039 tbl[tbl_idx] = new_ce;
3040 __iommu_flush_cache(iommu, new_ce,
3050 old_ce_phys = root_entry_lctp(&re);
3052 old_ce_phys = root_entry_uctp(&re);
3055 if (ext && devfn == 0) {
3056 /* No LCTP, try UCTP */
3065 old_ce = memremap(old_ce_phys, PAGE_SIZE,
3070 new_ce = alloc_pgtable_page(iommu->node);
3077 /* Now copy the context entry */
3078 memcpy(&ce, old_ce + idx, sizeof(ce));
3080 if (!__context_present(&ce))
3083 did = context_domain_id(&ce);
3084 if (did >= 0 && did < cap_ndoms(iommu->cap))
3085 set_bit(did, iommu->domain_ids);
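/*
 * Reserving the domain-id used by the old kernel keeps this kernel
 * from handing the same DID to a new domain while the copied
 * context entry is still live.
 */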
3088 * We need a marker for copied context entries. This
3089 * marker needs to work for the old format as well as
3090 * for extended context entries.
3092 * Bit 67 of the context entry is used. In the old
3093 * format this bit is available to software, in the
3094 * extended format it is the PGE bit, but PGE is ignored
3095 * by HW if PASIDs are disabled (and thus still available).
3098 * So disable PASIDs first and then mark the entry
3099 * copied. This means that we don't copy PASID
3100 * translations from the old kernel, but this is fine as
3101 * faults there are not fatal.
3103 context_clear_pasid_enable(&ce);
3104 context_set_copied(&ce);
3109 tbl[tbl_idx + pos] = new_ce;
3111 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3120 static int copy_translation_tables(struct intel_iommu *iommu)
3122 struct context_entry **ctxt_tbls;
3123 struct root_entry *old_rt;
3124 phys_addr_t old_rt_phys;
3125 int ctxt_table_entries;
3126 unsigned long flags;
3131 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3132 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
3133 new_ext = !!ecap_ecs(iommu->ecap);
3136 * The RTT bit can only be changed when translation is disabled,
3137 * but disabling translation means to open a window for data
3138 * corruption. So bail out and don't copy anything if we would
3139 * have to change the bit.
3144 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3148 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3152 /* This is too big for the stack - allocate it from slab */
3153 ctxt_table_entries = ext ? 512 : 256;
3155 ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
3159 for (bus = 0; bus < 256; bus++) {
3160 ret = copy_context_table(iommu, &old_rt[bus],
3161 ctxt_tbls, bus, ext);
3163 pr_err("%s: Failed to copy context table for bus %d\n",
3169 spin_lock_irqsave(&iommu->lock, flags);
3171 /* Context tables are copied, now write them to the root_entry table */
3172 for (bus = 0; bus < 256; bus++) {
3173 int idx = ext ? bus * 2 : bus;
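/* Bit 0 of a root-entry pointer is the Present bit, hence the "| 1" below. */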
3176 if (ctxt_tbls[idx]) {
3177 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3178 iommu->root_entry[bus].lo = val;
3181 if (!ext || !ctxt_tbls[idx + 1])
3184 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3185 iommu->root_entry[bus].hi = val;
3188 spin_unlock_irqrestore(&iommu->lock, flags);
3192 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3202 static int __init init_dmars(void)
3204 struct dmar_drhd_unit *drhd;
3205 struct dmar_rmrr_unit *rmrr;
3206 bool copied_tables = false;
3208 struct intel_iommu *iommu;
3214 * initialize and program root entry to not present
3217 for_each_drhd_unit(drhd) {
3219 * lock not needed as this is only incremented in the single-
3220 * threaded kernel __init code path; all other accesses are read-only
3223 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3227 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3230 /* Preallocate enough resources for IOMMU hot-addition */
3231 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3232 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3234 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3237 pr_err("Allocating global iommu array failed\n");
3242 for_each_possible_cpu(cpu) {
3243 struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush,
3246 dfd->tables = kzalloc(g_num_of_iommus *
3247 sizeof(struct deferred_flush_table),
3254 spin_lock_init(&dfd->lock);
3255 setup_timer(&dfd->timer, flush_unmaps_timeout, cpu);
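/*
 * The per-CPU deferred-flush state initialized above batches unmaps
 * into per-IOMMU tables; they are drained either by this timer or
 * once a queue reaches HIGH_WATER_MARK (see add_unmap() and
 * flush_unmaps() below).
 */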
3258 for_each_active_iommu(iommu, drhd) {
3259 g_iommus[iommu->seq_id] = iommu;
3261 intel_iommu_init_qi(iommu);
3263 ret = iommu_init_domains(iommu);
3267 init_translation_status(iommu);
3269 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3270 iommu_disable_translation(iommu);
3271 clear_translation_pre_enabled(iommu);
3272 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3278 * we could share the same root & context tables
3279 * among all IOMMUs. Need to split them later.
3281 ret = iommu_alloc_root_entry(iommu);
3285 if (translation_pre_enabled(iommu)) {
3286 pr_info("Translation already enabled - trying to copy translation structures\n");
3288 ret = copy_translation_tables(iommu);
3291 * We found the IOMMU with translation
3292 * enabled - but failed to copy over the
3293 * old root-entry table. Try to proceed
3294 * by disabling translation now and
3295 * allocating a clean root-entry table.
3296 * This might cause DMAR faults, but
3297 * probably the dump will still succeed.
3299 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3301 iommu_disable_translation(iommu);
3302 clear_translation_pre_enabled(iommu);
3304 pr_info("Copied translation tables from previous kernel for %s\n",
3306 copied_tables = true;
3310 if (!ecap_pass_through(iommu->ecap))
3311 hw_pass_through = 0;
3312 #ifdef CONFIG_INTEL_IOMMU_SVM
3313 if (pasid_enabled(iommu))
3314 intel_svm_alloc_pasid_tables(iommu);
3319 * Now that qi is enabled on all iommus, set the root entry and flush
3320 * caches. This is required on some Intel X58 chipsets, otherwise the
3321 * flush_context function will loop forever and the boot hangs.
3323 for_each_active_iommu(iommu, drhd) {
3324 iommu_flush_write_buffer(iommu);
3325 iommu_set_root_entry(iommu);
3326 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3327 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3330 if (iommu_pass_through)
3331 iommu_identity_mapping |= IDENTMAP_ALL;
3333 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3334 iommu_identity_mapping |= IDENTMAP_GFX;
3337 check_tylersburg_isoch();
3339 if (iommu_identity_mapping) {
3340 ret = si_domain_init(hw_pass_through);
3347 * If we copied translations from a previous kernel in the kdump
3348 * case, we can not assign the devices to domains now, as that
3349 * would eliminate the old mappings. So skip this part and defer
3350 * the assignment to device driver initialization time.
3356 * If pass through is not set or not enabled, set up context entries for
3357 * identity mappings for RMRR, GFX and ISA, possibly falling back to static
3358 * identity mapping if iommu_identity_mapping is set.
3360 if (iommu_identity_mapping) {
3361 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3363 pr_crit("Failed to setup IOMMU pass-through\n");
3369 * for each dev attached to rmrr
3371 * locate drhd for dev, alloc domain for dev
3372 * allocate free domain
3373 * allocate page table entries for rmrr
3374 * if context not allocated for bus
3375 * allocate and init context
3376 * set present in root table for this bus
3377 * init context with domain, translation etc
3381 pr_info("Setting RMRR:\n");
3382 for_each_rmrr_units(rmrr) {
3383 /* some BIOSes list non-existent devices in the DMAR table. */
3384 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3386 ret = iommu_prepare_rmrr_dev(rmrr, dev);
3388 pr_err("Mapping reserved region failed\n");
3392 iommu_prepare_isa();
3399 * global invalidate context cache
3400 * global invalidate iotlb
3401 * enable translation
3403 for_each_iommu(iommu, drhd) {
3404 if (drhd->ignored) {
3406 * we always have to disable PMRs or DMA may fail on this device
3410 iommu_disable_protect_mem_regions(iommu);
3414 iommu_flush_write_buffer(iommu);
3416 #ifdef CONFIG_INTEL_IOMMU_SVM
3417 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
3418 ret = intel_svm_enable_prq(iommu);
3423 ret = dmar_set_interrupt(iommu);
3427 if (!translation_pre_enabled(iommu))
3428 iommu_enable_translation(iommu);
3430 iommu_disable_protect_mem_regions(iommu);
3436 for_each_active_iommu(iommu, drhd) {
3437 disable_dmar_iommu(iommu);
3438 free_dmar_iommu(iommu);
3441 for_each_possible_cpu(cpu)
3442 kfree(per_cpu_ptr(&deferred_flush, cpu)->tables);
3448 /* This takes a number of _MM_ pages, not VTD pages */
3449 static unsigned long intel_alloc_iova(struct device *dev,
3450 struct dmar_domain *domain,
3451 unsigned long nrpages, uint64_t dma_mask)
3453 unsigned long iova_pfn = 0;
3455 /* Restrict dma_mask to the width that the iommu can handle */
3456 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3457 /* Ensure we reserve the whole size-aligned region */
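/* (e.g. a 3-page request becomes 4 pages, so the returned IOVA range is naturally size-aligned) */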
3458 nrpages = __roundup_pow_of_two(nrpages);
3460 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3462 * First try to allocate an io virtual address in
3463 * DMA_BIT_MASK(32) and if that fails then try allocating from the higher range
3466 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3467 IOVA_PFN(DMA_BIT_MASK(32)));
3471 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
3472 if (unlikely(!iova_pfn)) {
3473 pr_err("Allocating %ld-page iova for %s failed",
3474 nrpages, dev_name(dev));
3481 static struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
3483 struct dmar_domain *domain, *tmp;
3484 struct dmar_rmrr_unit *rmrr;
3485 struct device *i_dev;
3488 domain = find_domain(dev);
3492 domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3496 /* We have a new domain - set up possible RMRRs for the device */
3498 for_each_rmrr_units(rmrr) {
3499 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3504 ret = domain_prepare_identity_map(dev, domain,
3508 dev_err(dev, "Mapping reserved region failed\n");
3513 tmp = set_domain_for_dev(dev, domain);
3514 if (!tmp || domain != tmp) {
3515 domain_exit(domain);
3522 pr_err("Allocating domain for %s failed\n", dev_name(dev));
3528 /* Check if the dev needs to go through non-identity map and unmap process. */
3529 static int iommu_no_mapping(struct device *dev)
3533 if (iommu_dummy(dev))
3536 if (!iommu_identity_mapping)
3539 found = identity_mapping(dev);
3541 if (iommu_should_identity_map(dev, 0))
3545 * 32 bit DMA device is removed from si_domain and falls back
3546 * to non-identity mapping.
3548 dmar_remove_one_dev_info(si_domain, dev);
3549 pr_info("32bit %s uses non-identity mapping\n",
3555 * If a 64 bit DMA device is detached from a VM, the device
3556 * is put back into si_domain for identity mapping.
3558 if (iommu_should_identity_map(dev, 0)) {
3560 ret = domain_add_dev_info(si_domain, dev);
3562 pr_info("64bit %s uses identity mapping\n",
3572 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3573 size_t size, int dir, u64 dma_mask)
3575 struct dmar_domain *domain;
3576 phys_addr_t start_paddr;
3577 unsigned long iova_pfn;
3580 struct intel_iommu *iommu;
3581 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3583 BUG_ON(dir == DMA_NONE);
3585 if (iommu_no_mapping(dev))
3588 domain = get_valid_domain_for_dev(dev);
3592 iommu = domain_get_iommu(domain);
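/*
 * aligned_nrpages() converts (paddr, size) into a page count that
 * covers any partial head/tail pages; e.g. 6KiB starting 2KiB into a
 * page spans two 4KiB pages (illustrative).
 */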
3593 size = aligned_nrpages(paddr, size);
3595 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3600 * Check if DMAR supports zero-length reads on write only mappings.
3603 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3604 !cap_zlr(iommu->cap))
3605 prot |= DMA_PTE_READ;
3606 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3607 prot |= DMA_PTE_WRITE;
3609 * paddr .. (paddr + size) might span a partial page; we should map the whole
3610 * page. Note: if two parts of one page are separately mapped, we
3611 * might have two guest_addr mappings to the same host paddr, but this
3612 * is not a big problem
3614 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3615 mm_to_dma_pfn(paddr_pfn), size, prot);
3619 /* it's a non-present to present mapping. Only flush if caching mode */
3620 if (cap_caching_mode(iommu->cap))
3621 iommu_flush_iotlb_psi(iommu, domain,
3622 mm_to_dma_pfn(iova_pfn),
3625 iommu_flush_write_buffer(iommu);
3627 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
3628 start_paddr += paddr & ~PAGE_MASK;
3633 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3634 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
3635 dev_name(dev), size, (unsigned long long)paddr, dir);
3639 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3640 unsigned long offset, size_t size,
3641 enum dma_data_direction dir,
3642 unsigned long attrs)
3644 return __intel_map_single(dev, page_to_phys(page) + offset, size,
3645 dir, *dev->dma_mask);
3648 static void flush_unmaps(struct deferred_flush_data *flush_data)
3652 flush_data->timer_on = 0;
3654 /* just flush them all */
3655 for (i = 0; i < g_num_of_iommus; i++) {
3656 struct intel_iommu *iommu = g_iommus[i];
3657 struct deferred_flush_table *flush_table =
3658 &flush_data->tables[i];
3662 if (!flush_table->next)
3665 /* In caching mode, global flushes turn emulation expensive */
3666 if (!cap_caching_mode(iommu->cap))
3667 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3668 DMA_TLB_GLOBAL_FLUSH);
3669 for (j = 0; j < flush_table->next; j++) {
3671 struct deferred_flush_entry *entry =
3672 &flush_table->entries[j];
3673 unsigned long iova_pfn = entry->iova_pfn;
3674 unsigned long nrpages = entry->nrpages;
3675 struct dmar_domain *domain = entry->domain;
3676 struct page *freelist = entry->freelist;
3678 /* On real hardware multiple invalidations are expensive */
3679 if (cap_caching_mode(iommu->cap))
3680 iommu_flush_iotlb_psi(iommu, domain,
3681 mm_to_dma_pfn(iova_pfn),
3682 nrpages, !freelist, 0);
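/*
 * The device-IOTLB invalidation below expresses its size as a power
 * of two (2^mask pages), so the page count is converted to a log2
 * mask first.
 */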
3684 mask = ilog2(nrpages);
3685 iommu_flush_dev_iotlb(domain,
3686 (uint64_t)iova_pfn << PAGE_SHIFT, mask);
3688 free_iova_fast(&domain->iovad, iova_pfn, nrpages);
3690 dma_free_pagelist(freelist);
3692 flush_table->next = 0;
3695 flush_data->size = 0;
3698 static void flush_unmaps_timeout(unsigned long cpuid)
3700 struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid);
3701 unsigned long flags;
3703 spin_lock_irqsave(&flush_data->lock, flags);
3704 flush_unmaps(flush_data);
3705 spin_unlock_irqrestore(&flush_data->lock, flags);
3708 static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
3709 unsigned long nrpages, struct page *freelist)
3711 unsigned long flags;
3712 int entry_id, iommu_id;
3713 struct intel_iommu *iommu;
3714 struct deferred_flush_entry *entry;
3715 struct deferred_flush_data *flush_data;
3717 flush_data = raw_cpu_ptr(&deferred_flush);
3719 /* Flush all CPUs' entries to avoid deferring too much. If
3720 * this becomes a bottleneck, can just flush us, and rely on
3721 * flush timer for the rest.
3723 if (flush_data->size == HIGH_WATER_MARK) {
3726 for_each_online_cpu(cpu)
3727 flush_unmaps_timeout(cpu);
3730 spin_lock_irqsave(&flush_data->lock, flags);
3732 iommu = domain_get_iommu(dom);
3733 iommu_id = iommu->seq_id;
3735 entry_id = flush_data->tables[iommu_id].next;
3736 ++(flush_data->tables[iommu_id].next);
3738 entry = &flush_data->tables[iommu_id].entries[entry_id];
3739 entry->domain = dom;
3740 entry->iova_pfn = iova_pfn;
3741 entry->nrpages = nrpages;
3742 entry->freelist = freelist;
3744 if (!flush_data->timer_on) {
3745 mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));
3746 flush_data->timer_on = 1;
3749 spin_unlock_irqrestore(&flush_data->lock, flags);
3752 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3754 struct dmar_domain *domain;
3755 unsigned long start_pfn, last_pfn;
3756 unsigned long nrpages;
3757 unsigned long iova_pfn;
3758 struct intel_iommu *iommu;
3759 struct page *freelist;
3761 if (iommu_no_mapping(dev))
3764 domain = find_domain(dev);
3767 iommu = domain_get_iommu(domain);
3769 iova_pfn = IOVA_PFN(dev_addr);
3771 nrpages = aligned_nrpages(dev_addr, size);
3772 start_pfn = mm_to_dma_pfn(iova_pfn);
3773 last_pfn = start_pfn + nrpages - 1;
3775 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3776 dev_name(dev), start_pfn, last_pfn);
3778 freelist = domain_unmap(domain, start_pfn, last_pfn);
3780 if (intel_iommu_strict) {
3781 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3782 nrpages, !freelist, 0);
3784 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
3785 dma_free_pagelist(freelist);
3787 add_unmap(domain, iova_pfn, nrpages, freelist);
3789 * queue up the release of the unmap to save the roughly 1/6th of the
3790 * cpu time used up by the iotlb flush operation...
3795 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3796 size_t size, enum dma_data_direction dir,
3797 unsigned long attrs)
3799 intel_unmap(dev, dev_addr, size);
3802 static void *intel_alloc_coherent(struct device *dev, size_t size,
3803 dma_addr_t *dma_handle, gfp_t flags,
3804 unsigned long attrs)
3806 struct page *page = NULL;
3809 size = PAGE_ALIGN(size);
3810 order = get_order(size);
3812 if (!iommu_no_mapping(dev))
3813 flags &= ~(GFP_DMA | GFP_DMA32);
3814 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3815 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3821 if (gfpflags_allow_blocking(flags)) {
3822 unsigned int count = size >> PAGE_SHIFT;
3824 page = dma_alloc_from_contiguous(dev, count, order, flags);
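/*
 * When the IOMMU is being bypassed for this device, the CMA page
 * itself must sit below the device's coherent DMA mask; if it does
 * not, release it and fall back to plain alloc_pages() below.
 */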
3825 if (page && iommu_no_mapping(dev) &&
3826 page_to_phys(page) + size > dev->coherent_dma_mask) {
3827 dma_release_from_contiguous(dev, page, count);
3833 page = alloc_pages(flags, order);
3836 memset(page_address(page), 0, size);
3838 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3840 dev->coherent_dma_mask);
3842 return page_address(page);
3843 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3844 __free_pages(page, order);
3849 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3850 dma_addr_t dma_handle, unsigned long attrs)
3853 struct page *page = virt_to_page(vaddr);
3855 size = PAGE_ALIGN(size);
3856 order = get_order(size);
3858 intel_unmap(dev, dma_handle, size);
3859 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3860 __free_pages(page, order);
3863 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3864 int nelems, enum dma_data_direction dir,
3865 unsigned long attrs)
3867 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3868 unsigned long nrpages = 0;
3869 struct scatterlist *sg;
3872 for_each_sg(sglist, sg, nelems, i) {
3873 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3876 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3879 static int intel_nontranslate_map_sg(struct device *hddev,
3880 struct scatterlist *sglist, int nelems, int dir)
3883 struct scatterlist *sg;
3885 for_each_sg(sglist, sg, nelems, i) {
3886 BUG_ON(!sg_page(sg));
3887 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3888 sg->dma_length = sg->length;
3893 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3894 enum dma_data_direction dir, unsigned long attrs)
3897 struct dmar_domain *domain;
3900 unsigned long iova_pfn;
3902 struct scatterlist *sg;
3903 unsigned long start_vpfn;
3904 struct intel_iommu *iommu;
3906 BUG_ON(dir == DMA_NONE);
3907 if (iommu_no_mapping(dev))
3908 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3910 domain = get_valid_domain_for_dev(dev);
3914 iommu = domain_get_iommu(domain);
3916 for_each_sg(sglist, sg, nelems, i)
3917 size += aligned_nrpages(sg->offset, sg->length);
3919 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3922 sglist->dma_length = 0;
3927 * Check if DMAR supports zero-length reads on write only mappings.
3930 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3931 !cap_zlr(iommu->cap))
3932 prot |= DMA_PTE_READ;
3933 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3934 prot |= DMA_PTE_WRITE;
3936 start_vpfn = mm_to_dma_pfn(iova_pfn);
3938 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3939 if (unlikely(ret)) {
3940 dma_pte_free_pagetable(domain, start_vpfn,
3941 start_vpfn + size - 1);
3942 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3946 /* it's a non-present to present mapping. Only flush if caching mode */
3947 if (cap_caching_mode(iommu->cap))
3948 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
3950 iommu_flush_write_buffer(iommu);
3955 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3960 const struct dma_map_ops intel_dma_ops = {
3961 .alloc = intel_alloc_coherent,
3962 .free = intel_free_coherent,
3963 .map_sg = intel_map_sg,
3964 .unmap_sg = intel_unmap_sg,
3965 .map_page = intel_map_page,
3966 .unmap_page = intel_unmap_page,
3967 .mapping_error = intel_mapping_error,
3969 .dma_supported = x86_dma_supported,
3973 static inline int iommu_domain_cache_init(void)
3977 iommu_domain_cache = kmem_cache_create("iommu_domain",
3978 sizeof(struct dmar_domain),
3983 if (!iommu_domain_cache) {
3984 pr_err("Couldn't create iommu_domain cache\n");
3991 static inline int iommu_devinfo_cache_init(void)
3995 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3996 sizeof(struct device_domain_info),
4000 if (!iommu_devinfo_cache) {
4001 pr_err("Couldn't create devinfo cache\n");
4008 static int __init iommu_init_mempool(void)
4011 ret = iova_cache_get();
4015 ret = iommu_domain_cache_init();
4019 ret = iommu_devinfo_cache_init();
4023 kmem_cache_destroy(iommu_domain_cache);
4030 static void __init iommu_exit_mempool(void)
4032 kmem_cache_destroy(iommu_devinfo_cache);
4033 kmem_cache_destroy(iommu_domain_cache);
4037 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
4039 struct dmar_drhd_unit *drhd;
4043 /* We know that this device on this chipset has its own IOMMU.
4044 * If we find it under a different IOMMU, then the BIOS is lying
4045 * to us. Hope that the IOMMU for this device is actually
4046 * disabled, and it needs no translation...
4048 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
4050 /* "can't" happen */
4051 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
4054 vtbar &= 0xffff0000;
4056 /* we know that this iommu should be at offset 0xa000 from vtbar */
4057 drhd = dmar_find_matched_drhd_unit(pdev);
4058 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
4059 TAINT_FIRMWARE_WORKAROUND,
4060 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
4061 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4063 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
4065 static void __init init_no_remapping_devices(void)
4067 struct dmar_drhd_unit *drhd;
4071 for_each_drhd_unit(drhd) {
4072 if (!drhd->include_all) {
4073 for_each_active_dev_scope(drhd->devices,
4074 drhd->devices_cnt, i, dev)
4076 /* ignore DMAR unit if no devices exist */
4077 if (i == drhd->devices_cnt)
4082 for_each_active_drhd_unit(drhd) {
4083 if (drhd->include_all)
4086 for_each_active_dev_scope(drhd->devices,
4087 drhd->devices_cnt, i, dev)
4088 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
4090 if (i < drhd->devices_cnt)
4093 /* This IOMMU has *only* gfx devices. Either bypass it or
4094 set the gfx_mapped flag, as appropriate */
4096 intel_iommu_gfx_mapped = 1;
4099 for_each_active_dev_scope(drhd->devices,
4100 drhd->devices_cnt, i, dev)
4101 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4106 #ifdef CONFIG_SUSPEND
4107 static int init_iommu_hw(void)
4109 struct dmar_drhd_unit *drhd;
4110 struct intel_iommu *iommu = NULL;
4112 for_each_active_iommu(iommu, drhd)
4114 dmar_reenable_qi(iommu);
4116 for_each_iommu(iommu, drhd) {
4117 if (drhd->ignored) {
4119 * we always have to disable PMRs or DMA may fail on this device
4123 iommu_disable_protect_mem_regions(iommu);
4127 iommu_flush_write_buffer(iommu);
4129 iommu_set_root_entry(iommu);
4131 iommu->flush.flush_context(iommu, 0, 0, 0,
4132 DMA_CCMD_GLOBAL_INVL);
4133 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4134 iommu_enable_translation(iommu);
4135 iommu_disable_protect_mem_regions(iommu);
4141 static void iommu_flush_all(void)
4143 struct dmar_drhd_unit *drhd;
4144 struct intel_iommu *iommu;
4146 for_each_active_iommu(iommu, drhd) {
4147 iommu->flush.flush_context(iommu, 0, 0, 0,
4148 DMA_CCMD_GLOBAL_INVL);
4149 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
4150 DMA_TLB_GLOBAL_FLUSH);
4154 static int iommu_suspend(void)
4156 struct dmar_drhd_unit *drhd;
4157 struct intel_iommu *iommu = NULL;
4160 for_each_active_iommu(iommu, drhd) {
4161 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
4163 if (!iommu->iommu_state)
4169 for_each_active_iommu(iommu, drhd) {
4170 iommu_disable_translation(iommu);
4172 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4174 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4175 readl(iommu->reg + DMAR_FECTL_REG);
4176 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4177 readl(iommu->reg + DMAR_FEDATA_REG);
4178 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4179 readl(iommu->reg + DMAR_FEADDR_REG);
4180 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4181 readl(iommu->reg + DMAR_FEUADDR_REG);
4183 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4188 for_each_active_iommu(iommu, drhd)
4189 kfree(iommu->iommu_state);
4194 static void iommu_resume(void)
4196 struct dmar_drhd_unit *drhd;
4197 struct intel_iommu *iommu = NULL;
4200 if (init_iommu_hw()) {
4202 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4204 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
4208 for_each_active_iommu(iommu, drhd) {
4210 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4212 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4213 iommu->reg + DMAR_FECTL_REG);
4214 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4215 iommu->reg + DMAR_FEDATA_REG);
4216 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4217 iommu->reg + DMAR_FEADDR_REG);
4218 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4219 iommu->reg + DMAR_FEUADDR_REG);
4221 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4224 for_each_active_iommu(iommu, drhd)
4225 kfree(iommu->iommu_state);
4228 static struct syscore_ops iommu_syscore_ops = {
4229 .resume = iommu_resume,
4230 .suspend = iommu_suspend,
4233 static void __init init_iommu_pm_ops(void)
4235 register_syscore_ops(&iommu_syscore_ops);
4239 static inline void init_iommu_pm_ops(void) {}
4240 #endif /* CONFIG_PM */
4243 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
4245 struct acpi_dmar_reserved_memory *rmrr;
4246 int prot = DMA_PTE_READ|DMA_PTE_WRITE;
4247 struct dmar_rmrr_unit *rmrru;
4250 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4254 rmrru->hdr = header;
4255 rmrr = (struct acpi_dmar_reserved_memory *)header;
4256 rmrru->base_address = rmrr->base_address;
4257 rmrru->end_address = rmrr->end_address;
4259 length = rmrr->end_address - rmrr->base_address + 1;
4260 rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
4265 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4266 ((void *)rmrr) + rmrr->header.length,
4267 &rmrru->devices_cnt);
4268 if (rmrru->devices_cnt && rmrru->devices == NULL)
4271 list_add(&rmrru->list, &dmar_rmrr_units);
4282 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4284 struct dmar_atsr_unit *atsru;
4285 struct acpi_dmar_atsr *tmp;
4287 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4288 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4289 if (atsr->segment != tmp->segment)
4291 if (atsr->header.length != tmp->header.length)
4293 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4300 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4302 struct acpi_dmar_atsr *atsr;
4303 struct dmar_atsr_unit *atsru;
4305 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
4308 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4309 atsru = dmar_find_atsr(atsr);
4313 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
4318 * If memory is allocated from slab by ACPI _DSM method, we need to
4319 * copy the memory content because the memory buffer will be freed on exit.
4322 atsru->hdr = (void *)(atsru + 1);
4323 memcpy(atsru->hdr, hdr, hdr->length);
4324 atsru->include_all = atsr->flags & 0x1;
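/*
 * Flag bit 0 of the ATSR is ALL_PORTS: when set, the ATSR applies to
 * every PCIe root port on the segment and carries no explicit device
 * scope.
 */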
4325 if (!atsru->include_all) {
4326 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4327 (void *)atsr + atsr->header.length,
4328 &atsru->devices_cnt);
4329 if (atsru->devices_cnt && atsru->devices == NULL) {
4335 list_add_rcu(&atsru->list, &dmar_atsr_units);
4340 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4342 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4346 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4348 struct acpi_dmar_atsr *atsr;
4349 struct dmar_atsr_unit *atsru;
4351 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4352 atsru = dmar_find_atsr(atsr);
4354 list_del_rcu(&atsru->list);
4356 intel_iommu_free_atsr(atsru);
4362 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4366 struct acpi_dmar_atsr *atsr;
4367 struct dmar_atsr_unit *atsru;
4369 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4370 atsru = dmar_find_atsr(atsr);
4374 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
4375 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4383 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4386 struct intel_iommu *iommu = dmaru->iommu;
4388 if (g_iommus[iommu->seq_id])
4391 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4392 pr_warn("%s: Doesn't support hardware pass through.\n",
4396 if (!ecap_sc_support(iommu->ecap) &&
4397 domain_update_iommu_snooping(iommu)) {
4398 pr_warn("%s: Doesn't support snooping.\n",
4402 sp = domain_update_iommu_superpage(iommu) - 1;
4403 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4404 pr_warn("%s: Doesn't support large page.\n",
4410 * Disable translation if already enabled prior to OS handover.
4412 if (iommu->gcmd & DMA_GCMD_TE)
4413 iommu_disable_translation(iommu);
4415 g_iommus[iommu->seq_id] = iommu;
4416 ret = iommu_init_domains(iommu);
4418 ret = iommu_alloc_root_entry(iommu);
4422 #ifdef CONFIG_INTEL_IOMMU_SVM
4423 if (pasid_enabled(iommu))
4424 intel_svm_alloc_pasid_tables(iommu);
4427 if (dmaru->ignored) {
4429 * we always have to disable PMRs or DMA may fail on this device
4432 iommu_disable_protect_mem_regions(iommu);
4436 intel_iommu_init_qi(iommu);
4437 iommu_flush_write_buffer(iommu);
4439 #ifdef CONFIG_INTEL_IOMMU_SVM
4440 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
4441 ret = intel_svm_enable_prq(iommu);
4446 ret = dmar_set_interrupt(iommu);
4450 iommu_set_root_entry(iommu);
4451 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4452 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4453 iommu_enable_translation(iommu);
4455 iommu_disable_protect_mem_regions(iommu);
4459 disable_dmar_iommu(iommu);
4461 free_dmar_iommu(iommu);
4465 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4468 struct intel_iommu *iommu = dmaru->iommu;
4470 if (!intel_iommu_enabled)
4476 ret = intel_iommu_add(dmaru);
4478 disable_dmar_iommu(iommu);
4479 free_dmar_iommu(iommu);
4485 static void intel_iommu_free_dmars(void)
4487 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4488 struct dmar_atsr_unit *atsru, *atsr_n;
4490 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4491 list_del(&rmrru->list);
4492 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4497 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4498 list_del(&atsru->list);
4499 intel_iommu_free_atsr(atsru);
4503 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4506 struct pci_bus *bus;
4507 struct pci_dev *bridge = NULL;
4509 struct acpi_dmar_atsr *atsr;
4510 struct dmar_atsr_unit *atsru;
4512 dev = pci_physfn(dev);
4513 for (bus = dev->bus; bus; bus = bus->parent) {
4515 /* If it's an integrated device, allow ATS */
4518 /* Connected via non-PCIe: no ATS */
4519 if (!pci_is_pcie(bridge) ||
4520 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4522 /* If we found the root port, look it up in the ATSR */
4523 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4528 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4529 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4530 if (atsr->segment != pci_domain_nr(dev->bus))
4533 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4534 if (tmp == &bridge->dev)
4537 if (atsru->include_all)
4547 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4550 struct dmar_rmrr_unit *rmrru;
4551 struct dmar_atsr_unit *atsru;
4552 struct acpi_dmar_atsr *atsr;
4553 struct acpi_dmar_reserved_memory *rmrr;
4555 if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
4558 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4559 rmrr = container_of(rmrru->hdr,
4560 struct acpi_dmar_reserved_memory, header);
4561 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4562 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4563 ((void *)rmrr) + rmrr->header.length,
4564 rmrr->segment, rmrru->devices,
4565 rmrru->devices_cnt);
4568 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4569 dmar_remove_dev_scope(info, rmrr->segment,
4570 rmrru->devices, rmrru->devices_cnt);
4574 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4575 if (atsru->include_all)
4578 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4579 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4580 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4581 (void *)atsr + atsr->header.length,
4582 atsr->segment, atsru->devices,
4583 atsru->devices_cnt);
4588 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4589 if (dmar_remove_dev_scope(info, atsr->segment,
4590 atsru->devices, atsru->devices_cnt))
4599 * Here we only respond to the action of a device being unbound from its driver.
4601 * A newly added device is not attached to its DMAR domain here yet; that will happen
4602 * when the device is mapped to an iova.
4604 static int device_notifier(struct notifier_block *nb,
4605 unsigned long action, void *data)
4607 struct device *dev = data;
4608 struct dmar_domain *domain;
4610 if (iommu_dummy(dev))
4613 if (action != BUS_NOTIFY_REMOVED_DEVICE)
4616 domain = find_domain(dev);
4620 dmar_remove_one_dev_info(domain, dev);
4621 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
4622 domain_exit(domain);
4627 static struct notifier_block device_nb = {
4628 .notifier_call = device_notifier,
4631 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4632 unsigned long val, void *v)
4634 struct memory_notify *mhp = v;
4635 unsigned long long start, end;
4636 unsigned long start_vpfn, last_vpfn;
4639 case MEM_GOING_ONLINE:
4640 start = mhp->start_pfn << PAGE_SHIFT;
4641 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4642 if (iommu_domain_identity_map(si_domain, start, end)) {
4643 pr_warn("Failed to build identity map for [%llx-%llx]\n",
4650 case MEM_CANCEL_ONLINE:
4651 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4652 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4653 while (start_vpfn <= last_vpfn) {
4655 struct dmar_drhd_unit *drhd;
4656 struct intel_iommu *iommu;
4657 struct page *freelist;
4659 iova = find_iova(&si_domain->iovad, start_vpfn);
4661 pr_debug("Failed get IOVA for PFN %lx\n",
4666 iova = split_and_remove_iova(&si_domain->iovad, iova,
4667 start_vpfn, last_vpfn);
4669 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4670 start_vpfn, last_vpfn);
4674 freelist = domain_unmap(si_domain, iova->pfn_lo,
4678 for_each_active_iommu(iommu, drhd)
4679 iommu_flush_iotlb_psi(iommu, si_domain,
4680 iova->pfn_lo, iova_size(iova),
4683 dma_free_pagelist(freelist);
4685 start_vpfn = iova->pfn_hi + 1;
4686 free_iova_mem(iova);
4694 static struct notifier_block intel_iommu_memory_nb = {
4695 .notifier_call = intel_iommu_memory_notifier,
4699 static void free_all_cpu_cached_iovas(unsigned int cpu)
4703 for (i = 0; i < g_num_of_iommus; i++) {
4704 struct intel_iommu *iommu = g_iommus[i];
4705 struct dmar_domain *domain;
4711 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
4712 domain = get_iommu_domain(iommu, (u16)did);
4716 free_cpu_cached_iovas(cpu, &domain->iovad);
4721 static int intel_iommu_cpu_dead(unsigned int cpu)
4723 free_all_cpu_cached_iovas(cpu);
4724 flush_unmaps_timeout(cpu);
4728 static void intel_disable_iommus(void)
4730 struct intel_iommu *iommu = NULL;
4731 struct dmar_drhd_unit *drhd;
4733 for_each_iommu(iommu, drhd)
4734 iommu_disable_translation(iommu);
4737 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4739 struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4741 return container_of(iommu_dev, struct intel_iommu, iommu);
4744 static ssize_t intel_iommu_show_version(struct device *dev,
4745 struct device_attribute *attr,
4748 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4749 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4750 return sprintf(buf, "%d:%d\n",
4751 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4753 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4755 static ssize_t intel_iommu_show_address(struct device *dev,
4756 struct device_attribute *attr,
4759 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4760 return sprintf(buf, "%llx\n", iommu->reg_phys);
4762 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4764 static ssize_t intel_iommu_show_cap(struct device *dev,
4765 struct device_attribute *attr,
4768 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4769 return sprintf(buf, "%llx\n", iommu->cap);
4771 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4773 static ssize_t intel_iommu_show_ecap(struct device *dev,
4774 struct device_attribute *attr,
4777 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4778 return sprintf(buf, "%llx\n", iommu->ecap);
4780 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4782 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4783 struct device_attribute *attr,
4786 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4787 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4789 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4791 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4792 struct device_attribute *attr,
4795 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4796 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4797 cap_ndoms(iommu->cap)));
4799 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4801 static struct attribute *intel_iommu_attrs[] = {
4802 &dev_attr_version.attr,
4803 &dev_attr_address.attr,
4805 &dev_attr_ecap.attr,
4806 &dev_attr_domains_supported.attr,
4807 &dev_attr_domains_used.attr,
4811 static struct attribute_group intel_iommu_group = {
4812 .name = "intel-iommu",
4813 .attrs = intel_iommu_attrs,
4816 const struct attribute_group *intel_iommu_groups[] = {
4821 int __init intel_iommu_init(void)
4824 struct dmar_drhd_unit *drhd;
4825 struct intel_iommu *iommu;
4827 /* VT-d is required for a TXT/tboot launch, so enforce that */
4828 force_on = tboot_force_iommu();
4830 if (iommu_init_mempool()) {
4832 panic("tboot: Failed to initialize iommu memory\n");
4836 down_write(&dmar_global_lock);
4837 if (dmar_table_init()) {
4839 panic("tboot: Failed to initialize DMAR table\n");
4843 if (dmar_dev_scope_init() < 0) {
4845 panic("tboot: Failed to initialize DMAR device scope\n");
4849 if (no_iommu || dmar_disabled) {
4851 * We exit the function here to ensure IOMMU's remapping and
4852 * mempool aren't set up, which means that the IOMMU's PMRs
4853 * won't be disabled via the call to init_dmars(). So disable
4854 * it explicitly here. The PMRs were set up by tboot prior to
4855 * calling SENTER, but the kernel is expected to reset/tear them down.
4858 if (intel_iommu_tboot_noforce) {
4859 for_each_iommu(iommu, drhd)
4860 iommu_disable_protect_mem_regions(iommu);
4864 * Make sure the IOMMUs are switched off, even when we
4865 * boot into a kexec kernel and the previous kernel left them enabled
4868 intel_disable_iommus();
4872 if (list_empty(&dmar_rmrr_units))
4873 pr_info("No RMRR found\n");
4875 if (list_empty(&dmar_atsr_units))
4876 pr_info("No ATSR found\n");
4878 if (dmar_init_reserved_ranges()) {
4880 panic("tboot: Failed to reserve iommu ranges\n");
4881 goto out_free_reserved_range;
4884 init_no_remapping_devices();
4889 panic("tboot: Failed to initialize DMARs\n");
4890 pr_err("Initialization failed\n");
4891 goto out_free_reserved_range;
4893 up_write(&dmar_global_lock);
4894 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4896 #ifdef CONFIG_SWIOTLB
4899 dma_ops = &intel_dma_ops;
4901 init_iommu_pm_ops();
4903 for_each_active_iommu(iommu, drhd) {
4904 iommu_device_sysfs_add(&iommu->iommu, NULL,
4907 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
4908 iommu_device_register(&iommu->iommu);
4911 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4912 bus_register_notifier(&pci_bus_type, &device_nb);
4913 if (si_domain && !hw_pass_through)
4914 register_memory_notifier(&intel_iommu_memory_nb);
4915 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4916 intel_iommu_cpu_dead);
4917 intel_iommu_enabled = 1;
4921 out_free_reserved_range:
4922 put_iova_domain(&reserved_iova_list);
4924 intel_iommu_free_dmars();
4925 up_write(&dmar_global_lock);
4926 iommu_exit_mempool();
4930 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4932 struct intel_iommu *iommu = opaque;
4934 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4939 * NB - intel-iommu lacks any sort of reference counting for the users of
4940 * dependent devices. If multiple endpoints have intersecting dependent
4941 * devices, unbinding the driver from any one of them will possibly leave
4942 * the others unable to operate.
4944 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4946 if (!iommu || !dev || !dev_is_pci(dev))
4949 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4952 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4954 struct intel_iommu *iommu;
4955 unsigned long flags;
4957 assert_spin_locked(&device_domain_lock);
4962 iommu = info->iommu;
4965 iommu_disable_dev_iotlb(info);
4966 domain_context_clear(iommu, info->dev);
4969 unlink_domain_info(info);
4971 spin_lock_irqsave(&iommu->lock, flags);
4972 domain_detach_iommu(info->domain, iommu);
4973 spin_unlock_irqrestore(&iommu->lock, flags);
4975 free_devinfo_mem(info);
4978 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4981 struct device_domain_info *info;
4982 unsigned long flags;
4984 spin_lock_irqsave(&device_domain_lock, flags);
4985 info = dev->archdata.iommu;
4986 __dmar_remove_one_dev_info(info);
4987 spin_unlock_irqrestore(&device_domain_lock, flags);
4990 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4994 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4996 domain_reserve_special_ranges(domain);
4998 /* calculate AGAW */
4999 domain->gaw = guest_width;
5000 adjust_width = guestwidth_to_adjustwidth(guest_width);
5001 domain->agaw = width_to_agaw(adjust_width);
5003 domain->iommu_coherency = 0;
5004 domain->iommu_snooping = 0;
5005 domain->iommu_superpage = 0;
5006 domain->max_addr = 0;
5008 /* always allocate the top pgd */
5009 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
5012 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
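/*
 * Worked example (illustrative only, assuming the helper definitions
 * earlier in this file): for the default guest_width of 48 bits,
 * (48 - 12) is an exact multiple of LEVEL_STRIDE, so
 * guestwidth_to_adjustwidth() leaves adjust_width at 48 and
 * width_to_agaw() yields agaw = (48 - 30) / 9 = 2, i.e. a 4-level
 * page table (agaw_to_level() adds 2).
 */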
5016 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
5018 struct dmar_domain *dmar_domain;
5019 struct iommu_domain *domain;
5021 if (type != IOMMU_DOMAIN_UNMANAGED)
5024 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
5026 pr_err("Can't allocate dmar_domain\n");
5029 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
5030 pr_err("Domain initialization failed\n");
5031 domain_exit(dmar_domain);
5034 domain_update_iommu_cap(dmar_domain);
5036 domain = &dmar_domain->domain;
5037 domain->geometry.aperture_start = 0;
5038 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
5039 domain->geometry.force_aperture = true;
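/*
 * Worked example (illustrative only): with gaw = 48 the aperture covers
 * [0, __DOMAIN_MAX_ADDR(48)] = [0, (1ULL << 48) - 1] = [0, 0xffffffffffff],
 * i.e. the full 48-bit guest address space advertised to the IOMMU core.
 */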
5044 static void intel_iommu_domain_free(struct iommu_domain *domain)
5046 domain_exit(to_dmar_domain(domain));
5049 static int intel_iommu_attach_device(struct iommu_domain *domain,
5052 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5053 struct intel_iommu *iommu;
5057 if (device_is_rmrr_locked(dev)) {
5058 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
5062 /* normally dev is not mapped */
5063 if (unlikely(domain_context_mapped(dev))) {
5064 struct dmar_domain *old_domain;
5066 old_domain = find_domain(dev);
5069 dmar_remove_one_dev_info(old_domain, dev);
5072 if (!domain_type_is_vm_or_si(old_domain) &&
5073 list_empty(&old_domain->devices))
5074 domain_exit(old_domain);
5078 iommu = device_to_iommu(dev, &bus, &devfn);
5082 /* check if this iommu agaw is sufficient for max mapped address */
5083 addr_width = agaw_to_width(iommu->agaw);
5084 if (addr_width > cap_mgaw(iommu->cap))
5085 addr_width = cap_mgaw(iommu->cap);
5087 if (dmar_domain->max_addr > (1LL << addr_width)) {
5088 pr_err("%s: iommu width (%d) is not "
5089 "sufficient for the mapped address (%llx)\n",
5090 __func__, addr_width, dmar_domain->max_addr);
5093 dmar_domain->gaw = addr_width;
5096 * Knock out extra levels of page tables if necessary
5098 while (iommu->agaw < dmar_domain->agaw) {
5099 struct dma_pte *pte;
5101 pte = dmar_domain->pgd;
5102 if (dma_pte_present(pte)) {
5103 dmar_domain->pgd = (struct dma_pte *)
5104 phys_to_virt(dma_pte_addr(pte));
5105 free_pgtable_page(pte);
5107 dmar_domain->agaw--;
5110 return domain_add_dev_info(dmar_domain, dev);
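/*
 * Worked example for the level-trimming loop above (illustrative only):
 * a domain created with the default 48-bit width has agaw = 2 (4-level
 * table). Attaching it to an IOMMU that only supports agaw = 1 (39-bit,
 * 3-level) runs the loop once: the old top-level table is freed and
 * domain->pgd is pointed at the table referenced by its first entry,
 * leaving a 3-level structure the hardware can walk.
 */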
5113 static void intel_iommu_detach_device(struct iommu_domain *domain,
5116 dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
5119 static int intel_iommu_map(struct iommu_domain *domain,
5120 unsigned long iova, phys_addr_t hpa,
5121 size_t size, int iommu_prot)
5123 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5128 if (iommu_prot & IOMMU_READ)
5129 prot |= DMA_PTE_READ;
5130 if (iommu_prot & IOMMU_WRITE)
5131 prot |= DMA_PTE_WRITE;
5132 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
5133 prot |= DMA_PTE_SNP;
5135 max_addr = iova + size;
5136 if (dmar_domain->max_addr < max_addr) {
5139 /* check if minimum agaw is sufficient for mapped address */
5140 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
5141 if (end < max_addr) {
5142 pr_err("%s: iommu width (%d) is not "
5143 "sufficient for the mapped address (%llx)\n",
5144 __func__, dmar_domain->gaw, max_addr);
5147 dmar_domain->max_addr = max_addr;
5149 /* Round up size to next multiple of PAGE_SIZE, if it and
5150 the low bits of hpa would take us onto the next page */
5151 size = aligned_nrpages(hpa, size);
5152 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
5153 hpa >> VTD_PAGE_SHIFT, size, prot);
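/*
 * Sketch of the aligned_nrpages() arithmetic used above (illustrative,
 * assuming 4KiB pages): mapping hpa = 0x10000800 with size = 0x1000
 * straddles two pages, so the page offset (0x800) is added to the size
 * and the sum is page-aligned, giving 2 pages rather than 1.
 */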
5157 static size_t intel_iommu_unmap(struct iommu_domain *domain,
5158 unsigned long iova, size_t size)
5160 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5161 struct page *freelist = NULL;
5162 struct intel_iommu *iommu;
5163 unsigned long start_pfn, last_pfn;
5164 unsigned int npages;
5165 int iommu_id, level = 0;
5167 /* Cope with horrid API which requires us to unmap more than the
5168 size argument if it happens to be a large-page mapping. */
5169 BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
5171 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5172 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
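	/*
	 * Worked example (illustrative only): if the IOVA falls inside a
	 * 2MiB superpage mapping, pfn_to_dma_pte() reports level = 2 and
	 * the statement above widens a 4KiB unmap request to
	 * VTD_PAGE_SIZE << 9 = 2MiB, since the whole large-page PTE has
	 * to be torn down at once.
	 */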
5174 start_pfn = iova >> VTD_PAGE_SHIFT;
5175 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5177 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
5179 npages = last_pfn - start_pfn + 1;
5181 for_each_domain_iommu(iommu_id, dmar_domain) {
5182 iommu = g_iommus[iommu_id];
5184 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
5185 start_pfn, npages, !freelist, 0);
5188 dma_free_pagelist(freelist);
5190 if (dmar_domain->max_addr == iova + size)
5191 dmar_domain->max_addr = iova;
5196 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
5199 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5200 struct dma_pte *pte;
5204 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
5206 phys = dma_pte_addr(pte);
5211 static bool intel_iommu_capable(enum iommu_cap cap)
5213 if (cap == IOMMU_CAP_CACHE_COHERENCY)
5214 return domain_update_iommu_snooping(NULL) == 1;
5215 if (cap == IOMMU_CAP_INTR_REMAP)
5216 return irq_remapping_enabled == 1;
5221 static int intel_iommu_add_device(struct device *dev)
5223 struct intel_iommu *iommu;
5224 struct iommu_group *group;
5227 iommu = device_to_iommu(dev, &bus, &devfn);
5231 iommu_device_link(&iommu->iommu, dev);
5233 group = iommu_group_get_for_dev(dev);
5236 return PTR_ERR(group);
5238 iommu_group_put(group);
5242 static void intel_iommu_remove_device(struct device *dev)
5244 struct intel_iommu *iommu;
5247 iommu = device_to_iommu(dev, &bus, &devfn);
5251 iommu_group_remove_device(dev);
5253 iommu_device_unlink(&iommu->iommu, dev);
5256 static void intel_iommu_get_resv_regions(struct device *device,
5257 struct list_head *head)
5259 struct iommu_resv_region *reg;
5260 struct dmar_rmrr_unit *rmrr;
5261 struct device *i_dev;
5265 for_each_rmrr_units(rmrr) {
5266 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
5268 if (i_dev != device)
5271 list_add_tail(&rmrr->resv->list, head);
5276 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5277 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
5281 list_add_tail(&reg->list, head);
5284 static void intel_iommu_put_resv_regions(struct device *dev,
5285 struct list_head *head)
5287 struct iommu_resv_region *entry, *next;
5289 list_for_each_entry_safe(entry, next, head, list) {
5290 if (entry->type == IOMMU_RESV_RESERVED)
5295 #ifdef CONFIG_INTEL_IOMMU_SVM
5296 #define MAX_NR_PASID_BITS (20)
5297 static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
5300 * Convert ecap_pss to the extended context-entry pts encoding, and also
5301 * respect the soft pasid_max value set by the iommu.
5302 * - number of PASID bits = ecap_pss + 1
5303 * - number of PASID table entries = 2^(pts + 5)
5304 * Therefore, pts = ecap_pss - 4
5305 * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
5307 if (ecap_pss(iommu->ecap) < 5)
5310 /* pasid_max is encoded as the actual number of entries, not the bit count */
5311 return find_first_bit((unsigned long *)&iommu->pasid_max,
5312 MAX_NR_PASID_BITS) - 5;
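/*
 * Worked example (illustrative only): with pasid_max = 0x20000 (128K
 * table entries), find_first_bit() returns 17 and the function yields
 * pts = 12, which encodes a table of 2^(12 + 5) = 2^17 entries, matching
 * the encoding described in the comment above.
 */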
5315 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
5317 struct device_domain_info *info;
5318 struct context_entry *context;
5319 struct dmar_domain *domain;
5320 unsigned long flags;
5324 domain = get_valid_domain_for_dev(sdev->dev);
5328 spin_lock_irqsave(&device_domain_lock, flags);
5329 spin_lock(&iommu->lock);
5332 info = sdev->dev->archdata.iommu;
5333 if (!info || !info->pasid_supported)
5336 context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5337 if (WARN_ON(!context))
5340 ctx_lo = context[0].lo;
5342 sdev->did = domain->iommu_did[iommu->seq_id];
5343 sdev->sid = PCI_DEVID(info->bus, info->devfn);
5345 if (!(ctx_lo & CONTEXT_PASIDE)) {
5346 context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
5347 context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
5348 intel_iommu_get_pts(iommu);
5351 /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
5352 * extended to permit requests-with-PASID if the PASIDE bit
5353 * is set, which makes sense. For CONTEXT_TT_PASS_THROUGH,
5354 * however, the PASIDE bit is ignored and requests-with-PASID
5355 * are unconditionally blocked. Which makes less sense.
5356 * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
5357 * "guest mode" translation types depending on whether ATS
5358 * is available or not. Annoyingly, we can't use the new
5359 * modes *unless* PASIDE is set. */
5360 if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
5361 ctx_lo &= ~CONTEXT_TT_MASK;
5362 if (info->ats_supported)
5363 ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
5365 ctx_lo |= CONTEXT_TT_PT_PASID << 2;
5367 ctx_lo |= CONTEXT_PASIDE;
5368 if (iommu->pasid_state_table)
5369 ctx_lo |= CONTEXT_DINVE;
5370 if (info->pri_supported)
5371 ctx_lo |= CONTEXT_PRS;
5372 context[0].lo = ctx_lo;
5374 iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
5375 DMA_CCMD_MASK_NOBIT,
5376 DMA_CCMD_DEVICE_INVL);
5379 /* Enable PASID support in the device, if it wasn't already */
5380 if (!info->pasid_enabled)
5381 iommu_enable_dev_iotlb(info);
5383 if (info->ats_enabled) {
5384 sdev->dev_iotlb = 1;
5385 sdev->qdep = info->ats_qdep;
5386 if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
5392 spin_unlock(&iommu->lock);
5393 spin_unlock_irqrestore(&device_domain_lock, flags);
5398 struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
5400 struct intel_iommu *iommu;
5403 if (iommu_dummy(dev)) {
5405 "No IOMMU translation for device; cannot enable SVM\n");
5409 iommu = device_to_iommu(dev, &bus, &devfn);
5411 dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
5415 if (!iommu->pasid_table) {
5416 dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
5422 #endif /* CONFIG_INTEL_IOMMU_SVM */
5424 const struct iommu_ops intel_iommu_ops = {
5425 .capable = intel_iommu_capable,
5426 .domain_alloc = intel_iommu_domain_alloc,
5427 .domain_free = intel_iommu_domain_free,
5428 .attach_dev = intel_iommu_attach_device,
5429 .detach_dev = intel_iommu_detach_device,
5430 .map = intel_iommu_map,
5431 .unmap = intel_iommu_unmap,
5432 .map_sg = default_iommu_map_sg,
5433 .iova_to_phys = intel_iommu_iova_to_phys,
5434 .add_device = intel_iommu_add_device,
5435 .remove_device = intel_iommu_remove_device,
5436 .get_resv_regions = intel_iommu_get_resv_regions,
5437 .put_resv_regions = intel_iommu_put_resv_regions,
5438 .device_group = pci_device_group,
5439 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
5442 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
5444 /* G4x/GM45 integrated gfx dmar support is totally busted. */
5445 pr_info("Disabling IOMMU for graphics on this chipset\n");
5449 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
5450 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
5451 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
5452 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
5453 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
5454 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
5455 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
5457 static void quirk_iommu_rwbf(struct pci_dev *dev)
5460 * Mobile 4 Series Chipset neglects to set RWBF capability,
5461 * but needs it. Same seems to hold for the desktop versions.
5463 pr_info("Forcing write-buffer flush capability\n");
5467 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
5468 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
5469 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
5470 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
5471 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
5472 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
5473 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
5476 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
5477 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
5478 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
5479 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
5480 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
5481 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
5482 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
5483 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
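/*
 * Illustrative decode of the GGC bits above (not part of the quirk): a
 * readout whose bits [11:8] are 0xb matches GGC_MEMORY_SIZE_4M_VT, and
 * because bit 11 (GGC_MEMORY_VT_ENABLED) is set within that value, the
 * quirk below does not disable translation for graphics but instead
 * forces strict IOTLB flushing.
 */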
5485 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
5489 if (pci_read_config_word(dev, GGC, &ggc))
5492 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
5493 pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
5495 } else if (dmar_map_gfx) {
5496 /* we have to ensure the gfx device is idle before we flush */
5497 pr_info("Disabling batched IOTLB flush on Ironlake\n");
5498 intel_iommu_strict = 1;
5501 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
5502 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
5503 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
5504 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
5506 /* On Tylersburg chipsets, some BIOSes have been known to enable the
5507 ISOCH DMAR unit for the Azalia sound device, but not give it any
5508 TLB entries, which causes it to deadlock. Check for that. We do
5509 this in a function called from init_dmars(), instead of in a PCI
5510 quirk, because we don't want to print the obnoxious "BIOS broken"
5511 message if VT-d is actually disabled.
5513 static void __init check_tylersburg_isoch(void)
5515 struct pci_dev *pdev;
5516 uint32_t vtisochctrl;
5518 /* If there's no Azalia in the system anyway, forget it. */
5519 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
5524 /* System Management Registers. Might be hidden, in which case
5525 we can't do the sanity check. But that's OK, because the
5526 known-broken BIOSes _don't_ actually hide it, so far. */
5527 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
5531 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
5538 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
5539 if (vtisochctrl & 1)
5542 /* Drop all bits other than the number of TLB entries */
5543 vtisochctrl &= 0x1c;
5545 /* If we have the recommended number of TLB entries (16), fine. */
5546 if (vtisochctrl == 0x10)
5549 /* Zero TLB entries? You get to ride the short bus to school. */
5551 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5552 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5553 dmi_get_system_info(DMI_BIOS_VENDOR),
5554 dmi_get_system_info(DMI_BIOS_VERSION),
5555 dmi_get_system_info(DMI_PRODUCT_VERSION));
5556 iommu_identity_mapping |= IDENTMAP_AZALIA;
5560 pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",