#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>
static int forbid_dac __read_mostly;

struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;
/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. The problem is that the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif
int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);
/* Dummy device used for NULL arguments (normally ISA). A smaller DMA
   mask would probably be better, but this is bug-to-bug compatible
   with older i386. */
struct device x86_dma_fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
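
/*
 * Usage sketch (illustrative addition, not part of the original file): a
 * PCI driver normally negotiates its DMA mask at probe time, retrying
 * with a narrower mask if the wide one is rejected. "pdev" below is a
 * hypothetical struct pci_dev from such a probe routine:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_64BIT_MASK) &&
 *	    dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *		return -EIO;
 */
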
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	dma32_bootmem_size = memparse(p, &p);
	return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
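
/*
 * Example (illustrative addition, not part of the original file): the
 * reservation size can be changed on the kernel command line, e.g.
 * "dma32_size=256M"; memparse() accepts the usual K/M/G suffixes.
 */
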
void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;

	if (max_pfn <= MAX_DMA32_PFN)
		return;

	/*
	 * See aperture_64.c allocate_aperture() for the reason the
	 * allocation is aligned and its goal is set to 512M.
	 */
	align = 64ULL<<20;
	size = round_up(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
				 512ULL<<20);
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;
}
static void __init dma32_free_bootmem(void)
{
	if (max_pfn <= MAX_DMA32_PFN)
		return;

	if (!dma32_bootmem_ptr)
		return;

	free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

	dma32_bootmem_ptr = NULL;
	dma32_bootmem_size = 0;
}
void __init pci_iommu_alloc(void)
{
	/* free the range so the IOMMU can get some range below 4G */
	dma32_free_bootmem();
	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
	gart_iommu_hole_init();

	detect_calgary();

	detect_intel_iommu();

	amd_iommu_detect();

	pci_swiotlb_init();
}
unsigned long iommu_num_pages(unsigned long addr, unsigned long len)
{
	unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);

	return size >> PAGE_SHIFT;
}
EXPORT_SYMBOL(iommu_num_pages);
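
/*
 * Worked example (illustrative addition, not part of the original file):
 * with 4K pages, a buffer at addr = 0x1ff0 of len = 0x20 straddles a page
 * boundary.  (addr & ~PAGE_MASK) + len = 0xff0 + 0x20 = 0x1010, which
 * rounds up to 0x2000, so iommu_num_pages() returns 2 pages to map.
 */
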
/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_bio_merge = 4096;
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "fullflush", 8))
			iommu_fullflush = 1;
		if (!strncmp(p, "nofullflush", 11))
			iommu_fullflush = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
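
/*
 * Example (illustrative addition, not part of the original file): the
 * options parsed above are combined comma-separated on the kernel
 * command line, e.g.
 *
 *	iommu=force,merge,panic    force use of the IOMMU, allow merging
 *	                           and panic on translation overflow
 *	iommu=soft                 use the software bounce buffer (swiotlb)
 *	                           instead of a hardware IOMMU
 */
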
int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_24BIT_MASK)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   The problem with this is that if we overflow the IOMMU area
	   and return DAC as a fallback address the device may not
	   handle it correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);
static int __init pci_iommu_init(void)
{
	calgary_iommu_init();
	intel_iommu_init();
	amd_iommu_init();
	gart_iommu_init();
	no_iommu_init();
	return 0;
}

void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		printk(KERN_INFO "PCI: VIA PCI bridge detected."
				 " Disabling DAC.\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);