#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment
 * to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES	65536
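
/*
 * Early IOMMU detection: sort the detection table into dependency order,
 * sanity-check it, then call each entry's ->detect() hook.  A positive
 * result marks the entry IOMMU_DETECTED, runs its early setup, and may
 * end the scan if the entry requests it.
 */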
void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}
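
/*
 * Generic coherent DMA allocation: prefer CMA when the caller may sleep,
 * fall back to the page allocator, and retry once from ZONE_DMA if the
 * pages land above the device's coherent DMA mask.
 */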
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	unsigned long dma_mask;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag &= ~__GFP_ZERO;
again:
	page = NULL;
	/* CMA can be used only in the context which permits sleeping */
	if (flag & __GFP_WAIT) {
		page = dma_alloc_from_contiguous(dev, count, get_order(size));
		if (page && page_to_phys(page) + size > dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	/* fall back to the normal page allocator */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}
	memset(page_address(page), 0, size);
	*dma_addr = addr;
	return page_address(page);
}
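
/*
 * Counterpart of dma_generic_alloc_coherent(): return the pages to CMA
 * if they came from there, otherwise to the page allocator.
 */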
void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_addr, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = virt_to_page(vaddr);

	if (!dma_release_from_contiguous(dev, page, count))
		free_pages((unsigned long)vaddr, get_order(size));
}
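
/*
 * Substitute the fallback device for a NULL dev, then normalize the gfp
 * zone flags from the device's coherent mask.  Returns false if the
 * (possibly substituted) device cannot do DMA at all.
 */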
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
{
	/* Pick the fallback device before its mask is consulted below. */
	if (!*dev)
		*dev = &x86_dma_fallback_dev;
	if (!is_device_dma_capable(*dev))
		return false;

	/* Drop caller zone flags; derive the right ones from the mask. */
	*gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
	*gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);
	return true;
}
EXPORT_SYMBOL(arch_dma_alloc_attrs);

/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
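
/*
 * Illustrative examples accepted by the parser above (options are
 * comma-separated and scanned left to right):
 *   iommu=off          - disable IOMMU handling entirely
 *   iommu=pt           - pass-through: no DMA translation for devices
 *   iommu=soft,nopanic - use swiotlb, don't panic on overflow
 */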

int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);
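
/*
 * Late IOMMU initialization: register DMA-API debugging, run the
 * implementation selected during early detection, then call any
 * ->late_init() hooks for entries flagged IOMMU_DETECTED.
 */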
static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;

	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here. */
static void via_no_dac(struct pci_dev *dev)
{
	if (forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif