/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32-bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>

#include <asm/atomic.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/k8.h>
static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */
/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;
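/*
 * See gart_parse_options() at the bottom of this file: the default above
 * can be overridden at boot with the "fullflush" / "nofullflush" options
 * (typically passed as part of the iommu= kernel command line parameter).
 */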
/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
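/*
 * Worked example (illustrative only): for the physical address 0x123456000,
 * GPTE_ENCODE() yields 0x23456000 | (0x1 << 4) | GPTE_VALID | GPTE_COHERENT
 * = 0x23456013, i.e. bits 39-32 of the physical address are kept in bits
 * 11-4 of the PTE.  GPTE_DECODE(0x23456013) recovers 0x123456000 again.
 */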
#define EMERGENCY_PAGES 32 /* = 128KB */
#define AGPEXTERN extern

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */
static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask, u64 dma_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;
	unsigned long limit;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;
	limit = iommu_device_max_index(iommu_pages,
				       DIV_ROUND_UP(iommu_bus_base, PAGE_SIZE),
				       dma_mask >> PAGE_SHIFT);

	spin_lock_irqsave(&iommu_bitmap_lock, flags);

	if (limit <= next_bit) {
		need_flush = 1;
		next_bit = 0;
	}

	offset = iommu_area_alloc(iommu_gart_bitmap, limit, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1 && next_bit) {
		need_flush = 1;
		offset = iommu_area_alloc(iommu_gart_bitmap, limit, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}
static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = __builtin_return_address(0);\
	} while (0)

#define CLEAR_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = NULL;			\
	} while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);

	/* Very crude. Dump some entries from the end of the table too. */
	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
	       iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0);
		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk(KERN_DEBUG "\n");
}

#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif
static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped, prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic(KERN_ERR
				"PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu ||
		!is_buffer_dma_capable(*dev->dma_mask, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !is_buffer_dma_capable(*dev->dma_mask, addr, size);
}
/*
 * Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir, unsigned long align_mask,
				u64 dma_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size);
	unsigned long iommu_page;
	int i;

	iommu_page = alloc_iommu(dev, npages, align_mask, dma_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
	unsigned long bus;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0, dma_get_mask(dev));
	flush_gart();

	return bus;
}
/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}
/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}
/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;
	u64 dma_mask = dma_get_mask(dev);

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0,
					    dma_mask);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}
/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start;
	unsigned long iommu_page;
	struct scatterlist *s;
	int i;

	iommu_start = alloc_iommu(dev, pages, 0, dma_get_mask(dev));
	if (iommu_start == -1)
		return -1;

	iommu_page = iommu_start;
	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}
static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}
/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
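/*
 * Example (illustrative): two adjacent 8KB chunks can share one GART
 * mapping when the first ends exactly on a page boundary and the second
 * starts at offset 0; a chunk that starts mid-page always begins a new
 * mapping (see the merge test below).
 */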
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;
				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}
/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag)
{
	void *vaddr;
	phys_addr_t paddr;
	unsigned long align_mask;
	u64 dma_mask = dma_alloc_coherent_mask(dev, flag);

	vaddr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size));
	if (!vaddr)
		return NULL;

	paddr = virt_to_phys(vaddr);
	if (is_buffer_dma_capable(dma_mask, paddr, size)) {
		*dma_addr = paddr;
		return vaddr;
	}

	align_mask = (1UL << get_order(size)) - 1;
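	/*
	 * Illustrative example: for a 16KB allocation get_order(size) == 2,
	 * so align_mask == 3 and alloc_iommu() places the mapping on a
	 * four-page (16KB) boundary, keeping the coherent buffer naturally
	 * aligned in bus address space.
	 */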
	*dma_addr = dma_map_area(dev, paddr, size, DMA_BIDIRECTIONAL,
				 align_mask, dma_mask);
	flush_gart();

	if (*dma_addr != bad_dma_address)
		return vaddr;

	free_pages((unsigned long)vaddr, get_order(size));

	return NULL;
}
/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr)
{
	gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, get_order(size));
}
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		printk(KERN_WARNING
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
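/*
 * For reference: the 3-bit aperture order above encodes sizes of
 * (32MB << aper_order), i.e. 32MB up to 4GB, and the base register holds
 * the physical base in 32MB units (hence the shift by 25 when the value is
 * restored on resume below).
 */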
static void enable_gart_translations(void)
{
	int i;

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}
}
/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}
static int gart_resume(struct sys_device *dev)
{
	printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");

	if (fix_up_north_bridges) {
		int i;

		printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");

		for (i = 0; i < num_k8_northbridges; i++) {
			struct pci_dev *dev = k8_northbridges[i];

			/*
			 * Don't enable translations just yet. That is the next
			 * step. Restore the pre-suspend aperture settings.
			 */
			pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
					       aperture_order << 1);
			pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
					       aperture_alloc >> 25);
		}
	}

	enable_gart_translations();

	return 0;
}
static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
	return 0;
}

static struct sysdev_class gart_sysdev_class = {
	.name		= "gart",
	.suspend	= gart_suspend,
	.resume		= gart_resume,
};

static struct sys_device device_gart = {
	.id	= 0,
	.cls	= &gart_sysdev_class,
};
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i, error;
	unsigned long start_pfn, end_pfn;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
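	/* e.g. a 64MB aperture covers 16384 4KB pages, so the GATT is 64KB */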
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	enable_gart_translations();

	error = sysdev_class_register(&gart_sysdev_class);
	if (!error)
		error = sysdev_register(&device_gart);
	if (error)
		panic("Could not register gart_sysdev -- would corrupt data on next suspend");

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);

	/* need to map that range */
	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
	if (end_pfn > max_low_pfn_mapped) {
		start_pfn = (aper_base>>PAGE_SHIFT);
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	}

	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU - "
	       "falling back to iommu=soft.\n");
	return -1;
}
extern int agp_amd64_init(void);

static struct dma_mapping_ops gart_dma_ops = {
	.map_single			= gart_map_single,
	.unmap_single			= gart_unmap_single,
	.sync_single_for_cpu		= NULL,
	.sync_single_for_device		= NULL,
	.sync_single_range_for_cpu	= NULL,
	.sync_single_range_for_device	= NULL,
	.sync_sg_for_cpu		= NULL,
	.sync_sg_for_device		= NULL,
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.alloc_coherent			= gart_alloc_coherent,
	.free_coherent			= gart_free_coherent,
};
void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}
void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_size;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}
#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif
	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !gart_iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			printk(KERN_WARNING "More than 4GB of memory "
			       "but GART IOMMU not available.\n");
			printk(KERN_WARNING "falling back to iommu=soft.\n");
		}
		return;
	}
	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);
#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * 8);
		else
			printk(KERN_DEBUG
			       "PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif
	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();
	/*
	 * Try to workaround a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}
void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 8))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=')
			++p;
		if (get_option(&p, &arg))
			fallback_aper_order = arg;
	}
}