/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows the use of PCI devices that only support 32bit addresses
 * on systems with more than 4GB.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/swiotlb.h>
unsigned long iommu_bus_base;		/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

u32 *iommu_gatt_base;			/* Remapping table */
/* If this is disabled the IOMMU will use an optimized flushing strategy
   of only flushing when a mapping is reused. With it true the GART is
   flushed for every mapping. Problem is that doing the lazy flush seems
   to trigger bugs with some popular PCI cards, in particular 3ware (but
   it has also been seen with QLogic at least). */
int iommu_fullflush = 1;
/* Allocation bitmap for the remapping area */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */

static u32 gart_unmapped_entry;
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
        (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
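/*
 * A GART PTE is 32 bits: bits 12-31 hold the low part of the page frame,
 * bits 4-11 hold physical address bits 32-39, bit 0 marks the entry valid
 * and bit 1 coherent. For example GPTE_ENCODE(0x123456000UL) yields
 * 0x23456013 and GPTE_DECODE of that value gives back 0x123456000.
 */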
#define to_pages(addr,size) \
        (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
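/*
 * to_pages() returns how many GART pages a mapping spans, including the
 * partial pages at both ends: with 4K pages, to_pages(0xff8, 0x10)
 * crosses a page boundary and therefore returns 2.
 */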
#define for_all_nb(dev) \
        dev = NULL;     \
        while ((dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, dev))!=NULL)\
             if (dev->bus->number == 0 && \
                    (PCI_SLOT(dev->devfn) >= 24) && (PCI_SLOT(dev->devfn) <= 31))
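/*
 * Iterate over all K8 northbridges: the memory controller of each node
 * appears as AMD device 0x1103 in slots 24-31 on PCI bus 0.
 */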
#define MAX_NB 8

static struct pci_dev *northbridges[MAX_NB];
static u32 northbridge_flush_word[MAX_NB];
#define EMERGENCY_PAGES 32 /* = 128KB */
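/*
 * The first EMERGENCY_PAGES of the remapping area stay reserved and
 * unmapped; failed mappings fall back to bad_dma_address, which points
 * into this region, so stray DMA at least hits a known range instead of
 * arbitrary memory.
 */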
#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;
static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. Set for each GART wrap */
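/*
 * Allocate a run of 'size' contiguous GART pages. This is a simple
 * next-fit search over the allocation bitmap; when the search wraps to
 * the start of the aperture a TLB flush is requested via need_flush.
 */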
static unsigned long alloc_iommu(int size)
{
        unsigned long offset, flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size);
        if (offset == -1) {
                need_flush = 1;
                offset = find_next_zero_string(iommu_gart_bitmap,0,next_bit,size);
        }
        if (offset != -1) {
                set_bit_string(iommu_gart_bitmap, offset, size);
                next_bit = offset+size;
                if (next_bit >= iommu_pages) {
                        next_bit = 0;
                        need_flush = 1;
                }
        }
        if (iommu_fullflush)
                need_flush = 1;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
        return offset;
}
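/*
 * Free a previously allocated run of GART pages. The common single page
 * case gets away with an atomic clear_bit and skips the bitmap lock.
 */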
static void free_iommu(unsigned long offset, int size)
{
        unsigned long flags;

        if (size == 1) {
                clear_bit(offset, iommu_gart_bitmap);
                return;
        }
        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        __clear_bit_string(iommu_gart_bitmap, offset, size);
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
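/*
 * GART TLB flushing: the driver writes each northbridge's flush word
 * (config offset 0x9c) with bit 0 set to start a flush, then polls the
 * register until the hardware clears the bit again.
 */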
/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(struct device *dev)
{
        unsigned long flags;
        int flushed = 0;
        int i, max;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        if (need_flush) {
                max = 0;
                for (i = 0; i < MAX_NB; i++) {
                        if (!northbridges[i])
                                continue;
                        pci_write_config_dword(northbridges[i], 0x9c,
                                               northbridge_flush_word[i] | 1);
                        flushed++;
                        max = i;
                }
                for (i = 0; i <= max; i++) {
                        u32 w;
                        if (!northbridges[i])
                                continue;
                        /* Make sure the hardware actually executed the flush. */
                        for (;;) {
                                pci_read_config_dword(northbridges[i], 0x9c, &w);
                                if (!(w & 1))
                                        break;
                                cpu_relax();
                        }
                }
                if (!flushed)
                        printk("nothing to flush?\n");
                need_flush = 0;
        }
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x) if (iommu_leak_tab) \
                        iommu_leak_tab[x] = __builtin_return_address(0);
#define CLEAR_LEAK(x) if (iommu_leak_tab) \
                        iommu_leak_tab[x] = NULL;

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
int iommu_leak_pages = 20;

void dump_leak(void)
{
        int i;
        static int dump;

        if (dump || !iommu_leak_tab) return;
        dump = 1;
        show_stack(NULL,NULL);
        /* Very crude. Dump some from the end of the table too */
        printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages);
        for (i = 0; i < iommu_leak_pages; i += 2) {
                printk("%lu: ", iommu_pages-i);
                printk_address((unsigned long) iommu_leak_tab[iommu_pages-i]);
                printk("%c", (i+1)%2 == 0 ? '\n' : ' ');
        }
        printk("\n");
}
#else
#define SET_LEAK(x)
#define CLEAR_LEAK(x)
#endif
static void iommu_full(struct device *dev, size_t size, int dir)
{
        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this operation properly.
         * Return some non-mapped prereserved space in the aperture and
         * let the Northbridge deal with it. This will result in garbage
         * in the IO operation. When the size exceeds the prereserved space
         * memory corruption will occur or random memory will be DMAed
         * out. Hopefully no network devices use single mappings that big.
         */

        printk(KERN_ERR
  "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
               size, dev->bus_id);

        if (size > PAGE_SIZE*EMERGENCY_PAGES) {
                if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Memory would be corrupted\n");
                if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic(KERN_ERR "PCI-DMA: Random memory would be DMAed\n");
        }

#ifdef CONFIG_IOMMU_LEAK
        dump_leak();
#endif
}
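/*
 * need_iommu() tells whether a physical range has to be remapped through
 * the GART: either it ends above the device's DMA mask or force_iommu is
 * set. nonforced_iommu() is the same check without the force_iommu
 * override.
 */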
static inline int need_iommu(struct device *dev, unsigned long addr, size_t size)
{
        u64 mask = *dev->dma_mask;
        int high = addr + size >= mask;
        int mmu = high;
        if (force_iommu)
                mmu = 1;
        return mmu;
}

static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
        u64 mask = *dev->dma_mask;
        int high = addr + size >= mask;
        int mmu = high;
        return mmu;
}
/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
                               size_t size, int dir)
{
        unsigned long npages = to_pages(phys_mem, size);
        unsigned long iommu_page = alloc_iommu(npages);
        int i;

        if (iommu_page == -1) {
                if (!nonforced_iommu(dev, phys_mem, size))
                        return phys_mem;
                if (panic_on_overflow)
                        panic("dma_map_area overflow %lu bytes\n", size);
                iommu_full(dev, size, dir);
                return bad_dma_address;
        }

        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
                SET_LEAK(iommu_page + i);
                phys_mem += PAGE_SIZE;
        }
        return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
static dma_addr_t gart_map_simple(struct device *dev, char *buf,
                                  size_t size, int dir)
{
        dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);
        flush_gart(dev);
        return map;
}
/* Map a single area into the IOMMU */
dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
        unsigned long phys_mem, bus;

        BUG_ON(dir == DMA_NONE);

        phys_mem = virt_to_phys(addr);
        if (!need_iommu(dev, phys_mem, size))
                return phys_mem;

        bus = gart_map_simple(dev, addr, size, dir);
        return bus;
}
/*
 * Wrapper for dma_unmap_single working with scatterlists.
 */
void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
        int i;

        for (i = 0; i < nents; i++) {
                struct scatterlist *s = &sg[i];
                if (!s->dma_length || !s->length)
                        break;
                dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
        }
}
/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
                               int nents, int dir)
{
        int i;

#ifdef CONFIG_IOMMU_DEBUG
        printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

        for (i = 0; i < nents; i++ ) {
                struct scatterlist *s = &sg[i];
                unsigned long addr = page_to_phys(s->page) + s->offset;
                if (nonforced_iommu(dev, addr, s->length)) {
                        addr = dma_map_area(dev, addr, s->length, dir);
                        if (addr == bad_dma_address) {
                                if (i > 0)
                                        gart_unmap_sg(dev, sg, i, dir);
                                nents = 0;
                                sg[0].dma_length = 0;
                                break;
                        }
                }
                s->dma_address = addr;
                s->dma_length = s->length;
        }
        flush_gart(dev);
        return nents;
}
/* Map multiple scatterlist entries contiguously into the first one. */
static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
                          struct scatterlist *sout, unsigned long pages)
{
        unsigned long iommu_start = alloc_iommu(pages);
        unsigned long iommu_page = iommu_start;
        int i;

        if (iommu_start == -1)
                return -1;

        for (i = start; i < stopat; i++) {
                struct scatterlist *s = &sg[i];
                unsigned long pages, addr;
                unsigned long phys_addr = s->dma_address;

                BUG_ON(i > start && s->offset);
                if (i == start) {
                        *sout = *s;
                        sout->dma_address = iommu_bus_base;
                        sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
                        sout->dma_length = s->length;
                } else {
                        sout->dma_length += s->length;
                }

                addr = phys_addr;
                pages = to_pages(s->offset, s->length);
                while (pages--) {
                        iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
                        SET_LEAK(iommu_page);
                        addr += PAGE_SIZE;
                        iommu_page++;
                }
        }
        BUG_ON(iommu_page - iommu_start != pages);
        return 0;
}
static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat,
                               struct scatterlist *sout,
                               unsigned long pages, int need)
{
        if (!need) {
                BUG_ON(stopat - start != 1);
                *sout = sg[start];
                sout->dma_length = sg[start].length;
                return 0;
        }
        return __dma_map_cont(sg, start, stopat, sout, pages);
}
/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
        int i;
        int out;
        int start;
        unsigned long pages = 0;
        int need = 0, nextneed;

        BUG_ON(dir == DMA_NONE);
        if (nents == 0)
                return 0;

        out = 0;
        start = 0;
        for (i = 0; i < nents; i++) {
                struct scatterlist *s = &sg[i];
                dma_addr_t addr = page_to_phys(s->page) + s->offset;
                s->dma_address = addr;
                BUG_ON(s->length == 0);

                nextneed = need_iommu(dev, addr, s->length);

                /* Handle the previous not yet processed entries */
                if (i > start) {
                        struct scatterlist *ps = &sg[i-1];
                        /* Can only merge when the last chunk ends on a page
                           boundary and the new one doesn't have an offset. */
                        if (!iommu_merge || !nextneed || !need || s->offset ||
                            (ps->offset + ps->length) % PAGE_SIZE) {
                                if (dma_map_cont(sg, start, i, sg+out, pages,
                                                 need) < 0)
                                        goto error;
                                out++;
                                pages = 0;
                                start = i;
                        }
                }

                need = nextneed;
                pages += to_pages(s->offset, s->length);
        }
        if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
                goto error;
        out++;
        flush_gart(dev);
        if (out < nents)
                sg[out].dma_length = 0;
        return out;

error:
        flush_gart(NULL);
        gart_unmap_sg(dev, sg, nents, dir);
        /* When it was forced or merged try again in a dumb way */
        if (force_iommu || iommu_merge) {
                out = dma_map_sg_nonforce(dev, sg, nents, dir);
                if (out > 0)
                        return out;
        }
        if (panic_on_overflow)
                panic("dma_map_sg: overflow on %lu pages\n", pages);
        iommu_full(dev, pages << PAGE_SHIFT, dir);
        for (i = 0; i < nents; i++)
                sg[i].dma_address = bad_dma_address;
        return 0;
}
/*
 * Free a DMA mapping.
 */
void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
                       size_t size, int direction)
{
        unsigned long iommu_page;
        int npages;
        int i;

        if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
            dma_addr >= iommu_bus_base + iommu_size)
                return;
        iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
        npages = to_pages(dma_addr, size);
        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
                CLEAR_LEAK(iommu_page + i);
        }
        free_iommu(iommu_page, npages);
}
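/*
 * Work out how much of the AGP aperture the IOMMU remapping area may
 * claim: the whole aperture, or half of it when the AGP driver is still
 * using it, adjusted against the large-page rounding below. Warns when
 * the result is small.
 */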
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
        unsigned long a;

        if (!iommu_size) {
                iommu_size = aper_size;
                if (!no_agp)
                        iommu_size /= 2;
        }

        a = aper + iommu_size;
        iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;

        if (iommu_size < 64*1024*1024)
                printk(KERN_WARNING
  "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",iommu_size>>20);

        return iommu_size;
}
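/*
 * Read back the GART aperture of a northbridge: bits 1-3 of config
 * register 0x90 encode the aperture order (size = 32MB << order) and
 * register 0x94 holds the base in 32MB units, hence the shift by 25.
 */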
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
        unsigned aper_size = 0, aper_base_32;
        u64 aper_base;
        unsigned aper_order;

        pci_read_config_dword(dev, 0x94, &aper_base_32);
        pci_read_config_dword(dev, 0x90, &aper_order);
        aper_order = (aper_order >> 1) & 7;

        aper_base = aper_base_32 & 0x7fff;
        aper_base <<= 25;

        aper_size = (32 * 1024 * 1024) << aper_order;
        if (aper_base + aper_size >= 0xffffffff || !aper_size)
                aper_base = 0;

        *size = aper_size;
        return aper_base;
}
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
        struct pci_dev *dev;
        void *gatt;
        unsigned aper_base, new_aper_base;
        unsigned aper_size, gatt_size, new_aper_size;

        printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
        aper_size = aper_base = info->aper_size = 0;
        for_all_nb(dev) {
                new_aper_base = read_aperture(dev, &new_aper_size);
                if (!new_aper_base)
                        goto nommu;

                if (!aper_base) {
                        aper_size = new_aper_size;
                        aper_base = new_aper_base;
                }
                if (aper_size != new_aper_size || aper_base != new_aper_base)
                        goto nommu;
        }
        if (!aper_base)
                goto nommu;
        info->aper_base = aper_base;
        info->aper_size = aper_size>>20;
        gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
        gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
        if (!gatt)
                panic("Cannot allocate GATT table");
        memset(gatt, 0, gatt_size);
        agp_gatt_table = gatt;

        for_all_nb(dev) {
                u32 ctl;
                u32 gatt_reg;

                gatt_reg = __pa(gatt) >> 12;
                gatt_reg <<= 4;
                pci_write_config_dword(dev, 0x98, gatt_reg);
                pci_read_config_dword(dev, 0x90, &ctl);

                ctl |= 1;
                ctl &= ~((1<<4) | (1<<5));

                pci_write_config_dword(dev, 0x90, ctl);
        }
        flush_gart(NULL);

        printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10);
        return 0;

 nommu:
        /* Should not happen anymore */
        printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
               KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
        return -1;
}
extern int agp_amd64_init(void);

static struct dma_mapping_ops gart_dma_ops = {
        .mapping_error = NULL,
        .map_single = gart_map_single,
        .map_simple = gart_map_simple,
        .unmap_single = gart_unmap_single,
        .sync_single_for_cpu = NULL,
        .sync_single_for_device = NULL,
        .sync_single_range_for_cpu = NULL,
        .sync_single_range_for_device = NULL,
        .sync_sg_for_cpu = NULL,
        .sync_sg_for_device = NULL,
        .map_sg = gart_map_sg,
        .unmap_sg = gart_unmap_sg,
};
static int __init pci_iommu_init(void)
{
        struct agp_kern_info info;
        unsigned long aper_size;
        unsigned long iommu_start;
        struct pci_dev *dev;
        unsigned long scratch;
        long i;

#ifndef CONFIG_AGP_AMD64
        no_agp = 1;
#else
        /* Makefile puts PCI initialization via subsys_initcall first. */
        /* Add other K8 AGP bridge drivers here */
        no_agp = no_agp ||
                (agp_amd64_init() < 0) ||
                (agp_copy_info(agp_bridge, &info) < 0);
#endif

        if (no_iommu ||
            (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
            !iommu_aperture ||
            (no_agp && init_k8_gatt(&info) < 0)) {
                printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
                if (end_pfn > MAX_DMA32_PFN) {
                        printk(KERN_ERR "WARNING more than 4GB of memory "
                                        "but IOMMU not compiled in.\n"
                               KERN_ERR "WARNING 32bit PCI may malfunction.\n"
                               KERN_ERR "You might want to enable "
                                        "CONFIG_GART_IOMMU\n");
                }
                return -ENODEV;
        }

        printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
        aper_size = info.aper_size * 1024 * 1024;
        iommu_size = check_iommu_size(info.aper_base, aper_size);
        iommu_pages = iommu_size >> PAGE_SHIFT;

        iommu_gart_bitmap = (void*)__get_free_pages(GFP_KERNEL,
                                                    get_order(iommu_pages/8));
        if (!iommu_gart_bitmap)
                panic("Cannot allocate iommu bitmap\n");
        memset(iommu_gart_bitmap, 0, iommu_pages/8);
#ifdef CONFIG_IOMMU_LEAK
        if (leak_trace) {
                iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
                                  get_order(iommu_pages*sizeof(void *)));
                if (iommu_leak_tab)
                        memset(iommu_leak_tab, 0, iommu_pages * 8);
                else
                        printk("PCI-DMA: Cannot allocate leak trace area\n");
        }
#endif
        /*
         * Out of IOMMU space handling.
         * Reserve some invalid pages at the beginning of the GART.
         */
        set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

        agp_memory_reserved = iommu_size;
        printk(KERN_INFO
               "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
               iommu_size>>20);

        iommu_start = aper_size - iommu_size;
        iommu_bus_base = info.aper_base + iommu_start;
        bad_dma_address = iommu_bus_base;
        iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
        /*
         * Unmap the IOMMU part of the GART. The alias of the page is
         * always mapped with cache enabled and there is no full cache
         * coherency across the GART remapping. The unmapping avoids
         * automatic prefetches from the CPU allocating cache lines in
         * there. All CPU accesses are done via the direct mapping to
         * the backing memory. The GART address is only used by PCI
         * devices.
         */
        clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);
        /*
         * Try to work around a bug (thanks to BenH):
         * Set unmapped entries to a scratch page instead of 0.
         * Any prefetches that hit unmapped entries won't get a bus abort
         * then.
         */
        scratch = get_zeroed_page(GFP_KERNEL);
        if (!scratch)
                panic("Cannot allocate iommu scratch page");
        gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
        for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
                iommu_gatt_base[i] = gart_unmapped_entry;
        for_all_nb(dev) {
                u32 flag;
                int cpu = PCI_SLOT(dev->devfn) - 24;
                if (cpu >= MAX_NB)
                        continue;
                northbridges[cpu] = dev;
                pci_read_config_dword(dev, 0x9c, &flag); /* cache flush word */
                northbridge_flush_word[cpu] = flag;
        }

        flush_gart(NULL);

        dma_ops = &gart_dma_ops;

        return 0;
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
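/*
 * Parse the GART specific iommu= boot options: leak[=pages], a plain
 * number for the remapping area size, fullflush/nofullflush, noagp,
 * noaperture, plus the aperture options force, allowed and
 * memaper[=order] that are duplicated from pci-dma.c.
 */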
void gart_parse_options(char *p)
{
        int arg;

#ifdef CONFIG_IOMMU_LEAK
        if (!strncmp(p,"leak",4)) {
                leak_trace = 1;
                p += 4;
                if (*p == '=') ++p;
                if (isdigit(*p) && get_option(&p, &arg))
                        iommu_leak_pages = arg;
        }
#endif
        if (isdigit(*p) && get_option(&p, &arg))
                iommu_size = arg;
        if (!strncmp(p, "fullflush",8))
                iommu_fullflush = 1;
        if (!strncmp(p, "nofullflush",11))
                iommu_fullflush = 0;
        if (!strncmp(p,"noagp",5))
                no_agp = 1;
        if (!strncmp(p, "noaperture",10))
                fix_aperture = 0;
        /* duplicated from pci-dma.c */
        if (!strncmp(p,"force",5))
                iommu_aperture_allowed = 1;
        if (!strncmp(p,"allowed",7))
                iommu_aperture_allowed = 1;
        if (!strncmp(p, "memaper", 7)) {
                fallback_aper_force = 1;
                p += 7;
                if (*p == '=') {
                        ++p;
                        if (get_option(&p, &arg))
                                fallback_aper_order = arg;
                }
        }
}