/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>

#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}
__setup("iommu=", setup_iommu);
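
/*
 * Usage example: virtual merging of adjacent scatterlist entries is enabled
 * by default; it can be controlled from the kernel command line, e.g.
 * "iommu=novmerge" to disable merging or "iommu=vmerge" to turn it back on.
 */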
static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4-way SMT we want interrupts on the primary threads only;
 * with 4 pools, a plain "cpu modulo nr_pools" mapping would put every
 * primary thread (CPU 0, 4, 8, ...) into the same pool, whereas the hash
 * spreads them across all pools.
 */
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
	return 0;
}
subsys_initcall(setup_iommu_pool_hash);
#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);
static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);
static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}
static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);
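
/*
 * Usage sketch (assuming the generic fault-injection framework): the failure
 * rate is configured either at boot time, e.g.
 * "fail_iommu=<interval>,<probability>,<space>,<times>", or at run time via
 * the debugfs knobs under /sys/kernel/debug/fail_iommu/. Injection is then
 * enabled per device through the "fail_iommu" sysfs attribute created below,
 * for example (hypothetical device address):
 *
 *	echo 1 > /sys/bus/pci/devices/0000:00:01.0/fail_iommu
 */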
static ssize_t fail_iommu_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

	return count;
}

static DEVICE_ATTR(fail_iommu, S_IRUGO|S_IWUSR, fail_iommu_show,
		   fail_iommu_store);
static int fail_iommu_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (device_create_file(dev, &dev_attr_fail_iommu))
			pr_warn("Unable to create IOMMU fault injection sysfs entries\n");
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		device_remove_file(dev, &dev_attr_fail_iommu);
	}

	return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif
	return 0;
}
/*
 * Must execute after the PCI and VIO subsystems have initialised but before
 * devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
	return false;
}
#endif
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);
	/* This allocator was derived from x86_64's bit string search */

	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return IOMMU_MAPPING_ERROR;
	}

	if (should_fail_iommu(dev))
		return IOMMU_MAPPING_ERROR;

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;
	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << tbl->it_page_shift);
	else
		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
			     boundary_size >> tbl->it_page_shift, align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First try the pool from the start */
			pool->hint = pool->start;
			pass++;
			goto again;
		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;
		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return IOMMU_MAPPING_ERROR;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
			     ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      unsigned long attrs)
{
	unsigned long entry;
	dma_addr_t ret = IOMMU_MAPPING_ERROR;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == IOMMU_MAPPING_ERROR))
		return IOMMU_MAPPING_ERROR;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << tbl->it_page_shift;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = tbl->it_ops->set(tbl, entry, npages,
				      (unsigned long)page &
				      IOMMU_PAGE_MASK(tbl), direction, attrs);

	/* tbl->it_ops->set() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * IOMMU_MAPPING_ERROR. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return IOMMU_MAPPING_ERROR;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}
static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable    = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#     = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize     = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex    = 0x%llx\n", (u64)tbl->it_index);
		}
		return false;
	}

	return true;
}
static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;

	/* The large pool is the last pool at the top of the table */
	if (entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr > tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}

	return p;
}
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;
	struct iommu_pool *pool;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	pool = get_pool(tbl, free_entry);

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	tbl->it_ops->clear(tbl, entry, npages);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}
int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		     struct scatterlist *sglist, int nelems,
		     unsigned long mask, enum dma_data_direction direction,
		     unsigned long attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> tbl->it_page_shift, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == IOMMU_MAPPING_ERROR)) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << tbl->it_page_shift;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
		    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = tbl->it_ops->set(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK(tbl),
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = IOMMU_MAPPING_ERROR;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE(tbl));
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = IOMMU_MAPPING_ERROR;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return 0;
}
void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
			int nelems, enum dma_data_direction direction,
			unsigned long attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE(tbl));
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}
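
/*
 * Note: ppc_iommu_map_sg() and ppc_iommu_unmap_sg() are not called by
 * drivers directly; they are typically reached through the generic DMA API
 * (dma_map_sg()/dma_unmap_sg()) via the platform's IOMMU dma_map_ops.
 */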
static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * In case of firmware-assisted dump, the system goes through a clean
	 * reboot at the time of the crash. Hence it's safe to clear the TCE
	 * entries if firmware-assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (tbl->it_ops->get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
			       KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
			     index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}
/*
 * Build an iommu_table structure. This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;
	unsigned int i;
	struct iommu_pool *p;

	BUG_ON(!tbl->it_ops);

	/* number of bytes needed for the bitmap */
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This protects us from buggy drivers that consider page 0 to be
	 * invalid and would otherwise crash the machine or even lose data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

	for (i = 0; i < tbl->nr_pools; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}
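
/*
 * Worked example (assuming IOMMU_NR_POOLS is 4): a 2GB DMA window with 4KB
 * IOMMU pages gives it_size = 524288 TCEs. Since the window is >= 1GB it is
 * split into 4 pools of (524288 * 3 / 4) / 4 = 98304 entries each, covering
 * entries 0..393215, while the large pool gets the top quarter, entries
 * 393216..524287.
 */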
static void iommu_table_free(struct kref *kref)
{
	unsigned long bitmap_sz;
	unsigned int order;
	struct iommu_table *tbl;

	tbl = container_of(kref, struct iommu_table, it_kref);

	if (tbl->it_ops->free)
		tbl->it_ops->free(tbl);

	/*
	 * In case we have reserved the first bit, we should not emit
	 * the warning below.
	 */
	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	/* verify that table contains no entries */
	if (!bitmap_empty(tbl->it_map, tbl->it_size))
		pr_warn("%s: Unexpected TCEs\n", __func__);

	/* calculate bitmap size in bytes */
	bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	kfree(tbl);
}
struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
{
	if (kref_get_unless_zero(&tbl->it_kref))
		return tbl;

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_tce_table_get);

int iommu_tce_table_put(struct iommu_table *tbl)
{
	return kref_put(&tbl->it_kref, iommu_table_free);
}
EXPORT_SYMBOL_GPL(iommu_tce_table_put);
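
/*
 * Usage sketch (hypothetical caller): code that stashes a table pointer
 * takes a reference first and drops it when it is done:
 *
 *	tbl = iommu_tce_table_get(tbl);
 *	if (!tbl)
 *		return;			(the table is already being torn down)
 *	...
 *	iommu_tce_table_put(tbl);
 */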
/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  unsigned long attrs)
{
	dma_addr_t dma_handle = IOMMU_MAPPING_ERROR;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));

	if (tbl) {
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> tbl->it_page_shift, align,
					 attrs);
		if (dma_handle == IOMMU_MAPPING_ERROR) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit()) {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
	}

	return dma_handle;
}
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      unsigned long attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size,
					 IOMMU_PAGE_SIZE(tbl));
		iommu_free(tbl, dma_handle, npages);
	}
}
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space. This is checked later
	 * anyway. It is easier to debug here for the drivers than in
	 * the middle of the DMA code.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> tbl->it_page_shift;
	io_order = get_iommu_order(size, tbl);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> tbl->it_page_shift, io_order, 0);
	if (mapping == IOMMU_MAPPING_ERROR) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> tbl->it_page_shift;
		iommu_free(tbl, dma_handle, nio_pages);
		size = PAGE_ALIGN(size);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return TCE_PCI_READ | TCE_PCI_WRITE;
	case DMA_FROM_DEVICE:
		return TCE_PCI_WRITE;
	case DMA_TO_DEVICE:
		return TCE_PCI_READ;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
#ifdef CONFIG_IOMMU_API

static void group_release(void *iommu_data)
{
	struct iommu_table_group *table_group = iommu_data;

	table_group->group = NULL;
}

void iommu_register_group(struct iommu_table_group *table_group,
		int pci_domain_number, unsigned long pe_num)
{
	struct iommu_group *grp;
	char *name;

	grp = iommu_group_alloc();
	if (IS_ERR(grp)) {
		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
			PTR_ERR(grp));
		return;
	}
	table_group->group = grp;
	iommu_group_set_iommudata(grp, table_group, group_release);
	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
			 pci_domain_number, pe_num);
	if (!name)
		return;
	iommu_group_set_name(grp, name);
	kfree(name);
}
enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
		return DMA_BIDIRECTIONAL;
	else if (tce & TCE_PCI_READ)
		return DMA_TO_DEVICE;
	else if (tce & TCE_PCI_WRITE)
		return DMA_FROM_DEVICE;
	else
		return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);
void iommu_flush_tce(struct iommu_table *tbl)
{
	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);
int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (ioba & mask)
		return -EINVAL;

	ioba >>= page_shift;
	if (ioba < offset)
		return -EINVAL;

	if ((ioba + 1) > (offset + size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);

int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (gpa & mask)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
		unsigned long *hpa, enum dma_data_direction *direction)
{
	long ret;

	ret = tbl->it_ops->exchange(tbl, entry, hpa, direction);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL)))
		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));

	/* if (unlikely(ret))
		pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
			__func__, hwaddr, entry << tbl->it_page_shift,
				hpa, ret); */

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg);
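
/*
 * Note: on success, *hpa and *direction are updated to describe the TCE that
 * was previously installed at @entry; that is why the page is marked dirty
 * above when the old mapping allowed the device to write to memory.
 */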
#ifdef CONFIG_PPC_BOOK3S_64
long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
		unsigned long *hpa, enum dma_data_direction *direction)
{
	long ret;

	ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL))) {
		struct page *pg = realmode_pfn_to_page(*hpa >> PAGE_SHIFT);

		if (likely(pg)) {
			SetPageDirty(pg);
		} else {
			tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
			ret = -EFAULT;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg_rm);
#endif
int iommu_take_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
	int ret = 0;

	/*
	 * VFIO does not control TCE entries allocation and the guest
	 * can write new TCEs on top of existing ones so iommu_tce_build()
	 * must be able to release old pages. This functionality
	 * requires exchange() callback defined so if it is not
	 * implemented, we disallow taking ownership over the table.
	 */
	if (!tbl->it_ops->exchange)
		return -EINVAL;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock(&tbl->pools[i].lock);

	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
		pr_err("iommu_tce: it_map is not empty");
		ret = -EBUSY;
		/* Restore bit#0 set by iommu_init_table() */
		if (tbl->it_offset == 0)
			set_bit(0, tbl->it_map);
	} else {
		memset(tbl->it_map, 0xff, sz);
	}

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);
void iommu_release_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock(&tbl->pools[i].lock);

	memset(tbl->it_map, 0, sz);

	/* Restore bit#0 set by iommu_init_table() */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);
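
/*
 * Usage sketch (hypothetical VFIO-style caller, error handling elided):
 *
 *	if (iommu_take_ownership(tbl))
 *		return;
 *	... the new owner programs TCEs directly, e.g. via iommu_tce_xchg() ...
 *	iommu_release_ownership(tbl);
 */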
int iommu_add_device(struct device *dev)
{
	struct iommu_table *tbl;
	struct iommu_table_group_link *tgl;

	/*
	 * The sysfs entries should be populated before binding the IOMMU
	 * group. If the sysfs entries aren't ready, we simply bail.
	 */
	if (!device_is_registered(dev))
		return -ENOENT;

	if (dev->iommu_group) {
		pr_debug("%s: Skipping device %s with iommu group %d\n",
			 __func__, dev_name(dev),
			 iommu_group_id(dev->iommu_group));
		return -EBUSY;
	}

	tbl = get_iommu_table_base(dev);
	if (!tbl) {
		pr_debug("%s: Skipping device %s with no tbl\n",
			 __func__, dev_name(dev));
		return 0;
	}

	tgl = list_first_entry_or_null(&tbl->it_group_list,
				       struct iommu_table_group_link, next);
	if (!tgl) {
		pr_debug("%s: Skipping device %s with no group\n",
			 __func__, dev_name(dev));
		return 0;
	}
	pr_debug("%s: Adding %s to iommu group %d\n",
		 __func__, dev_name(dev),
		 iommu_group_id(tgl->table_group->group));

	if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
		pr_err("%s: Invalid IOMMU page size %lx (%lx) on %s\n",
		       __func__, IOMMU_PAGE_SIZE(tbl),
		       PAGE_SIZE, dev_name(dev));
		return -EINVAL;
	}

	return iommu_group_add_device(tgl->table_group->group, dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);
void iommu_del_device(struct device *dev)
{
	/*
	 * Some devices might not have an IOMMU table and group, and we
	 * needn't detach them from the associated IOMMU groups.
	 */
	if (!dev->iommu_group) {
		pr_debug("iommu_tce: skipping device %s with no tbl\n",
			 dev_name(dev));
		return;
	}

	iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_del_device);
static int tce_iommu_bus_notifier(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		return iommu_add_device(dev);
	case BUS_NOTIFY_DEL_DEVICE:
		if (dev->iommu_group)
			iommu_del_device(dev);
		return 0;
	default:
		return 0;
	}
}

static struct notifier_block tce_iommu_bus_nb = {
	.notifier_call = tce_iommu_bus_notifier,
};

int __init tce_iommu_bus_notifier_init(void)
{
	bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
	return 0;
}
#endif /* CONFIG_IOMMU_API */