/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
#include <linux/iommu-common.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"

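/* The streaming cache (STC) sits between a device and memory for
 * streaming (non-consistent) mappings.  The macros below locate the
 * per-context tag match register and manage the flush-flag word that
 * the hardware writes to signal that a flush has completed.
 */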
#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

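/* The IOMMU and streaming buffer control registers are accessed with
 * physical-bypass loads and stores (ASI_PHYS_BYPASS_EC_E) rather than
 * through a kernel mapping.
 */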
#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu_map_table *iommu_map_table)
{
	struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

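/* IOPTE encodings for consistent (cacheable, non-streaming) and streaming
 * mappings.  The DVMA context number, when one is used, lives in the
 * IOPTE_CONTEXT field starting at bit 47.
 */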
#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

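/* Set up one IOMMU instance: the allocation map and pool state used by
 * the iommu-common allocator, the dummy page that unused IOPTEs point
 * at, and the IOMMU page table (TSB) itself, all allocated on the
 * requested NUMA node.
 */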
int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->tbl.map)
		return -ENOMEM;
	memset(iommu->tbl.map, 0, sz);

	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    (tlb_type != hypervisor ? iommu_flushall : NULL),
			    false, 1, false);

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself. */
	order = get_order(tsbsize);
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)page_address(page);

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->tbl.map);
	iommu->tbl.map = NULL;

	return -ENOMEM;
}

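/* Allocate a run of @npages contiguous entries from the IOMMU arena and
 * return a pointer to the first IOPTE, or NULL when the arena is full.
 */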
static inline iopte_t *alloc_npages(struct device *dev,
				    struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      (unsigned long)(-1), 0);
	if (unlikely(entry == IOMMU_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}

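/* DVMA contexts allow the streaming cache to be flushed per mapping
 * rather than per page.  Context 0 means "no context" and is never
 * handed out; the bitmap search starts at the lowest possibly-free slot
 * and wraps around once before giving up.
 */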
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

	if (unlikely(n == IOMMU_NUM_CTXS)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

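/* Consistent (coherent) DMA allocation for sun4u.  The backing pages are
 * mapped with IOPTE_CONSISTENT, so no streaming-buffer flushing is ever
 * required for these buffers.  Drivers reach this through the generic
 * DMA API, e.g. (illustrative only, "pdev"/"dma_handle" are made up):
 *
 *	void *buf = dma_alloc_coherent(&pdev->dev, size, &dma_handle,
 *				       GFP_KERNEL);
 *
 * which dispatches through dma_ops (sun4u_dma_ops below).
 */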
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	unsigned long order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= 10))
		return NULL;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->tbl.table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

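/* Undo dma_4u_alloc_coherent(): return the IOMMU range covering the
 * buffer to the allocator and free the underlying pages.
 */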
static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma,
				 unsigned long attrs)
{
	struct iommu *iommu;
	unsigned long order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;

	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

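/* Map one page-range for streaming DMA.  A run of IOPTEs is allocated, a
 * DVMA context is picked up when the hardware supports context flushing,
 * and the returned bus address carries the original byte offset within
 * the first IO page.
 */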
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	base = alloc_npages(dev, iommu, npages);
	spin_lock_irqsave(&iommu->lock, flags);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->tbl.table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}

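/* Write back and invalidate the streaming buffer for a mapping before
 * the CPU looks at the data.  When both the strbuf and the IOMMU support
 * context flushing, the whole context is flushed through the match/flush
 * registers; otherwise every IO page is flushed individually.  Unless
 * the transfer was DMA_TO_DEVICE, completion is confirmed by waiting for
 * the hardware to set the flush-flag word (STC_FLUSHFLAG_*).
 */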
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%llx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      unsigned long attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_free_ctx(iommu, ctx);
	spin_unlock_irqrestore(&iommu->lock, flags);

	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

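/* Map a scatterlist for streaming DMA.  Each element gets a run of
 * IOPTEs; adjacent elements are merged into one DMA segment when the
 * allocated bus addresses are contiguous and neither the device's
 * maximum segment size nor its segment boundary would be violated.
 * Returns the number of DMA segments produced, or 0 on failure.
 */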
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->tbl.table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);

			entry = (vaddr - iommu->tbl.table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);

			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;
		struct iommu_map_table *tbl = &iommu->tbl;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}

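/* Tear down a scatterlist mapping: flush the streaming buffer where
 * required, point the IOPTEs back at the dummy page and return each
 * range to the allocator.  The walk stops at the first element with a
 * zero dma_length, which marks the end of the mapped segments.
 */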
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

		entry = ((dma_handle - iommu->tbl.table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
				     IOMMU_ERROR_CODE);

		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

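/* Make device writes visible to the CPU for one streaming mapping by
 * flushing the streaming buffer.  Nothing needs to be done when the
 * streaming buffer is disabled; no sync_*_for_device hooks are provided
 * in sun4u_dma_ops.
 */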
static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;
		struct iommu_map_table *tbl = &iommu->tbl;

		iopte = iommu->page_table +
			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

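/* Scatterlist variant of the above: one flush covers the range from the
 * first segment's bus address to the end of the last mapped segment.
 */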
static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;
		struct iommu_map_table *tbl = &iommu->tbl;

		iopte = iommu->page_table + ((sglist[0].dma_address -
			tbl->table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

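/* DMA operations for sun4u, where the kernel programs the IOMMU hardware
 * directly; sun4v systems install a hypervisor-backed dma_ops elsewhere.
 * Drivers never call these entry points directly, e.g. (illustrative
 * only):
 *
 *	dma_addr_t h = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
 *	...
 *	dma_unmap_page(dev, h, len, DMA_FROM_DEVICE);
 *
 * ends up in dma_4u_map_page()/dma_4u_unmap_page() via dma_ops.
 */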
static struct dma_map_ops sun4u_dma_ops = {
	.alloc			= dma_4u_alloc_coherent,
	.free			= dma_4u_free_coherent,
	.map_page		= dma_4u_map_page,
	.unmap_page		= dma_4u_unmap_page,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);

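/* Report whether a device's DMA mask can be satisfied.  Masks wider than
 * 32 bits are only usable when an ATU is present; otherwise the mask is
 * checked against the IOMMU's own DMA address mask, with a PCI-specific
 * 64-bit fallback for PCI devices.
 */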
int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask > DMA_BIT_MASK(32)) {
		if (iommu->atu)
			dma_addr_mask = iommu->atu->dma_addr_mask;
		else
			return 0;
	}

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev_is_pci(dev))
		return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);