/*
 * ioport.c:  Simple io mapping allocator.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *
 * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
 *
 * 2000/01/29
 * <rth> zait: as long as pci_alloc_consistent produces something addressable,
 *	things are ok.
 * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
 *	pointer into the big page mapping
 * <rth> zait: so what?
 * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
 * <zaitcev> hmm
 * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
 *	So far so good.
 * <zaitcev> Now, driver calls pci_free_consistent(with result of
 *	remap_it_my_way()).
 * <zaitcev> How do you find the address to pass to free_pages()?
 * <rth> zait: walk the page tables?  It's only two or three level after all.
 * <rth> zait: you have to walk them anyway to remove the mapping.
 * <zaitcev> Hmm
 * <zaitcev> Sounds reasonable
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pci.h>		/* struct pci_dev */
#include <linux/proc_fs.h>
#include <linux/scatterlist.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/io-unit.h>
#define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */

static struct resource *_sparc_find_resource(struct resource *r,
					     unsigned long);

static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name);
static void _sparc_free_io(struct resource *res);

static void register_proc_sparc_ioport(void);
/* This points to the next to use virtual memory for DVMA mappings */
static struct resource _sparc_dvma = {
	.name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
};
/* This points to the start of I/O mappings, visible from outside. */
/*ext*/ struct resource sparc_iomap = {
	.name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
};
/*
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and interrupt controller before the kmalloc is available.
 */

#define XNMLN  15
#define XNRES  10	/* SS-10 uses 8 */

struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];
static struct xresource *xres_alloc(void) {
	struct xresource *xrp;
	int n;

	xrp = xresv;
	for (n = 0; n < XNRES; n++) {
		if (xrp->xflag == 0) {
			xrp->xflag = 1;
			return xrp;
		}
		xrp++;
	}
	return NULL;
}

static void xres_free(struct xresource *xrp) {
	xrp->xflag = 0;
}
/*
 * These are typically used in PCI drivers
 * which are trying to be cross-platform.
 *
 * Bus type is always zero on IIep.
 */
void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	char name[14];

	sprintf(name, "phys_%08x", (u32)offset);
	return _sparc_alloc_io(0, offset, size, name);
}

/*
 * Complementary to ioremap().
 */
void iounmap(volatile void __iomem *virtual)
{
	unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
	struct resource *res;

	if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) {
		printk("free_io/iounmap: cannot free %lx\n", vaddr);
		return;
	}
	_sparc_free_io(res);

	if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}
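
/*
 * Illustrative sketch (not part of this file): how a driver typically
 * uses the pair above. The physical address, size and register layout
 * are hypothetical.
 */
#if 0
static void __iomem *example_regs;

static int example_attach(void)
{
	/* Map 0x20 bytes of device registers; the cookie that comes back
	 * is the page-aligned base plus the sub-page offset of phys. */
	example_regs = ioremap(0x71200000UL, 0x20);
	if (example_regs == NULL)
		return -ENOMEM;
	writeb(0x01, example_regs);	/* hypothetical "enable" register */
	return 0;
}

static void example_detach(void)
{
	iounmap(example_regs);		/* releases the resource as well */
}
#endif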
void __iomem *of_ioremap(struct resource *res, unsigned long offset,
			 unsigned long size, char *name)
{
	return _sparc_alloc_io(res->flags & 0xF,
			       res->start + offset,
			       size, name);
}
EXPORT_SYMBOL(of_ioremap);

void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
{
	iounmap(base);
}
EXPORT_SYMBOL(of_iounmap);
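
/*
 * Illustrative sketch (not part of this file): of_ioremap() takes the
 * resource straight from the device tree node, so an of_platform
 * driver's probe tends to look roughly like this. All names here are
 * hypothetical.
 */
#if 0
static int __devinit example_probe(struct of_device *op,
				   const struct of_device_id *match)
{
	struct resource *rp = &op->resource[0];
	void __iomem *regs;

	/* Map the device's whole first register bank. */
	regs = of_ioremap(rp, 0, rp->end - rp->start + 1, "example-regs");
	if (regs == NULL)
		return -ENODEV;
	/* ... later: of_iounmap(rp, regs, rp->end - rp->start + 1); */
	return 0;
}
#endif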
/*
 * Meat of mapping
 */
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name)
{
	static int printed_full;
	struct xresource *xres;
	struct resource *res;
	char *tack;
	int tlen;
	void __iomem *va;	/* P3 diag */

	if (name == NULL) name = "???";

	if ((xres = xres_alloc()) != 0) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk("ioremap: done with statics, switching to malloc\n");
			printed_full = 1;
		}
		tlen = strlen(name);
		tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
		if (tack == NULL) return NULL;
		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *) tack;
		tack += sizeof (struct resource);
	}

	strlcpy(tack, name, XNMLN+1);
	res->name = tack;

	va = _sparc_ioremap(res, busno, phys, size);
	/* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
	return va;
}
static void __iomem *
_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
{
	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);

	if (allocate_resource(&sparc_iomap, res,
	    (offset + sz + PAGE_SIZE-1) & PAGE_MASK,
	    sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
		/* Usually we cannot see printks in this case. */
		prom_printf("alloc_io_res(%s): cannot occupy\n",
		    (res->name != NULL) ? res->name : "???");
		prom_halt();
	}

	pa &= PAGE_MASK;
	sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1);

	return (void __iomem *)(unsigned long)(res->start + offset);
}
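
/*
 * Worked example of the rounding above (illustrative numbers, 4K pages):
 * for pa == 0x10000020 and sz == 0x100, offset == 0x20 and the requested
 * size becomes (0x20 + 0x100 + 0xfff) & PAGE_MASK == 0x1000, i.e. one
 * page. The caller gets res->start + 0x20, so the sub-page offset of
 * the physical address is preserved in the returned cookie.
 */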
/*
 * Complementary to _sparc_ioremap().
 */
static void _sparc_free_io(struct resource *res)
{
	unsigned long plen;

	plen = res->end - res->start + 1;
	BUG_ON((plen & (PAGE_SIZE-1)) != 0);
	sparc_unmapiorange(res->start, plen);
	release_resource(res);
}

#ifdef CONFIG_SBUS

void sbus_set_sbus64(struct device *dev, int x)
{
	printk("sbus_set_sbus64: unsupported\n");
}
/*
 * Allocate a chunk of memory suitable for DMA.
 * Typically devices use them for control blocks.
 * CPU may access them without any explicit flushing.
 */
void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
{
	struct of_device *op = to_of_device(dev);
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return NULL;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
		goto err_nopages;

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
		goto err_nomem;

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("sbus_alloc_consistent: cannot occupy 0x%lx\n", len_total);
		goto err_nova;
	}
	mmu_inval_dma_area(va, len_total);
	// XXX The mmu_map_dma_area does this for us below, see comments.
	// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
	/*
	 * XXX That's where sdev would be used. Currently we load
	 * all iommu tables with the same translations.
	 */
	if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
		goto err_noiommu;

	res->name = op->node->name;

	return (void *)(unsigned long)res->start;

	/* Unwind in reverse order of allocation. */
err_noiommu:
	release_resource(res);
err_nova:
	kfree(res);
err_nomem:
	free_pages(va, order);
err_nopages:
	return NULL;
}
void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
{
	struct resource *res;
	struct page *pgv;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("sbus_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("sbus_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), n);
		return;
	}

	release_resource(res);
	kfree(res);

	/* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
	pgv = virt_to_page(p);
	mmu_unmap_dma_area(dev, ba, n);

	__free_pages(pgv, get_order(n));
}
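
/*
 * Illustrative sketch (not part of this file): the consistent-DMA life
 * cycle as an SBus driver would use it. The softc layout and sizes are
 * hypothetical.
 */
#if 0
struct example_softc {
	void *cb;	/* CPU view of the control block */
	u32 cb_dvma;	/* device view of the same memory */
};

static int example_init(struct device *dev, struct example_softc *sc)
{
	/* One page of control blocks; the CPU may access sc->cb without
	 * any explicit flushing, the device is given sc->cb_dvma. */
	sc->cb = sbus_alloc_consistent(dev, PAGE_SIZE, &sc->cb_dvma);
	if (sc->cb == NULL)
		return -ENOMEM;
	return 0;
}

static void example_fini(struct device *dev, struct example_softc *sc)
{
	sbus_free_consistent(dev, PAGE_SIZE, sc->cb, sc->cb_dvma);
}
#endif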
/*
 * Map a chunk of memory so that devices can see it.
 * CPU view of this memory may be inconsistent with
 * a device view and explicit flushing is necessary.
 */
dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
{
	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return 0;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return 0;
	}
	return mmu_get_scsi_one(dev, va, len);
}

void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction)
{
	mmu_release_scsi_one(dev, ba, n);
}

int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
{
	mmu_get_scsi_sgl(dev, sg, n);

	/*
	 * XXX sparc64 can return a partial length here. sun4c should do this
	 * but it currently panics if it can't fulfill the request - Anton
	 */
	return n;
}

void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
{
	mmu_release_scsi_sgl(dev, sg, n);
}

void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
{
}

void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
{
}
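
/*
 * Illustrative sketch (not part of this file): streaming mappings wrap
 * a single transfer, unlike the consistent allocations above. The
 * buffer is hypothetical and the direction constant is assumed to come
 * from linux/dma-mapping.h.
 */
#if 0
static void example_dma_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t ba;

	ba = sbus_map_single(dev, buf, len, DMA_TO_DEVICE);
	/* ... point the device at ba and wait for the transfer ... */
	sbus_unmap_single(dev, ba, len, DMA_TO_DEVICE);
}
#endif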
/* Support code for sbus_init().  */
void __init sbus_setup_iommu(struct sbus_bus *sbus, struct device_node *dp)
{
#ifndef CONFIG_SUN4
	struct device_node *parent = dp->parent;

	if (sparc_cpu_model != sun4d &&
	    parent != NULL &&
	    !strcmp(parent->name, "iommu"))
		iommu_init(parent, sbus);

	if (sparc_cpu_model == sun4d)
		iounit_init(parent, sbus);
#endif
}
int __init sbus_arch_preinit(void)
{
	register_proc_sparc_ioport();

#ifdef CONFIG_SUN4
	{
		extern void sun4_dvma_init(void);
		sun4_dvma_init();
	}
	return 1;
#else
	return 0;
#endif
}

void __init sbus_arch_postinit(void)
{
	if (sparc_cpu_model == sun4d) {
		extern void sun4d_init_sbi_irq(void);
		sun4d_init_sbi_irq();
	}
}
#endif /* CONFIG_SBUS */
#ifdef CONFIG_PCI

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be valid struct pci_dev pointer for PCI devices.
 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
{
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	if (len == 0) {
		return NULL;
	}
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	va = __get_free_pages(GFP_KERNEL, order);
	if (va == 0) {
		printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
		return NULL;
	}

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
		free_pages(va, order);
		printk("pci_alloc_consistent: no core\n");
		return NULL;
	}

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("pci_alloc_consistent: cannot occupy 0x%lx\n", len_total);
		free_pages(va, order);
		kfree(res);
		return NULL;
	}
	mmu_inval_dma_area(va, len_total);
#if 0
/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
  (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
#endif
	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);

	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
	return (void *) res->start;
}
/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
{
	struct resource *res;
	unsigned long pgp;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("pci_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("pci_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), (long)n);
		return;
	}

	pgp = (unsigned long) phys_to_virt(ba);	/* bus_to_virt actually */
	mmu_inval_dma_area(pgp, n);
	sparc_unmapiorange((unsigned long)p, n);

	release_resource(res);
	kfree(res);

	free_pages(pgp, get_order(n));
}
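
/*
 * Illustrative sketch (not part of this file): a descriptor ring in
 * consistent memory. The ring size and register programming are
 * hypothetical.
 */
#if 0
static int example_setup_ring(struct pci_dev *pdev)
{
	dma_addr_t ring_ba;
	void *ring;

	ring = pci_alloc_consistent(pdev, PAGE_SIZE, &ring_ba);
	if (ring == NULL)
		return -ENOMEM;
	/* hand ring_ba to the chip, use ring from the CPU without any
	 * explicit flushing; on teardown: */
	pci_free_consistent(pdev, PAGE_SIZE, ring, ring_ba);
	return 0;
}
#endif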
/* Map a single buffer of the indicated size for DMA in streaming
 * mode.  The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single_* is performed.
 */
dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
    int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	return virt_to_phys(ptr);
}
/* Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided for in a previous pci_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
    int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
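
/*
 * Illustrative sketch (not part of this file): ownership passes to the
 * device at map time and back to the CPU at unmap time, which is when
 * the invalidation above happens for non-TODEVICE transfers.
 */
#if 0
static void example_rx_one(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t ba;

	ba = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
	/* ... give ba to the device and wait for the DMA to complete ... */
	pci_unmap_single(pdev, ba, len, PCI_DMA_FROMDEVICE);
	/* CPU reads of buf are now guaranteed to see the device's data */
}
#endif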
/*
 * Same as pci_map_single, but with pages.
 */
dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
			unsigned long offset, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	return page_to_phys(page) + offset;
}

void pci_unmap_page(struct pci_dev *hwdev,
			dma_addr_t dma_address, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* mmu_inval_dma_area XXX */
}
/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
    int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	for_each_sg(sgl, sg, nents, n) {
		BUG_ON(page_address(sg_page(sg)) == NULL);
		sg->dvma_address = virt_to_phys(sg_virt(sg));
		sg->dvma_length = sg->length;
	}
	return nents;
}
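
/*
 * Illustrative sketch (not part of this file): this implementation
 * always returns nents unchanged, but portable callers must use the
 * return value and the sg_dma_{address,length}() accessors anyway.
 */
#if 0
static void example_map_sg(struct pci_dev *pdev, struct scatterlist *sgl,
			   int nents)
{
	struct scatterlist *sg;
	int count, i;

	count = pci_map_sg(pdev, sgl, nents, PCI_DMA_TODEVICE);
	for_each_sg(sgl, sg, count, i) {
		/* program one descriptor per pair:
		 * sg_dma_address(sg) / sg_dma_length(sg) */
	}
	/* note: unmap with the original nents, not count */
	pci_unmap_sg(pdev, sgl, nents, PCI_DMA_TODEVICE);
}
#endif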
/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
    int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}
/* Make physical memory consistent for a single
 * streaming mode DMA translation before or after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a pci_dma_sync_single_for_device(), and then
 * the device again owns the buffer.
 */
void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}

void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
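
/*
 * Illustrative sketch (not part of this file): interrogating a live
 * streaming buffer without unmapping it, per the ownership rules above.
 * The status-byte convention is hypothetical.
 */
#if 0
static int example_poll_status(struct pci_dev *pdev, dma_addr_t ba,
			       void *buf, size_t len)
{
	int done;

	pci_dma_sync_single_for_cpu(pdev, ba, len, PCI_DMA_FROMDEVICE);
	done = (*(volatile u8 *)buf != 0);
	pci_dma_sync_single_for_device(pdev, ba, len, PCI_DMA_FROMDEVICE);
	/* the card owns the buffer again only after sync_for_device */
	return done;
}
#endif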
/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}

void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}
#endif /* CONFIG_PCI */
#ifdef CONFIG_PROC_FS

static int
_sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof,
    void *data)
{
	char *p = buf, *e = buf + length;
	struct resource *r;
	const char *nm;

	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
		if (p + 32 >= e)	/* Better than nothing */
			break;
		if ((nm = r->name) == 0) nm = "???";
		p += sprintf(p, "%016llx-%016llx: %s\n",
				(unsigned long long)r->start,
				(unsigned long long)r->end, nm);
	}

	return p-buf;
}

#endif /* CONFIG_PROC_FS */
/*
 * This is a version of find_resource and it belongs to kernel/resource.c.
 * Until we have agreement with Linus and Martin, it lingers here.
 *
 * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
 * This probably warrants some sort of hashing.
 */
static struct resource *_sparc_find_resource(struct resource *root,
					     unsigned long hit)
{
	struct resource *tmp;

	for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
		if (tmp->start <= hit && tmp->end >= hit)
			return tmp;
	}
	return NULL;
}
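
/*
 * Illustrative sketch only, responding to the XXX above: since every
 * caller looks up by the mapping's page-aligned start address, hashing
 * resources by their start page would do. Nothing here is wired up and
 * the bucket count is arbitrary.
 */
#if 0
#define XRES_HASH_SZ	64

static inline unsigned int _sparc_res_hashfn(unsigned long start)
{
	return (start >> PAGE_SHIFT) % XRES_HASH_SZ;
}
#endif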
static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("io_map", 0, NULL, _sparc_io_get_info, &sparc_iomap);
	create_proc_read_entry("dvma_map", 0, NULL, _sparc_io_get_info, &_sparc_dvma);
#endif
}