/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of_device.h>
#include <linux/iommu-common.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major;
static unsigned long vpci_minor;

struct vpci_version {
	unsigned long major;
	unsigned long minor;
};

/* Ordered from largest major to lowest */
static struct vpci_version vpci_versions[] = {
	{ .major = 2, .minor = 0 },
	{ .major = 1, .minor = 1 },
};

static unsigned long vatu_major = 1;
static unsigned long vatu_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

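/* Mapping requests are staged in this small per-cpu batch and handed to
 * the hypervisor in bulk, so a single trap can establish up to
 * PGLIST_NENTS translations instead of one trap per page.  With sparc64's
 * 8K pages that works out to PAGE_SIZE / sizeof(u64) = 1024 entries; the
 * backing pglist page for each cpu is allocated once in pci_sun4v_probe().
 */
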
/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	u64 *pglist = p->pglist;
	u64 index_count;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	unsigned long npages = p->npages;
	unsigned long iotsb_num;
	unsigned long ret;
	long num;

	/* VPCI maj=1, min=[0,1] only supports read and write */
	if (vpci_major < 2)
		prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);

	while (npages != 0) {
		if (mask <= DMA_BIT_MASK(32)) {
			num = pci_sun4v_iommu_map(devhandle,
						  HV_PCI_TSBID(0, entry),
						  npages,
						  prot,
						  __pa(pglist));
			if (unlikely(num < 0)) {
				pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
						   __func__, devhandle,
						   HV_PCI_TSBID(0, entry),
						   npages, prot, __pa(pglist),
						   num);
				return -1;
			}
		} else {
			index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
			iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
			ret = pci_sun4v_iotsb_map(devhandle,
						  iotsb_num,
						  index_count,
						  prot,
						  __pa(pglist),
						  &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
						   __func__, devhandle,
						   iotsb_num, index_count,
						   prot, __pa(pglist), ret);
				return -1;
			}
		}
		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p, mask);
	p->entry = entry;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p, mask);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p, mask);
}

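/* The helpers above bracket a run of mappings, always with interrupts
 * disabled, roughly:
 *
 *	local_irq_save(flags);
 *	iommu_batch_start(dev, prot, entry);
 *	for (each IO page)
 *		iommu_batch_add(paddr, mask);	// flushes when pglist fills
 *	iommu_batch_end(mask);			// flushes the remainder
 *	local_irq_restore(flags);
 *
 * dma_4v_alloc_coherent(), dma_4v_map_page() and dma_4v_map_sg() below
 * all follow this pattern.
 */
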
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	u64 mask;
	unsigned long flags, order, first_page, npages, n;
	unsigned long prot = 0;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;
	atu = iommu->atu;

	mask = dev->coherent_dma_mask;
	if (mask <= DMA_BIT_MASK(32))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ | prot |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

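/* Which translation table serves an allocation is decided purely by the
 * device's DMA mask: masks up to DMA_BIT_MASK(32) go through the legacy
 * sun4v IOMMU (iommu->tbl), anything wider goes through the ATU table
 * (atu->tbl).  The returned dma_addr therefore also encodes which table
 * it came from, which the free/unmap paths below rely on.
 */
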
unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
				unsigned long iotsb_num,
				struct pci_bus *bus_dev)
{
	struct pci_dev *pdev;
	unsigned long err;
	unsigned int bus;
	unsigned int device;
	unsigned int fun;

	list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
		if (pdev->subordinate) {
			/* No need to bind pci bridge */
			dma_4v_iotsb_bind(devhandle, iotsb_num,
					  pdev->subordinate);
		} else {
			bus = bus_dev->number;
			device = PCI_SLOT(pdev->devfn);
			fun = PCI_FUNC(pdev->devfn);
			err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
						   HV_PCI_DEVICE_BUILD(bus,
								       device,
								       fun));

			/* If bind fails for one device it is going to fail
			 * for the rest of the devices because we are sharing
			 * the IOTSB.  So in case of failure simply return
			 * the error.
			 */
			if (err)
				return err;
		}
	}

	return 0;
}

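/* Binding walks the whole tree below the root bus: bridges recurse into
 * their secondary bus and only leaf functions are bound, so every device
 * under this root complex ends up sharing the single IOTSB configured in
 * pci_sun4v_atu_alloc_iotsb().
 */
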
static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
			       dma_addr_t dvma, unsigned long iotsb_num,
			       unsigned long entry, unsigned long npages)
{
	unsigned long num, flags;
	unsigned long ret;

	local_irq_save(flags);
	do {
		if (dvma <= DMA_BIT_MASK(32)) {
			num = pci_sun4v_iommu_demap(devhandle,
						    HV_PCI_TSBID(0, entry),
						    npages);
		} else {
			ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
						    entry, npages, &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
						   ret);
			}
		}
		entry += num;
		npages -= num;
	} while (npages != 0);
	local_irq_restore(flags);
}

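/* The demap loop retries because the hypervisor is allowed to tear down
 * fewer translations than requested: 'num' reports how many entries were
 * actually demapped on each call, and the loop advances by that amount
 * until the whole range is gone.
 */
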
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma, unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long order, npages, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	if (dvma <= DMA_BIT_MASK(32)) {
		tbl = &iommu->tbl;
		iotsb_num = 0; /* we don't care for legacy iommu */
	} else {
		tbl = &atu->tbl;
		iotsb_num = atu->iotsb->iotsb_num;
	}
	entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
	dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	unsigned long prot;
	dma_addr_t bus_addr, ret;
	long entry;

	iommu = dev->archdata.iommu;
	atu = iommu->atu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	mask = *dev->dma_mask;
	if (mask <= DMA_BIT_MASK(32))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto bad;

	bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr, mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return SPARC_MAPPING_ERROR;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
	return SPARC_MAPPING_ERROR;
}

static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long npages;
	unsigned long iotsb_num;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	if (bus_addr <= DMA_BIT_MASK(32)) {
		iotsb_num = 0; /* we don't care for legacy iommu */
		tbl = &iommu->tbl;
	} else {
		iotsb_num = atu->iotsb->iotsb_num;
		tbl = &atu->tbl;
	}
	entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
	dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

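/* Note that unmap never consults the device's DMA mask: the bus address
 * itself says which side it belongs to, since the legacy IOMMU window
 * lives below 4GB while ATU addresses sit above it.
 */
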
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;
	atu = iommu->atu;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;

	mask = *dev->dma_mask;
	if (mask <= DMA_BIT_MASK(32))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;

	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
					   tbl, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry, mask);

		/* Convert entry to a dma_addr_t */
		dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr, mask);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end(mask);

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = SPARC_MAPPING_ERROR;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_tbl_range_free(tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);
			/* XXX demap? XXX */
			s->dma_address = SPARC_MAPPING_ERROR;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	local_irq_restore(flags);

	return 0;
}

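/* Segment merging above mirrors the generic iommu sg-mapping logic: two
 * entries coalesce only when the new chunk lands exactly at dma_next, the
 * merged length stays within dma_get_max_seg_size(), and the result does
 * not cross the dma_get_seg_boundary() limit.  Failure unwinds every
 * segment mapped so far and reports 0 to the caller.
 */
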
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	struct atu *atu;
	unsigned long flags, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	local_irq_save(flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages;
		struct iommu_map_table *tbl;
		unsigned long shift = IO_PAGE_SHIFT;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

		if (dma_handle <= DMA_BIT_MASK(32)) {
			iotsb_num = 0; /* we don't care for legacy iommu */
			tbl = &iommu->tbl;
		} else {
			iotsb_num = atu->iotsb->iotsb_num;
			tbl = &atu->tbl;
		}
		entry = ((dma_handle - tbl->table_map_base) >> shift);
		dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
				   entry, npages);
		iommu_tbl_range_free(tbl, dma_handle, npages,
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

	local_irq_restore(flags);
}

static int dma_4v_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask > DMA_BIT_MASK(32)) {
		if (iommu->atu)
			dma_addr_mask = iommu->atu->dma_addr_mask;
		else
			return 0;
	}

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;
	return pci64_dma_supported(to_pci_dev(dev), device_mask);
}

static int dma_4v_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == SPARC_MAPPING_ERROR;
}

static const struct dma_map_ops sun4v_dma_ops = {
	.alloc				= dma_4v_alloc_coherent,
	.free				= dma_4v_free_coherent,
	.map_page			= dma_4v_map_page,
	.unmap_page			= dma_4v_unmap_page,
	.map_sg				= dma_4v_map_sg,
	.unmap_sg			= dma_4v_unmap_sg,
	.dma_supported			= dma_4v_supported,
	.mapping_error			= dma_4v_mapping_error,
};

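/* These ops are installed globally (dma_ops = &sun4v_dma_ops) in
 * pci_sun4v_probe() once the hypervisor PCI API has been negotiated, so
 * every PCI device behind a sun4v PBM shares them.
 */
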
static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct iommu_map_table *iommu)
{
	struct iommu_pool *pool;
	unsigned long i, pool_nr, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
		pool = &(iommu->pools[pool_nr]);
		for (i = pool->start; i <= pool->end; i++) {
			unsigned long ret, io_attrs, ra;

			ret = pci_sun4v_iommu_getmap(devhandle,
						     HV_PCI_TSBID(0, i),
						     &io_attrs, &ra);
			if (ret == HV_EOK) {
				if (page_in_phys_avail(ra)) {
					pci_sun4v_iommu_demap(devhandle,
							      HV_PCI_TSBID(0,
							      i), 1);
				} else {
					cnt++;
					__set_bit(i, iommu->map);
				}
			}
		}
	}
	return cnt;
}

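/* Any translation the firmware (OBP) left behind that does not point into
 * memory the kernel owns is kept and its bit marked busy, so the allocator
 * will never hand that range out; mappings into kernel-available memory
 * are stale and get demapped instead.
 */
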
static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	struct atu_iotsb *iotsb;
	void *table;
	u64 table_size;
	u64 iotsb_num;
	unsigned long order;
	unsigned long err;

	iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
	if (!iotsb) {
		err = -ENOMEM;
		goto out_err;
	}
	atu->iotsb = iotsb;

	/* calculate size of IOTSB */
	table_size = (atu->size / IO_PAGE_SIZE) * 8;
	order = get_order(table_size);
	table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!table) {
		err = -ENOMEM;
		goto table_failed;
	}
	iotsb->table = table;
	iotsb->ra = __pa(table);
	iotsb->dvma_size = atu->size;
	iotsb->dvma_base = atu->base;
	iotsb->table_size = table_size;
	iotsb->page_size = IO_PAGE_SIZE;

	/* configure and register IOTSB with HV */
	err = pci_sun4v_iotsb_conf(pbm->devhandle,
				   iotsb->ra,
				   iotsb->table_size,
				   iotsb->page_size,
				   iotsb->dvma_base,
				   &iotsb_num);
	if (err) {
		pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}
	iotsb->iotsb_num = iotsb_num;

	err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
	if (err) {
		pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}

	return 0;

iotsb_conf_failed:
	free_pages((unsigned long)table, order);
table_failed:
	kfree(iotsb);
out_err:
	return err;
}

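/* A rough sizing example, assuming the fixed 32G ATU window and sparc64's
 * 8K IO pages: 32G / 8K = 4M IOTTEs, at 8 bytes each an IOTSB of 32M,
 * i.e. table_size = (atu->size / IO_PAGE_SIZE) * 8 as computed above.
 */
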
static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	int err;
	const u64 *ranges;
	u64 map_size, num_iotte;
	u64 dma_mask;
	const u32 *page_size;
	int len;

	ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
				 &len);
	if (!ranges) {
		pr_err(PFX "No iommu-address-ranges\n");
		return -EINVAL;
	}

	page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
				    NULL);
	if (!page_size) {
		pr_err(PFX "No iommu-pagesizes\n");
		return -EINVAL;
	}

	/* There are 4 iommu-address-ranges supported.  Each range is a pair
	 * of {base, size}.  ranges[0] and ranges[1] are 32bit address space
	 * while ranges[2] and ranges[3] are 64bit space.  We want to use the
	 * 64bit address ranges to support 64bit addressing.  Because the
	 * 'size' of ranges[2] and ranges[3] is the same, we can select
	 * either of them for mapping.  However, since that size is too large
	 * for the OS to allocate an IOTSB, we use a fixed size of 32G
	 * (ATU_64_SPACE_SIZE), which is more than enough for all PCIe
	 * devices to share.
	 */
	atu->ranges = (struct atu_ranges *)ranges;
	atu->base = atu->ranges[3].base;
	atu->size = ATU_64_SPACE_SIZE;

	/* Create IOTSB */
	err = pci_sun4v_atu_alloc_iotsb(pbm);
	if (err) {
		pr_err(PFX "Error creating ATU IOTSB\n");
		return err;
	}

	/* Create ATU iommu map.
	 * One bit represents one iotte in IOTSB table.
	 */
	dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
	num_iotte = atu->size / IO_PAGE_SIZE;
	map_size = num_iotte / 8;
	atu->tbl.table_map_base = atu->base;
	atu->dma_addr_mask = dma_mask;
	atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
	if (!atu->tbl.map)
		return -ENOMEM;

	iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);

	return 0;
}

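/* The allocator bitmap is sized at one bit per IOTTE: num_iotte / 8
 * bytes, or 512K for the 32G window in the example above.  dma_mask is
 * the window size rounded up to a power of two minus one, matching how
 * the legacy window computes its mask in pci_sun4v_iommu_init().
 */
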
static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map.  */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->tbl.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);
	sz = probe_existing_entries(pbm, &iommu->tbl);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;	/* INTX only */
	u64		reserved1;
	u64		stick;
	u64		req_id;		/* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset  */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

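/* 'head' is a byte offset into the queue, not an entry index: it advances
 * by sizeof(struct pci_sun4v_msiq_entry) and wraps to 0 at the end of the
 * ring, and pci_sun4v_get_head() above rejects anything at or beyond
 * msiq_ent_count * entry size.  A return of 1 means one MSI was consumed.
 */
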
static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

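/* After configuring each queue, the settings are read back via
 * pci_sun4v_msiq_info() and the base address and entry count verified;
 * a mismatch means the hypervisor did not accept the configuration, and
 * all queues are torn down by freeing the backing pages.
 */
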
static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;
	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;

	return irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
			      struct platform_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	/* If atu_init fails, it is not a complete failure: we can still
	 * continue using the legacy iommu.
	 */
	if (pbm->iommu->atu) {
		err = pci_sun4v_atu_init(pbm);
		if (err) {
			kfree(pbm->iommu->atu);
			pbm->iommu->atu = NULL;
			pr_err(PFX "ATU init failed, err=%d\n", err);
		}
	}

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

static int pci_sun4v_probe(struct platform_device *op)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	struct atu *atu;
	u32 devhandle;
	int i, err = -ENODEV;
	static bool hv_atu = true;

	dp = op->dev.of_node;

	if (!hvapi_negotiated++) {
		for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
			vpci_major = vpci_versions[i].major;
			vpci_minor = vpci_versions[i].minor;

			err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
						   &vpci_minor);
			if (!err)
				break;
		}

		if (err) {
			pr_err(PFX "Could not register hvapi, err=%d\n",
			       err);
			return err;
		}
		pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
			vpci_major, vpci_minor);

		err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
		if (err) {
			/* Don't return an error if we fail to register the
			 * ATU group; ATU hcalls just won't be available.
			 */
			hv_atu = false;
			pr_err(PFX "Could not register hvapi ATU err=%d\n",
			       err);
		} else {
			pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
				vatu_major, vatu_minor);
		}

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;
	iommu->atu = NULL;
	if (hv_atu) {
		atu = kzalloc(sizeof(*atu), GFP_KERNEL);
		if (!atu)
			pr_err(PFX "Could not allocate atu\n");
		else
			iommu->atu = atu;
	}

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(iommu->atu);
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

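/* The devhandle passed to every hypervisor call in this file comes
 * straight from the firmware "reg" property: the upper half of the first
 * physical address cell, masked to 28 bits, i.e.
 *
 *	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;
 */
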
static const struct of_device_id pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = pci_sun4v_match,
	},
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return platform_driver_register(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);