/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/crash_dump.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/memblock.h>
#include <linux/iommu.h>
#include <linux/rculist.h>

#include <asm/sections.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/iommu.h>
#include <asm/debug.h>
#include <asm/firmware.h>
#include <asm/pnv-pci.h>
#include <asm/mmzone.h>

#include <misc/cxl-base.h>
/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
#define TCE32_TABLE_SIZE ((0x10000000 / 0x1000) * 8)
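/*
 * Worked example (sketch): 0x10000000 is the 256 MiB window and 0x1000
 * the 4 KiB TCE page size, so one window needs 0x10000000 / 0x1000 =
 * 65536 TCEs; at 8 bytes per TCE that is 65536 * 8 = 512 KiB of table
 * per 256M segment.
 */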
#define POWERNV_IOMMU_DEFAULT_LEVELS 1
#define POWERNV_IOMMU_MAX_LEVELS 5

static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);
static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
        if (pe->flags & PNV_IODA_PE_DEV)
                strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
        else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
                sprintf(pfix, "%04x:%02x ",
                        pci_domain_nr(pe->pbus), pe->pbus->number);
        else if (pe->flags & PNV_IODA_PE_VF)
                sprintf(pfix, "%04x:%02x:%2x.%d",
                        pci_domain_nr(pe->parent_dev->bus),
                        (pe->rid & 0xff00) >> 8,
                        PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
#endif /* CONFIG_PCI_IOV */

        printk("%spci %s: [PE# %.3d] %pV",
               level, pfix, pe->pe_number, &vaf);
#define pe_err(pe, fmt, ...) \
        pe_level_printk(pe, KERN_ERR, fmt, ##__VA_ARGS__)
#define pe_warn(pe, fmt, ...) \
        pe_level_printk(pe, KERN_WARNING, fmt, ##__VA_ARGS__)
#define pe_info(pe, fmt, ...) \
        pe_level_printk(pe, KERN_INFO, fmt, ##__VA_ARGS__)
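/*
 * Usage sketch (hypothetical call site): these wrappers prefix the
 * message with the PE's address built by pe_level_printk(), e.g.
 *
 *	pe_warn(pe, "OPAL error %ld\n", rc);
 *
 * prints something like "pci 0000:01 : [PE# 002] OPAL error -1" for a
 * bus-type PE.
 */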
static bool pnv_iommu_bypass_disabled __read_mostly;

static int __init iommu_setup(char *str)
                if (!strncmp(str, "nobypass", 8)) {
                        pnv_iommu_bypass_disabled = true;
                        pr_info("PowerNV: IOMMU bypass window disabled.\n");
                str += strcspn(str, ",");

early_param("iommu", iommu_setup);
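/*
 * Example (sketch): booting with "iommu=nobypass" on the kernel command
 * line sets pnv_iommu_bypass_disabled, so the 64-bit direct-DMA bypass
 * window is never enabled and all DMA goes through the 32-bit TCE table.
 */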
/*
 * stdcix is only supposed to be used in hypervisor real mode as per
 * the architecture spec.
 */
static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
        __asm__ __volatile__("stdcix %0,0,%1"
                : : "r" (val), "r" (paddr) : "memory");

static inline bool pnv_pci_is_mem_pref_64(unsigned long flags)
        return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) ==
                (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH));
static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
        if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe)) {
                pr_warn("%s: Invalid PE %d on PHB#%x\n",
                        __func__, pe_no, phb->hose->global_number);

        if (test_and_set_bit(pe_no, phb->ioda.pe_alloc)) {
                pr_warn("%s: PE %d was assigned on PHB#%x\n",
                        __func__, pe_no, phb->hose->global_number);

        phb->ioda.pe_array[pe_no].phb = phb;
        phb->ioda.pe_array[pe_no].pe_number = pe_no;
static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
                pe = find_next_zero_bit(phb->ioda.pe_alloc,
                                        phb->ioda.total_pe, 0);
                if (pe >= phb->ioda.total_pe)
                        return IODA_INVALID_PE;
        } while (test_and_set_bit(pe, phb->ioda.pe_alloc));

        phb->ioda.pe_array[pe].phb = phb;
        phb->ioda.pe_array[pe].pe_number = pe;

static void pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
        WARN_ON(phb->ioda.pe_array[pe].pdev);

        memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe));
        clear_bit(pe, phb->ioda.pe_alloc);
/* The default M64 BAR is shared by all PEs */
static int pnv_ioda2_init_m64(struct pnv_phb *phb)
        /* Configure the default M64 BAR */
        rc = opal_pci_set_phb_mem_window(phb->opal_id,
                                         OPAL_M64_WINDOW_TYPE,
                                         phb->ioda.m64_bar_idx,
        if (rc != OPAL_SUCCESS) {
                desc = "configuring";

        /* Enable the default M64 BAR */
        rc = opal_pci_phb_mmio_enable(phb->opal_id,
                                      OPAL_M64_WINDOW_TYPE,
                                      phb->ioda.m64_bar_idx,
                                      OPAL_ENABLE_M64_SPLIT);
        if (rc != OPAL_SUCCESS) {

        /* Mark the M64 BAR assigned */
        set_bit(phb->ioda.m64_bar_idx, &phb->ioda.m64_bar_alloc);
        /*
         * Strip off the segment used by the reserved PE, which is
         * expected to be either 0 or the last PE of the PHB's capacity.
         */
        r = &phb->hose->mem_resources[1];
        if (phb->ioda.reserved_pe == 0)
                r->start += phb->ioda.m64_segsize;
        else if (phb->ioda.reserved_pe == (phb->ioda.total_pe - 1))
                r->end -= phb->ioda.m64_segsize;
        else
                pr_warn(" Cannot strip M64 segment for reserved PE#%d\n",
                        phb->ioda.reserved_pe);
        pr_warn(" Failure %lld %s M64 BAR#%d\n",
                rc, desc, phb->ioda.m64_bar_idx);
        opal_pci_phb_mmio_enable(phb->opal_id,
                                 OPAL_M64_WINDOW_TYPE,
                                 phb->ioda.m64_bar_idx,
static void pnv_ioda2_reserve_m64_pe(struct pnv_phb *phb)
        resource_size_t sgsz = phb->ioda.m64_segsize;
        struct pci_dev *pdev;

        /*
         * The root bus always reports the full M64 range, while the root
         * port reflects the M64 range actually in use, so check the root
         * port instead of the root bus.
         */
        list_for_each_entry(pdev, &phb->hose->bus->devices, bus_list) {
                for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
                        r = &pdev->resource[PCI_BRIDGE_RESOURCES + i];
                            !pnv_pci_is_mem_pref_64(r->flags))

                        base = (r->start - phb->ioda.m64_base) / sgsz;
                        for (step = 0; step < resource_size(r) / sgsz; step++)
                                pnv_ioda_reserve_pe(phb, base + step);
static int pnv_ioda2_pick_m64_pe(struct pnv_phb *phb,
                                 struct pci_bus *bus, int all)
        resource_size_t segsz = phb->ioda.m64_segsize;
        struct pci_dev *pdev;
        struct pnv_ioda_pe *master_pe, *pe;
        unsigned long size, *pe_alloc;

        /* Root bus shouldn't use M64 */
        if (pci_is_root_bus(bus))
                return IODA_INVALID_PE;

        /* We support only one M64 window on each bus */
        pci_bus_for_each_resource(bus, r, i) {
                if (r && r->parent &&
                    pnv_pci_is_mem_pref_64(r->flags)) {

        /* No M64 window found? */
                return IODA_INVALID_PE;

        /* Allocate bitmap */
        size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
        pe_alloc = kzalloc(size, GFP_KERNEL);
                pr_warn("%s: Out of memory!\n",
                return IODA_INVALID_PE;
        /*
         * Figure out the PE numbers reserved by this PE's M64 window.
         */
        start = (r->start - phb->ioda.m64_base) / segsz;
        for (i = 0; i < resource_size(r) / segsz; i++)
                set_bit(start + i, pe_alloc);

        /*
         * If the PE doesn't cover all subordinate buses, we need to
         * subtract the children's segments from the reserved PEs.
         */
        list_for_each_entry(pdev, &bus->devices, bus_list) {
                if (!pdev->subordinate)

                pci_bus_for_each_resource(pdev->subordinate, r, i) {
                        if (!r || !r->parent ||
                            !pnv_pci_is_mem_pref_64(r->flags))

                        start = (r->start - phb->ioda.m64_base) / segsz;
                        for (j = 0; j < resource_size(r) / segsz; j++)
                                clear_bit(start + j, pe_alloc);
        /*
         * The current bus might not own an M64 window; it may all be
         * contributed by its child buses. In that case we needn't pick
         * an M64-dependent PE#.
         */
        if (bitmap_empty(pe_alloc, phb->ioda.total_pe)) {
                return IODA_INVALID_PE;

        /*
         * Figure out the master PE and put all slave PEs on the master
         * PE's list to form a compound PE.
         */
        while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe, i + 1)) <
               phb->ioda.total_pe) {
                pe = &phb->ioda.pe_array[i];

                        pe->flags |= PNV_IODA_PE_MASTER;
                        INIT_LIST_HEAD(&pe->slaves);
                        pe->flags |= PNV_IODA_PE_SLAVE;
                        pe->master = master_pe;
                        list_add_tail(&pe->list, &master_pe->slaves);

        return master_pe->pe_number;
static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
        struct pci_controller *hose = phb->hose;
        struct device_node *dn = hose->dn;
        struct resource *res;

        /* FIXME: Support M64 for P7IOC */
        if (phb->type != PNV_PHB_IODA2) {
                pr_info(" M64 window not supported on this PHB type\n");

        if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
                pr_info(" Firmware too old to support M64 window\n");

        r = of_get_property(dn, "ibm,opal-m64-window", NULL);
                pr_info(" No <ibm,opal-m64-window> on %s\n",

        res = &hose->mem_resources[1];
        res->start = of_translate_address(dn, r + 2);
        res->end = res->start + of_read_number(r + 4, 2) - 1;
        res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
        pci_addr = of_read_number(r, 2);
        hose->mem_offset[1] = res->start - pci_addr;

        phb->ioda.m64_size = resource_size(res);
        phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe;
        phb->ioda.m64_base = pci_addr;

        pr_info(" MEM64 0x%016llx..0x%016llx -> 0x%016llx\n",
                res->start, res->end, pci_addr);
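/*
 * Worked example (sketch, made-up numbers): with a 64 GiB M64 window
 * and total_pe = 256, m64_segsize = 64 GiB / 256 = 256 MiB, i.e. each
 * PE owns one 256 MiB segment and the segment index equals the PE#.
 */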
        /* Use the last M64 BAR to cover the M64 window */
        phb->ioda.m64_bar_idx = 15;
        phb->init_m64 = pnv_ioda2_init_m64;
        phb->reserve_m64_pe = pnv_ioda2_reserve_m64_pe;
        phb->pick_m64_pe = pnv_ioda2_pick_m64_pe;
static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
        struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no];
        struct pnv_ioda_pe *slave;

        /* Fetch master PE */
        if (pe->flags & PNV_IODA_PE_SLAVE) {
                if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)))
                pe_no = pe->pe_number;

        /* Freeze master PE */
        rc = opal_pci_eeh_freeze_set(phb->opal_id,
                                     OPAL_EEH_ACTION_SET_FREEZE_ALL);
        if (rc != OPAL_SUCCESS) {
                pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
                        __func__, rc, phb->hose->global_number, pe_no);

        /* Freeze slave PEs */
        if (!(pe->flags & PNV_IODA_PE_MASTER))

        list_for_each_entry(slave, &pe->slaves, list) {
                rc = opal_pci_eeh_freeze_set(phb->opal_id,
                                             OPAL_EEH_ACTION_SET_FREEZE_ALL);
                if (rc != OPAL_SUCCESS)
                        pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
                                __func__, rc, phb->hose->global_number,
static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
        struct pnv_ioda_pe *pe, *slave;

        pe = &phb->ioda.pe_array[pe_no];
        if (pe->flags & PNV_IODA_PE_SLAVE) {
                WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
                pe_no = pe->pe_number;

        /* Clear frozen state for master PE */
        rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, opt);
        if (rc != OPAL_SUCCESS) {
                pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
                        __func__, rc, opt, phb->hose->global_number, pe_no);

        if (!(pe->flags & PNV_IODA_PE_MASTER))

        /* Clear frozen state for slave PEs */
        list_for_each_entry(slave, &pe->slaves, list) {
                rc = opal_pci_eeh_freeze_clear(phb->opal_id,
                if (rc != OPAL_SUCCESS) {
                        pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
                                __func__, rc, opt, phb->hose->global_number,
static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
        struct pnv_ioda_pe *slave, *pe;

        /* Sanity check on PE number */
        if (pe_no < 0 || pe_no >= phb->ioda.total_pe)
                return OPAL_EEH_STOPPED_PERM_UNAVAIL;

        /*
         * Fetch the master PE; the PE instance might not be
         * initialized yet.
         */
        pe = &phb->ioda.pe_array[pe_no];
        if (pe->flags & PNV_IODA_PE_SLAVE) {
                WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
                pe_no = pe->pe_number;

        /* Check the master PE */
        rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
                                        &state, &pcierr, NULL);
        if (rc != OPAL_SUCCESS) {
                pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
                        phb->hose->global_number, pe_no);
                return OPAL_EEH_STOPPED_TEMP_UNAVAIL;

        /* Check the slave PEs */
        if (!(pe->flags & PNV_IODA_PE_MASTER))

        list_for_each_entry(slave, &pe->slaves, list) {
                rc = opal_pci_eeh_freeze_status(phb->opal_id,
                if (rc != OPAL_SUCCESS) {
                        pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
                                phb->hose->global_number, slave->pe_number);
                        return OPAL_EEH_STOPPED_TEMP_UNAVAIL;

        /*
         * Override the result based on the ascending severity.
/*
 * Currently these two are only used when MSIs are enabled. This will
 * change, but in the meantime we need to protect them to avoid warnings.
 */
#ifdef CONFIG_PCI_MSI
static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
        struct pnv_phb *phb = hose->private_data;
        struct pci_dn *pdn = pci_get_pdn(dev);

        if (pdn->pe_number == IODA_INVALID_PE)
        return &phb->ioda.pe_array[pdn->pe_number];
#endif /* CONFIG_PCI_MSI */
static int pnv_ioda_set_one_peltv(struct pnv_phb *phb,
                                  struct pnv_ioda_pe *parent,
                                  struct pnv_ioda_pe *child,
        const char *desc = is_add ? "adding" : "removing";
        uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN :
                              OPAL_REMOVE_PE_FROM_DOMAIN;
        struct pnv_ioda_pe *slave;

        /* Parent PE affects child PE */
        rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
                                child->pe_number, op);
        if (rc != OPAL_SUCCESS) {
                pe_warn(child, "OPAL error %ld %s to parent PELTV\n",

        if (!(child->flags & PNV_IODA_PE_MASTER))

        /* Compound case: parent PE affects slave PEs */
        list_for_each_entry(slave, &child->slaves, list) {
                rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
                                        slave->pe_number, op);
                if (rc != OPAL_SUCCESS) {
                        pe_warn(slave, "OPAL error %ld %s to parent PELTV\n",
static int pnv_ioda_set_peltv(struct pnv_phb *phb,
                              struct pnv_ioda_pe *pe,
        struct pnv_ioda_pe *slave;
        struct pci_dev *pdev = NULL;

        /*
         * Clear the PE frozen state. If it's a master PE, we need to
         * clear the slave PEs' frozen state as well.
         */
        opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
                                  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
        if (pe->flags & PNV_IODA_PE_MASTER) {
                list_for_each_entry(slave, &pe->slaves, list)
                        opal_pci_eeh_freeze_clear(phb->opal_id,
                                OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

        /*
         * Associate the PE in PELT. We need to add the PE to the
         * corresponding PELT-V as well; otherwise, an error originating
         * from the PE might be contributed to other PEs.
         */
        ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add);

        /* For compound PEs, any one affects all of them */
        if (pe->flags & PNV_IODA_PE_MASTER) {
                list_for_each_entry(slave, &pe->slaves, list) {
                        ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add);

        if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
                pdev = pe->pbus->self;
        else if (pe->flags & PNV_IODA_PE_DEV)
                pdev = pe->pdev->bus->self;
#ifdef CONFIG_PCI_IOV
        else if (pe->flags & PNV_IODA_PE_VF)
                pdev = pe->parent_dev->bus->self;
#endif /* CONFIG_PCI_IOV */
                struct pci_dn *pdn = pci_get_pdn(pdev);
                struct pnv_ioda_pe *parent;

                if (pdn && pdn->pe_number != IODA_INVALID_PE) {
                        parent = &phb->ioda.pe_array[pdn->pe_number];
                        ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add);

                pdev = pdev->bus->self;
#ifdef CONFIG_PCI_IOV
static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
        struct pci_dev *parent;
        uint8_t bcomp, dcomp, fcomp;

        /* Currently we just deconfigure VF PEs. Bus PEs will always be there. */

                dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
                fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
                parent = pe->pbus->self;
                if (pe->flags & PNV_IODA_PE_BUS_ALL)
                        count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;

                case  1: bcomp = OpalPciBusAll;   break;
                case  2: bcomp = OpalPciBus7Bits; break;
                case  4: bcomp = OpalPciBus6Bits; break;
                case  8: bcomp = OpalPciBus5Bits; break;
                case 16: bcomp = OpalPciBus4Bits; break;
                case 32: bcomp = OpalPciBus3Bits; break;
                        dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
                        /* Do an exact match only */
                        bcomp = OpalPciBusAll;
                rid_end = pe->rid + (count << 8);

                if (pe->flags & PNV_IODA_PE_VF)
                        parent = pe->parent_dev;
                        parent = pe->pdev->bus->self;
                bcomp = OpalPciBusAll;
                dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
                fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
                rid_end = pe->rid + 1;
        /* Clear the reverse map */
        for (rid = pe->rid; rid < rid_end; rid++)
                phb->ioda.pe_rmap[rid] = 0;

        /* Release from all parents' PELT-V */
                struct pci_dn *pdn = pci_get_pdn(parent);
                if (pdn && pdn->pe_number != IODA_INVALID_PE) {
                        rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
                                pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
                        /* XXX What to do in case of error ? */
                parent = parent->bus->self;

        opal_pci_eeh_freeze_set(phb->opal_id, pe->pe_number,
                                OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

        /* Disassociate the PE in PELT */
        rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
                                pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
                pe_warn(pe, "OPAL error %ld removing self from PELTV\n", rc);
        rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
                             bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
                pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);

        pe->parent_dev = NULL;

#endif /* CONFIG_PCI_IOV */
static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
        struct pci_dev *parent;
        uint8_t bcomp, dcomp, fcomp;
        long rc, rid_end, rid;

        /* Bus validation ? */

                dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
                fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
                parent = pe->pbus->self;
                if (pe->flags & PNV_IODA_PE_BUS_ALL)
                        count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;

                case  1: bcomp = OpalPciBusAll;   break;
                case  2: bcomp = OpalPciBus7Bits; break;
                case  4: bcomp = OpalPciBus6Bits; break;
                case  8: bcomp = OpalPciBus5Bits; break;
                case 16: bcomp = OpalPciBus4Bits; break;
                case 32: bcomp = OpalPciBus3Bits; break;
                        dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
                        /* Do an exact match only */
                        bcomp = OpalPciBusAll;
                rid_end = pe->rid + (count << 8);
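                /*
                 * Worked example (sketch, made-up numbers): a PE owning
                 * buses 0x04..0x07 has count = 4, so bcomp becomes
                 * OpalPciBus6Bits (only the top 6 bits of the bus number
                 * are compared) and rid_end = pe->rid + (4 << 8), i.e.
                 * the PE spans the 4 * 256 RIDs of those buses.
                 */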
#ifdef CONFIG_PCI_IOV
                if (pe->flags & PNV_IODA_PE_VF)
                        parent = pe->parent_dev;
#endif /* CONFIG_PCI_IOV */
                        parent = pe->pdev->bus->self;
                bcomp = OpalPciBusAll;
                dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
                fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
                rid_end = pe->rid + 1;
        /*
         * Associate the PE in PELT. We need to add the PE to the
         * corresponding PELT-V as well; otherwise, an error originating
         * from the PE might be contributed to other PEs.
         */
        rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
                             bcomp, dcomp, fcomp, OPAL_MAP_PE);
                pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);

        /* Configure PELTV */
        pnv_ioda_set_peltv(phb, pe, true);

        /* Setup reverse map */
        for (rid = pe->rid; rid < rid_end; rid++)
                phb->ioda.pe_rmap[rid] = pe->pe_number;

        /* Set up one MVE on IODA1 */
        if (phb->type != PNV_PHB_IODA1) {

        pe->mve_number = pe->pe_number;
        rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
        if (rc != OPAL_SUCCESS) {
                pe_err(pe, "OPAL error %ld setting up MVE %d\n",
        rc = opal_pci_set_mve_enable(phb->opal_id,
                                     pe->mve_number, OPAL_ENABLE_MVE);
                pe_err(pe, "OPAL error %ld enabling MVE %d\n",
static void pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
                                       struct pnv_ioda_pe *pe)
        struct pnv_ioda_pe *lpe;

        list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) {
                if (lpe->dma_weight < pe->dma_weight) {
                        list_add_tail(&pe->dma_link, &lpe->dma_link);
        list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list);

static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
        /*
         * This is quite simplistic. The "base" weight of a device
         * is 10; a weight of 0 means no DMA is accounted for it.
         */

        /* If it's a bridge, no DMA */
        if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)

        /* Reduce the weight of slow USB controllers */
        if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
            dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
            dev->class == PCI_CLASS_SERIAL_USB_EHCI)

        /* Increase the weight of RAID (includes Obsidian) */
        if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
#ifdef CONFIG_PCI_IOV
static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
        struct pci_dn *pdn = pci_get_pdn(dev);
        struct resource *res, res2;
        resource_size_t size;

        /*
         * "offset" is in VFs. The M64 windows are sized so that when they
         * are segmented, each segment is the same size as the IOV BAR.
         * Each segment is in a separate PE, and the high order bits of the
         * address are the PE number. Therefore, each VF's BAR is in a
         * separate PE, and changing the IOV BAR start address changes the
         * range of PEs the VFs are in.
         */
        num_vfs = pdn->num_vfs;
        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
                res = &dev->resource[i + PCI_IOV_RESOURCES];
                if (!res->flags || !res->parent)

                if (!pnv_pci_is_mem_pref_64(res->flags))

                /*
                 * The actual IOV BAR range is determined by the start address
                 * and the actual size for num_vfs VFs BAR. This check is to
                 * make sure that after shifting, the range will not overlap
                 * with another device.
                 */
                size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
                res2.flags = res->flags;
                res2.start = res->start + (size * offset);
                res2.end = res2.start + (size * num_vfs) - 1;
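                /*
                 * Worked example (sketch, made-up numbers): with a per-VF
                 * BAR size of 1 MiB, offset = 8 and num_vfs = 4, the
                 * shifted range is [res->start + 8 MiB, res->start +
                 * 12 MiB - 1]; the check below rejects the shift if that
                 * runs past the original resource's end.
                 */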
                if (res2.end > res->end) {
                        dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
                                i, &res2, res, num_vfs, offset);

        /*
         * After doing so, there will be a "hole" in /proc/iomem when
         * offset is a positive value. It looks as if the device returned
         * some MMIO range back to the system that nobody can actually use.
         */
        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
                res = &dev->resource[i + PCI_IOV_RESOURCES];
                if (!res->flags || !res->parent)

                if (!pnv_pci_is_mem_pref_64(res->flags))

                size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);

                res->start += size * offset;

                dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (enabling %d VFs shifted by %d)\n",
                         i, &res2, res, num_vfs, offset);
                pci_update_resource(dev, i + PCI_IOV_RESOURCES);
#endif /* CONFIG_PCI_IOV */
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
        struct pnv_phb *phb = hose->private_data;
        struct pci_dn *pdn = pci_get_pdn(dev);
        struct pnv_ioda_pe *pe;

                pr_err("%s: Device tree node not associated properly\n",
        if (pdn->pe_number != IODA_INVALID_PE)

        /* PE#0 has been pre-set */
        if (dev->bus->number == 0)

        pe_num = pnv_ioda_alloc_pe(phb);
        if (pe_num == IODA_INVALID_PE) {
                pr_warn("%s: Not enough PE# available, disabling device\n",

        /*
         * NOTE: We only get one ref to the pci_dev for the pdn, not for the
         * pointer in the PE data structure; both should be destroyed at the
         * same time. However, this needs to be looked at more closely again
         * once we actually start removing things (Hotplug, SR-IOV, ...)
         *
         * At some point we want to remove the PDN completely anyway.
         */
        pe = &phb->ioda.pe_array[pe_num];
        pdn->pe_number = pe_num;
        pe->rid = dev->bus->number << 8 | pdn->devfn;

        pe_info(pe, "Associated device to PE\n");

        if (pnv_ioda_configure_pe(phb, pe)) {
                /* XXX What do we do here ? */
                pnv_ioda_free_pe(phb, pe_num);
                pdn->pe_number = IODA_INVALID_PE;

        /* Assign a DMA weight to the device */
        pe->dma_weight = pnv_ioda_dma_weight(dev);
        if (pe->dma_weight != 0) {
                phb->ioda.dma_weight += pe->dma_weight;
                phb->ioda.dma_pe_count++;

        pnv_ioda_link_pe_by_weight(phb, pe);

#endif /* Useful for SRIOV case */
static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
        struct pci_dev *dev;

        list_for_each_entry(dev, &bus->devices, bus_list) {
                struct pci_dn *pdn = pci_get_pdn(dev);

                        pr_warn("%s: No device node associated with device!\n",
                pdn->pe_number = pe->pe_number;
                pe->dma_weight += pnv_ioda_dma_weight(dev);
                if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
                        pnv_ioda_setup_same_PE(dev->subordinate, pe);

/*
 * There are two types of PCI-bus-sensitive PEs: one comprises a single
 * PCI bus; the other contains the primary PCI bus plus its subordinate
 * PCI devices and buses. The second type of PE typically originates
 * from a PCIe-to-PCI bridge or a PLX switch downstream port.
 */
static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
        struct pci_controller *hose = pci_bus_to_host(bus);
        struct pnv_phb *phb = hose->private_data;
        struct pnv_ioda_pe *pe;
        int pe_num = IODA_INVALID_PE;

        /* Check if the PE number is determined by M64 */
        if (phb->pick_m64_pe)
                pe_num = phb->pick_m64_pe(phb, bus, all);

        /* The PE number isn't pinned by M64 */
        if (pe_num == IODA_INVALID_PE)
                pe_num = pnv_ioda_alloc_pe(phb);

        if (pe_num == IODA_INVALID_PE) {
                pr_warn("%s: Not enough PE# available for PCI bus %04x:%02x\n",
                        __func__, pci_domain_nr(bus), bus->number);

        pe = &phb->ioda.pe_array[pe_num];
        pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
        pe->mve_number = -1;
        pe->rid = bus->busn_res.start << 8;

                pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
                        bus->busn_res.start, bus->busn_res.end, pe_num);
                pe_info(pe, "Secondary bus %d associated with PE#%d\n",
                        bus->busn_res.start, pe_num);

        if (pnv_ioda_configure_pe(phb, pe)) {
                /* XXX What do we do here ? */
                pnv_ioda_free_pe(phb, pe_num);

        /* Associate it with all child devices */
        pnv_ioda_setup_same_PE(bus, pe);

        /* Put the PE on the list */
        list_add_tail(&pe->list, &phb->ioda.pe_list);

        /* Account for one DMA PE if at least one DMA-capable device exists
        if (pe->dma_weight != 0) {
                phb->ioda.dma_weight += pe->dma_weight;
                phb->ioda.dma_pe_count++;

        pnv_ioda_link_pe_by_weight(phb, pe);
static void pnv_ioda_setup_PEs(struct pci_bus *bus)
        struct pci_dev *dev;

        pnv_ioda_setup_bus_PE(bus, 0);

        list_for_each_entry(dev, &bus->devices, bus_list) {
                if (dev->subordinate) {
                        if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
                                pnv_ioda_setup_bus_PE(dev->subordinate, 1);
                                pnv_ioda_setup_PEs(dev->subordinate);

/*
 * Configure PEs so that the downstream PCI buses and devices
 * get their associated PE#. Unfortunately, we haven't figured
 * out a way to identify PLX bridges yet, so we simply assign
 * the PCI bus and the subordinates behind the root port to a
 * PE# here. This rule is expected to change as soon as we can
 * detect PLX bridges correctly.
 */
static void pnv_pci_ioda_setup_PEs(void)
        struct pci_controller *hose, *tmp;
        struct pnv_phb *phb;

        list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
                phb = hose->private_data;

                /* M64 layout might affect PE allocation */
                if (phb->reserve_m64_pe)
                        phb->reserve_m64_pe(phb);

                pnv_ioda_setup_PEs(hose->bus);
#ifdef CONFIG_PCI_IOV
static int pnv_pci_vf_release_m64(struct pci_dev *pdev)
        struct pci_bus *bus;
        struct pci_controller *hose;
        struct pnv_phb *phb;

        hose = pci_bus_to_host(bus);
        phb = hose->private_data;
        pdn = pci_get_pdn(pdev);

        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
                for (j = 0; j < M64_PER_IOV; j++) {
                        if (pdn->m64_wins[i][j] == IODA_INVALID_M64)
                        opal_pci_phb_mmio_enable(phb->opal_id,
                                OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 0);
                        clear_bit(pdn->m64_wins[i][j], &phb->ioda.m64_bar_alloc);
                        pdn->m64_wins[i][j] = IODA_INVALID_M64;
static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
        struct pci_bus *bus;
        struct pci_controller *hose;
        struct pnv_phb *phb;
        struct resource *res;
        resource_size_t size, start;

        hose = pci_bus_to_host(bus);
        phb = hose->private_data;
        pdn = pci_get_pdn(pdev);
        total_vfs = pci_sriov_get_totalvfs(pdev);

        /* Initialize the m64_wins to IODA_INVALID_M64 */
        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
                for (j = 0; j < M64_PER_IOV; j++)
                        pdn->m64_wins[i][j] = IODA_INVALID_M64;

        if (pdn->m64_per_iov == M64_PER_IOV) {
                vf_groups = (num_vfs <= M64_PER_IOV) ? num_vfs : M64_PER_IOV;
                vf_per_group = (num_vfs <= M64_PER_IOV) ? 1 :
                        roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;
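                /*
                 * Worked example (sketch; assumes, for illustration only,
                 * that M64_PER_IOV is 4): with num_vfs = 6 > 4 we get
                 * vf_groups = 4 and vf_per_group =
                 * roundup_pow_of_two(6) / 4 = 8 / 4 = 2, i.e. each M64
                 * window covers the BAR space of two VFs.
                 */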
        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
                res = &pdev->resource[i + PCI_IOV_RESOURCES];
                if (!res->flags || !res->parent)

                if (!pnv_pci_is_mem_pref_64(res->flags))

                for (j = 0; j < vf_groups; j++) {
                                win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
                                        phb->ioda.m64_bar_idx + 1, 0);

                                if (win >= phb->ioda.m64_bar_idx + 1)
                        } while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));

                        pdn->m64_wins[i][j] = win;

                        if (pdn->m64_per_iov == M64_PER_IOV) {
                                size = pci_iov_resource_size(pdev,
                                        PCI_IOV_RESOURCES + i);
                                size = size * vf_per_group;
                                start = res->start + size * j;
                                size = resource_size(res);

                        /* Map the M64 window here */
                        if (pdn->m64_per_iov == M64_PER_IOV) {
                                pe_num = pdn->offset + j;
                                rc = opal_pci_map_pe_mmio_window(phb->opal_id,
                                        pe_num, OPAL_M64_WINDOW_TYPE,
                                        pdn->m64_wins[i][j], 0);
                                rc = opal_pci_set_phb_mem_window(phb->opal_id,
                                        OPAL_M64_WINDOW_TYPE,
                                        pdn->m64_wins[i][j],

                        if (rc != OPAL_SUCCESS) {
                                dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n",

                        if (pdn->m64_per_iov == M64_PER_IOV)
                                rc = opal_pci_phb_mmio_enable(phb->opal_id,
                                        OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 2);
                                rc = opal_pci_phb_mmio_enable(phb->opal_id,
                                        OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 1);

                        if (rc != OPAL_SUCCESS) {
                                dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n",

        pnv_pci_vf_release_m64(pdev);
static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe)
        struct pci_bus *bus;
        struct pci_controller *hose;
        struct pnv_phb *phb;
        struct iommu_table *tbl;

        hose = pci_bus_to_host(bus);
        phb = hose->private_data;
        tbl = pe->table_group.tables[0];
        addr = tbl->it_base;

        opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
                                   pe->pe_number << 1, 1, __pa(addr),

        rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
                                             (pe->pe_number << 1) + 1,
                                             pe->tce_bypass_base,
                pe_warn(pe, "OPAL error %ld releasing DMA window\n", rc);

        pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
        if (pe->table_group.group) {
                iommu_group_put(pe->table_group.group);
                BUG_ON(pe->table_group.group);

        pnv_pci_ioda2_table_free_pages(tbl);
        iommu_free_table(tbl, of_node_full_name(dev->dev.of_node));
static void pnv_ioda_release_vf_PE(struct pci_dev *pdev, u16 num_vfs)
        struct pci_bus *bus;
        struct pci_controller *hose;
        struct pnv_phb *phb;
        struct pnv_ioda_pe *pe, *pe_n;

        hose = pci_bus_to_host(bus);
        phb = hose->private_data;
        pdn = pci_get_pdn(pdev);

        if (!pdev->is_physfn)

        if (pdn->m64_per_iov == M64_PER_IOV && num_vfs > M64_PER_IOV) {
                vf_per_group = roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;

                for (vf_group = 0; vf_group < M64_PER_IOV; vf_group++)
                        for (vf_index = vf_group * vf_per_group;
                             vf_index < (vf_group + 1) * vf_per_group &&
                                for (vf_index1 = vf_group * vf_per_group;
                                     vf_index1 < (vf_group + 1) * vf_per_group &&
                                     vf_index1 < num_vfs;

                                        rc = opal_pci_set_peltv(phb->opal_id,
                                                pdn->offset + vf_index,
                                                pdn->offset + vf_index1,
                                                OPAL_REMOVE_PE_FROM_DOMAIN);

                                                dev_warn(&pdev->dev, "%s: Failed to unlink same group PE#%d(%lld)\n",
                                                        pdn->offset + vf_index1, rc);

        list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
                if (pe->parent_dev != pdev)

                pnv_pci_ioda2_release_dma_pe(pdev, pe);

                /* Remove from the list */
                mutex_lock(&phb->ioda.pe_list_mutex);
                list_del(&pe->list);
                mutex_unlock(&phb->ioda.pe_list_mutex);

                pnv_ioda_deconfigure_pe(phb, pe);

                pnv_ioda_free_pe(phb, pe->pe_number);
void pnv_pci_sriov_disable(struct pci_dev *pdev)
        struct pci_bus *bus;
        struct pci_controller *hose;
        struct pnv_phb *phb;
        struct pci_sriov *iov;

        hose = pci_bus_to_host(bus);
        phb = hose->private_data;
        pdn = pci_get_pdn(pdev);
        num_vfs = pdn->num_vfs;

        /* Release VF PEs */
        pnv_ioda_release_vf_PE(pdev, num_vfs);

        if (phb->type == PNV_PHB_IODA2) {
                if (pdn->m64_per_iov == 1)
                        pnv_pci_vf_resource_shift(pdev, -pdn->offset);

                /* Release M64 windows */
                pnv_pci_vf_release_m64(pdev);

                /* Release PE numbers */
                bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
                                       struct pnv_ioda_pe *pe);
static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
        struct pci_bus *bus;
        struct pci_controller *hose;
        struct pnv_phb *phb;
        struct pnv_ioda_pe *pe;

        hose = pci_bus_to_host(bus);
        phb = hose->private_data;
        pdn = pci_get_pdn(pdev);

        if (!pdev->is_physfn)

        /* Reserve a PE for each VF */
        for (vf_index = 0; vf_index < num_vfs; vf_index++) {
                pe_num = pdn->offset + vf_index;

                pe = &phb->ioda.pe_array[pe_num];
                pe->pe_number = pe_num;
                pe->flags = PNV_IODA_PE_VF;
                pe->parent_dev = pdev;
                pe->mve_number = -1;
                pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
                          pci_iov_virtfn_devfn(pdev, vf_index);

                pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%d\n",
                        hose->global_number, pdev->bus->number,
                        PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
                        PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);

                if (pnv_ioda_configure_pe(phb, pe)) {
                        /* XXX What do we do here ? */
                        pnv_ioda_free_pe(phb, pe_num);

                /* Put the PE on the list */
                mutex_lock(&phb->ioda.pe_list_mutex);
                list_add_tail(&pe->list, &phb->ioda.pe_list);
                mutex_unlock(&phb->ioda.pe_list_mutex);

                pnv_pci_ioda2_setup_dma_pe(phb, pe);

        if (pdn->m64_per_iov == M64_PER_IOV && num_vfs > M64_PER_IOV) {
                vf_per_group = roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;

                for (vf_group = 0; vf_group < M64_PER_IOV; vf_group++) {
                        for (vf_index = vf_group * vf_per_group;
                             vf_index < (vf_group + 1) * vf_per_group &&
                                for (vf_index1 = vf_group * vf_per_group;
                                     vf_index1 < (vf_group + 1) * vf_per_group &&
                                     vf_index1 < num_vfs;

                                        rc = opal_pci_set_peltv(phb->opal_id,
                                                pdn->offset + vf_index,
                                                pdn->offset + vf_index1,
                                                OPAL_ADD_PE_TO_DOMAIN);

                                                dev_warn(&pdev->dev, "%s: Failed to link same group PE#%d(%lld)\n",
                                                        pdn->offset + vf_index1, rc);
int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
        struct pci_bus *bus;
        struct pci_controller *hose;
        struct pnv_phb *phb;

        hose = pci_bus_to_host(bus);
        phb = hose->private_data;
        pdn = pci_get_pdn(pdev);

        if (phb->type == PNV_PHB_IODA2) {
                /* Calculate available PEs for the required VFs */
                mutex_lock(&phb->ioda.pe_alloc_mutex);
                pdn->offset = bitmap_find_next_zero_area(
                        phb->ioda.pe_alloc, phb->ioda.total_pe,
                if (pdn->offset >= phb->ioda.total_pe) {
                        mutex_unlock(&phb->ioda.pe_alloc_mutex);
                        dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);

                bitmap_set(phb->ioda.pe_alloc, pdn->offset, num_vfs);
                pdn->num_vfs = num_vfs;
                mutex_unlock(&phb->ioda.pe_alloc_mutex);

                /* Assign M64 windows accordingly */
                ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
                        dev_info(&pdev->dev, "Not enough M64 window resources\n");

                /*
                 * When using one M64 BAR to map one IOV BAR, we need to shift
                 * the IOV BAR according to the PE# allocated to the VFs.
                 * Otherwise, the PE# for the VF will conflict with others.
                 */
                if (pdn->m64_per_iov == 1) {
                        ret = pnv_pci_vf_resource_shift(pdev, pdn->offset);

        pnv_ioda_setup_vf_PE(pdev, num_vfs);

        bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
int pcibios_sriov_disable(struct pci_dev *pdev)
        pnv_pci_sriov_disable(pdev);

        /* Release PCI data */
        remove_dev_pci_data(pdev);

int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
        /* Allocate PCI data */
        add_dev_pci_data(pdev);

        pnv_pci_sriov_enable(pdev, num_vfs);
#endif /* CONFIG_PCI_IOV */
static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
        struct pci_dn *pdn = pci_get_pdn(pdev);
        struct pnv_ioda_pe *pe;

        /*
         * This function can be called while the PE# hasn't been
         * assigned yet. Do nothing in that case.
         */
        if (!pdn || pdn->pe_number == IODA_INVALID_PE)

        pe = &phb->ioda.pe_array[pdn->pe_number];
        WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
        set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
        /*
         * Note: iommu_add_device() will fail here as
         * for physical PE: the device is already added by now;
         * for virtual PE: sysfs entries are not ready yet and
         * tce_iommu_bus_notifier will add the device to a group later.
         */
static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
        struct pci_dn *pdn = pci_get_pdn(pdev);
        struct pnv_ioda_pe *pe;
        bool bypass = false;

        if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))

        pe = &phb->ioda.pe_array[pdn->pe_number];
        if (pe->tce_bypass_enabled) {
                top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
                bypass = (dma_mask >= top);

                dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
                set_dma_ops(&pdev->dev, &dma_direct_ops);
                set_dma_offset(&pdev->dev, pe->tce_bypass_base);
                dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
                set_dma_ops(&pdev->dev, &dma_iommu_ops);
                set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
        *pdev->dev.dma_mask = dma_mask;
static u64 pnv_pci_ioda_dma_get_required_mask(struct pnv_phb *phb,
                                              struct pci_dev *pdev)
        struct pci_dn *pdn = pci_get_pdn(pdev);
        struct pnv_ioda_pe *pe;

        if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))

        pe = &phb->ioda.pe_array[pdn->pe_number];
        if (!pe->tce_bypass_enabled)
                return __dma_get_required_mask(&pdev->dev);

        end = pe->tce_bypass_base + memblock_end_of_DRAM();
        mask = 1ULL << (fls64(end) - 1);
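        /*
         * Worked example (sketch, made-up numbers): with
         * tce_bypass_base = 1ULL << 59 and 16 GiB of DRAM, end sits just
         * above 2^59, so fls64(end) returns 60 and the highest set bit of
         * the required mask is bit 59, i.e. a device must support roughly
         * 60-bit DMA addressing to use the bypass window.
         */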
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
                                   struct pci_bus *bus)
        struct pci_dev *dev;

        list_for_each_entry(dev, &bus->devices, bus_list) {
                set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
                iommu_add_device(&dev->dev);

                if (dev->subordinate)
                        pnv_ioda_setup_bus_dma(pe, dev->subordinate);
static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
                unsigned long index, unsigned long npages, bool rm)
        struct iommu_table_group_link *tgl = list_first_entry_or_null(
                        &tbl->it_group_list, struct iommu_table_group_link,
        struct pnv_ioda_pe *pe = container_of(tgl->table_group,
                        struct pnv_ioda_pe, table_group);
        __be64 __iomem *invalidate = rm ?
                (__be64 __iomem *)pe->phb->ioda.tce_inval_reg_phys :
                pe->phb->ioda.tce_inval_reg;
        unsigned long start, end, inc;
        const unsigned shift = tbl->it_page_shift;

        start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
        end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset +

        /* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
        if (tbl->it_busno) {
                inc = 128ull << shift;
                start |= tbl->it_busno;
                end |= tbl->it_busno;
        } else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
                /* p7ioc-style invalidation, 2 TCEs per write */
                start |= (1ull << 63);
                end |= (1ull << 63);
                /* Default (older HW) */

        end |= inc - 1; /* round up end to be different than start */

        mb(); /* Ensure above stores are visible */
        while (start <= end) {
                        __raw_rm_writeq(cpu_to_be64(start), invalidate);
                        __raw_writeq(cpu_to_be64(start), invalidate);

        /*
         * The iommu layer will do another mb() for us on build(),
         * and we don't care about it on free().
         */
static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
                long npages, unsigned long uaddr,
                enum dma_data_direction direction,
                struct dma_attrs *attrs)
        int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,

        if (!ret && (tbl->it_type & TCE_PCI_SWINV_CREATE))
                pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false);

#ifdef CONFIG_IOMMU_API
static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index,
                unsigned long *hpa, enum dma_data_direction *direction)
        long ret = pnv_tce_xchg(tbl, index, hpa, direction);

        if (!ret && (tbl->it_type &
                     (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE)))
                pnv_pci_ioda1_tce_invalidate(tbl, index, 1, false);

static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
        pnv_tce_free(tbl, index, npages);

        if (tbl->it_type & TCE_PCI_SWINV_FREE)
                pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false);

static struct iommu_table_ops pnv_ioda1_iommu_ops = {
        .set = pnv_ioda1_tce_build,
#ifdef CONFIG_IOMMU_API
        .exchange = pnv_ioda1_tce_xchg,
        .clear = pnv_ioda1_tce_free,
static inline void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_ioda_pe *pe)
        /* 01xb - invalidate TCEs that match the specified PE# */
        unsigned long val = (0x4ull << 60) | (pe->pe_number & 0xFF);
        struct pnv_phb *phb = pe->phb;

        if (!phb->ioda.tce_inval_reg)

        mb(); /* Ensure above stores are visible */
        __raw_writeq(cpu_to_be64(val), phb->ioda.tce_inval_reg);
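/*
 * Layout sketch of the kill-register value built above: bits 63:60 are
 * 0b0100 ("01xx", selecting invalidate-by-PE) and the low byte holds the
 * PE number, so e.g. PE# 0x12 yields the value 0x4000000000000012.
 */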
static void pnv_pci_ioda2_do_tce_invalidate(unsigned pe_number, bool rm,
                __be64 __iomem *invalidate, unsigned shift,
                unsigned long index, unsigned long npages)
        unsigned long start, end, inc;

        /* We'll invalidate DMA addresses in PE scope */
        start = 0x2ull << 60;
        start |= (pe_number & 0xFF);

        /* Figure out the start, end and step */
        start |= (index << shift);
        end |= ((index + npages - 1) << shift);
        inc = (0x1ull << shift);

        while (start <= end) {
                        __raw_rm_writeq(cpu_to_be64(start), invalidate);
                        __raw_writeq(cpu_to_be64(start), invalidate);
static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
                unsigned long index, unsigned long npages, bool rm)
        struct iommu_table_group_link *tgl;

        list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
                struct pnv_ioda_pe *pe = container_of(tgl->table_group,
                                struct pnv_ioda_pe, table_group);
                __be64 __iomem *invalidate = rm ?
                        (__be64 __iomem *)pe->phb->ioda.tce_inval_reg_phys :
                        pe->phb->ioda.tce_inval_reg;

                pnv_pci_ioda2_do_tce_invalidate(pe->pe_number, rm,
                                invalidate, tbl->it_page_shift,

static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
                long npages, unsigned long uaddr,
                enum dma_data_direction direction,
                struct dma_attrs *attrs)
        int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,

        if (!ret && (tbl->it_type & TCE_PCI_SWINV_CREATE))
                pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);

#ifdef CONFIG_IOMMU_API
static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index,
                unsigned long *hpa, enum dma_data_direction *direction)
        long ret = pnv_tce_xchg(tbl, index, hpa, direction);

        if (!ret && (tbl->it_type &
                     (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE)))
                pnv_pci_ioda2_tce_invalidate(tbl, index, 1, false);

static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
        pnv_tce_free(tbl, index, npages);

        if (tbl->it_type & TCE_PCI_SWINV_FREE)
                pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);

static struct iommu_table_ops pnv_ioda2_iommu_ops = {
        .set = pnv_ioda2_tce_build,
#ifdef CONFIG_IOMMU_API
        .exchange = pnv_ioda2_tce_xchg,
        .clear = pnv_ioda2_tce_free,
static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
                                      struct pnv_ioda_pe *pe, unsigned int base,
        struct page *tce_mem = NULL;
        struct iommu_table *tbl;

        /* XXX FIXME: Handle 64-bit only DMA devices */
        /* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
        /* XXX FIXME: Allocate multi-level tables on PHB3 */

        /* We shouldn't already have a 32-bit DMA associated */
        if (WARN_ON(pe->tce32_seg >= 0))

        tbl = pnv_pci_table_alloc(phb->hose->node);
        iommu_register_group(&pe->table_group, phb->hose->global_number,
        pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group);

        /* Grab a 32-bit TCE table */
        pe->tce32_seg = base;
        pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
                (base << 28), ((base + segs) << 28) - 1);

        /*
         * XXX Currently, we allocate one big contiguous table for the
         * TCEs. We only really need one chunk per 256M of TCE space
         * (i.e. per segment), but that's an optimization for later; it
         * requires some added smarts with our get/put_tce implementation.
         */
        tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
                                   get_order(TCE32_TABLE_SIZE * segs));
                pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
        addr = page_address(tce_mem);
        memset(addr, 0, TCE32_TABLE_SIZE * segs);

        for (i = 0; i < segs; i++) {
                rc = opal_pci_map_pe_dma_window(phb->opal_id,
                                __pa(addr) + TCE32_TABLE_SIZE * i,
                                TCE32_TABLE_SIZE, 0x1000);
                        pe_err(pe, " Failed to configure 32-bit TCE table,"

        /* Setup linux iommu table */
        pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
                                  base << 28, IOMMU_PAGE_SHIFT_4K);

        /* OPAL variant of P7IOC SW invalidated TCEs */
        if (phb->ioda.tce_inval_reg)
                tbl->it_type |= (TCE_PCI_SWINV_CREATE |
                                 TCE_PCI_SWINV_FREE |
                                 TCE_PCI_SWINV_PAIR);

        tbl->it_ops = &pnv_ioda1_iommu_ops;
        iommu_init_table(tbl, phb->hose->node);

        if (pe->flags & PNV_IODA_PE_DEV) {
                /*
                 * Setting the table base here only for carrying the
                 * iommu_group further down to let iommu_add_device() do
                 * the job. pnv_pci_ioda_dma_dev_setup() will override it
                 * later anyway.
                 */
                set_iommu_table_base(&pe->pdev->dev, tbl);
                iommu_add_device(&pe->pdev->dev);
        } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
                pnv_ioda_setup_bus_dma(pe, pe->pbus);

        /* XXX Failure: Try to fallback to 64-bit only ? */
        if (pe->tce32_seg >= 0)

        __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
        pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
        iommu_free_table(tbl, "pnv");
static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
                int num, struct iommu_table *tbl)
        struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
        struct pnv_phb *phb = pe->phb;
        const unsigned long size = tbl->it_indirect_levels ?
                        tbl->it_level_size : tbl->it_size;
        const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
        const __u64 win_size = tbl->it_size << tbl->it_page_shift;

        pe_info(pe, "Setting up window %llx..%llx pg=%x\n",
                start_addr, start_addr + win_size - 1,
                IOMMU_PAGE_SIZE(tbl));

        /*
         * Map the TCE table through TVT. The TVE index is the PE number
         * shifted by 1 bit for 32-bit DMA space.
         */
        rc = opal_pci_map_pe_dma_window(phb->opal_id,
                        tbl->it_indirect_levels + 1,
                        IOMMU_PAGE_SIZE(tbl));
                pe_err(pe, "Failed to configure TCE table, err %ld\n", rc);

        pnv_pci_link_table_and_group(phb->hose->node, num,
                        tbl, &pe->table_group);
        pnv_pci_ioda2_tce_invalidate_entire(pe);
static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
        uint16_t window_id = (pe->pe_number << 1) + 1;

        pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
                phys_addr_t top = memblock_end_of_DRAM();

                top = roundup_pow_of_two(top);
                rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
                                                     pe->tce_bypass_base,
                rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
                                                     pe->tce_bypass_base,
                pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);

        pe->tce_bypass_enabled = enable;
#ifdef CONFIG_IOMMU_API
static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
        struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,

        iommu_take_ownership(table_group->tables[0]);
        pnv_pci_ioda2_set_bypass(pe, false);

static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
        struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,

        iommu_release_ownership(table_group->tables[0]);
        pnv_pci_ioda2_set_bypass(pe, true);

static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
        .take_ownership = pnv_ioda2_take_ownership,
        .release_ownership = pnv_ioda2_release_ownership,
static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb)
        const __be64 *swinvp;

        /* OPAL variant of PHB3 invalidated TCEs */
        swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);

        phb->ioda.tce_inval_reg_phys = be64_to_cpup(swinvp);
        phb->ioda.tce_inval_reg = ioremap(phb->ioda.tce_inval_reg_phys, 8);
static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
                unsigned levels, unsigned long limit,
                unsigned long *current_offset)
        struct page *tce_mem = NULL;
        unsigned order = max_t(unsigned, shift, PAGE_SHIFT) - PAGE_SHIFT;
        unsigned long allocated = 1UL << (order + PAGE_SHIFT);
        unsigned entries = 1UL << (shift - 3);
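        /*
         * Worked example (sketch): with shift = 16 and PAGE_SHIFT = 12,
         * order = 16 - 12 = 4, so 'allocated' = 1 << 16 = 64 KiB and the
         * level holds 'entries' = 1 << (16 - 3) = 8192 eight-byte TCEs,
         * which is exactly the 64 KiB allocated.
         */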
        tce_mem = alloc_pages_node(nid, GFP_KERNEL, order);
                pr_err("Failed to allocate a TCE memory, order=%d\n", order);
        addr = page_address(tce_mem);
        memset(addr, 0, allocated);

        *current_offset += allocated;

        for (i = 0; i < entries; ++i) {
                tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
                                levels, limit, current_offset);

                addr[i] = cpu_to_be64(__pa(tmp) |
                                TCE_PCI_READ | TCE_PCI_WRITE);

                if (*current_offset >= limit)
static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
                unsigned long size, unsigned level);

static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
                __u32 page_shift, __u64 window_size, __u32 levels,
                struct iommu_table *tbl)
        unsigned long offset = 0, level_shift;
        const unsigned window_shift = ilog2(window_size);
        unsigned entries_shift = window_shift - page_shift;
        unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
        const unsigned long tce_table_size = 1UL << table_shift;

        if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))

        if ((window_size > memory_hotplug_max()) || !is_power_of_2(window_size))

        /* Adjust the direct table size from window_size and levels */
        entries_shift = (entries_shift + levels - 1) / levels;
        level_shift = entries_shift + 3;
        level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
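        /*
         * Worked example (sketch): a 4 GiB window with 4 KiB pages gives
         * entries_shift = 32 - 12 = 20, so tce_table_size =
         * 1 << (20 + 3) = 8 MiB in total. With levels = 2, entries_shift
         * becomes (20 + 1) / 2 = 10 per level and level_shift =
         * max(10 + 3, PAGE_SHIFT) = 13, i.e. each level is an 8 KiB
         * block of 1024 TCEs.
         */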
        /* Allocate the TCE table */
        addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
                        levels, tce_table_size, &offset);

        /* addr==NULL means that the first level allocation failed */

        /*
         * The first level was allocated but some lower level failed as
         * we did not allocate as much as we wanted; release the
         * partially allocated table.
         */
        if (offset < tce_table_size) {
                pnv_pci_ioda2_table_do_free_pages(addr,
                                1ULL << (level_shift - 3), levels - 1);

        /* Setup linux iommu table */
        pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, bus_offset,
        tbl->it_level_size = 1ULL << (level_shift - 3);
        tbl->it_indirect_levels = levels - 1;

        pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
                 window_size, tce_table_size, bus_offset);
static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
                unsigned long size, unsigned level)
        const unsigned long addr_ul = (unsigned long) addr &
                        ~(TCE_PCI_READ | TCE_PCI_WRITE);

                u64 *tmp = (u64 *) addr_ul;

                for (i = 0; i < size; ++i) {
                        unsigned long hpa = be64_to_cpu(tmp[i]);

                        if (!(hpa & (TCE_PCI_READ | TCE_PCI_WRITE)))

                        pnv_pci_ioda2_table_do_free_pages(__va(hpa), size,

        free_pages(addr_ul, get_order(size << 3));

static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl)
        const unsigned long size = tbl->it_indirect_levels ?
                        tbl->it_level_size : tbl->it_size;

        pnv_pci_ioda2_table_do_free_pages((__be64 *)tbl->it_base, size,
                        tbl->it_indirect_levels);
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
                                       struct pnv_ioda_pe *pe)
        struct iommu_table *tbl;

        /* We shouldn't already have a 32-bit DMA associated */
        if (WARN_ON(pe->tce32_seg >= 0))

        /* TVE #1 is selected by PCI address bit 59 */
        pe->tce_bypass_base = 1ull << 59;
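        /*
         * Sketch: a DMA to PCI address (1ull << 59) + addr therefore hits
         * TVE #1 (the bypass window) and maps directly to system memory
         * address addr, while addresses below 2^59 hit TVE #0 and are
         * translated through the 32-bit TCE table set up below.
         */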
        tbl = pnv_pci_table_alloc(phb->hose->node);
        iommu_register_group(&pe->table_group, phb->hose->global_number,
        pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group);

        /* The PE will reserve all possible 32-bit space */
        pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
                phb->ioda.m32_pci_base);

        /* Setup linux iommu table */
        rc = pnv_pci_ioda2_table_alloc_pages(pe->phb->hose->node,
                        0, IOMMU_PAGE_SHIFT_4K, phb->ioda.m32_pci_base,
                        POWERNV_IOMMU_DEFAULT_LEVELS, tbl);
                pe_err(pe, "Failed to create 32-bit TCE table, err %ld", rc);

        tbl->it_ops = &pnv_ioda2_iommu_ops;
        iommu_init_table(tbl, phb->hose->node);
#ifdef CONFIG_IOMMU_API
        pe->table_group.ops = &pnv_pci_ioda2_ops;

        rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
                pe_err(pe, "Failed to configure 32-bit TCE table,"

        /* OPAL variant of PHB3 invalidated TCEs */
        if (phb->ioda.tce_inval_reg)
                tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);

        if (pe->flags & PNV_IODA_PE_DEV) {
                /*
                 * Setting the table base here only for carrying the
                 * iommu_group further down to let iommu_add_device() do
                 * the job. pnv_pci_ioda_dma_dev_setup() will override it
                 * later anyway.
                 */
                set_iommu_table_base(&pe->pdev->dev, tbl);
                iommu_add_device(&pe->pdev->dev);
        } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
                pnv_ioda_setup_bus_dma(pe, pe->pbus);

        /* Also create a bypass window */
        if (!pnv_iommu_bypass_disabled)
                pnv_pci_ioda2_set_bypass(pe, true);

        if (pe->tce32_seg >= 0)

        pnv_pci_ioda2_table_free_pages(tbl);
        pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
        iommu_free_table(tbl, "pnv");
static void pnv_ioda_setup_dma(struct pnv_phb *phb)
        struct pci_controller *hose = phb->hose;
        unsigned int residual, remaining, segs, tw, base;
        struct pnv_ioda_pe *pe;

        /*
         * If we have more PE#s than segments available, hand out one
         * per PE until we run out and let the rest fail. If not,
         * then we assign at least one segment per PE, plus more based
         * on the number of devices under that PE.
         */
        if (phb->ioda.dma_pe_count > phb->ioda.tce32_count)
                residual = phb->ioda.tce32_count -
                        phb->ioda.dma_pe_count;

        pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n",
                hose->global_number, phb->ioda.tce32_count);
        pr_info("PCI: %d PE# for a total weight of %d\n",
                phb->ioda.dma_pe_count, phb->ioda.dma_weight);

        pnv_pci_ioda_setup_opal_tce_kill(phb);

        /*
         * Walk our PE list and configure their DMA segments, hand them
         * out one base segment plus any residual segments based on
         * weight.
         */
        remaining = phb->ioda.tce32_count;
        tw = phb->ioda.dma_weight;
        list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
                if (!pe->dma_weight)
                        pe_warn(pe, "No DMA32 resources available\n");

                segs += ((pe->dma_weight * residual) + (tw / 2)) / tw;
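                /*
                 * Worked example (sketch, made-up numbers): with
                 * residual = 10 spare segments, total weight tw = 40 and
                 * pe->dma_weight = 15, the exact share is 15 * 10 / 40 =
                 * 3.75; the +(tw / 2) term rounds to nearest, giving
                 * (150 + 20) / 40 = 4 extra segments on top of the base
                 * one.
                 */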
                if (segs > remaining)

                /*
                 * For IODA2-compliant PHB3, we needn't care about the
                 * weight. All of the available 32-bit DMA space will be
                 * assigned to the PE.
                 */
                if (phb->type == PNV_PHB_IODA1) {
                        pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
                                pe->dma_weight, segs);
                        pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
                        pe_info(pe, "Assign DMA32 space\n");
                        pnv_pci_ioda2_setup_dma_pe(phb, pe);
2354 #ifdef CONFIG_PCI_MSI
2355 static void pnv_ioda2_msi_eoi(struct irq_data *d)
2357 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
2358 struct irq_chip *chip = irq_data_get_irq_chip(d);
2359 struct pnv_phb *phb = container_of(chip, struct pnv_phb,
2363 rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
2370 static void set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
2372 struct irq_data *idata;
2373 struct irq_chip *ichip;
2375 if (phb->type != PNV_PHB_IODA2)
2378 if (!phb->ioda.irq_chip_init) {
2380 * The first time we set up an MSI IRQ, we need to set up the
2381 * corresponding IRQ chip so that EOIs are routed correctly. */
2383 idata = irq_get_irq_data(virq);
2384 ichip = irq_data_get_irq_chip(idata);
2385 phb->ioda.irq_chip_init = 1;
2386 phb->ioda.irq_chip = *ichip;
2387 phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
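/* Clone the generic chip and override only ->irq_eoi, so that on
 * IODA2 every MSI EOI is also reported to firmware through
 * opal_pci_msi_eoi() (see pnv_ioda2_msi_eoi() above). */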
2389 irq_set_chip(virq, &phb->ioda.irq_chip);
2392 #ifdef CONFIG_CXL_BASE
2394 struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
2396 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2398 return of_node_get(hose->dn);
2400 EXPORT_SYMBOL(pnv_pci_get_phb_node);
2402 int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode)
2404 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2405 struct pnv_phb *phb = hose->private_data;
2406 struct pnv_ioda_pe *pe;
2409 pe = pnv_ioda_get_pe(dev);
2413 pe_info(pe, "Switching PHB to CXL\n");
2415 rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number);
2417 dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc);
2421 EXPORT_SYMBOL(pnv_phb_to_cxl_mode);
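#if 0
/* Illustrative only: a hypothetical caller switching a PHB into cxl
 * (CAPI) mode for an accelerator device; "afu_pdev" and
 * "example_enable_capi" are assumed names, while
 * OPAL_PHB_CAPI_MODE_CAPI comes from opal-api.h. */
static int example_enable_capi(struct pci_dev *afu_pdev)
{
	return pnv_phb_to_cxl_mode(afu_pdev, OPAL_PHB_CAPI_MODE_CAPI);
}
#endif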
2423 /* Find the PHB for a cxl dev and allocate MSI hwirqs.
2424 * Returns the absolute hardware IRQ number. */
2426 int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num)
2428 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2429 struct pnv_phb *phb = hose->private_data;
2430 int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num);
2433 dev_warn(&dev->dev, "Failed to find a free MSI\n");
2437 return phb->msi_base + hwirq;
2439 EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs);
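#if 0
/* Illustrative only: a minimal sketch of how a cxl driver might pair
 * the allocation helper above with the release helper below;
 * "example_cxl_one_irq" and "afu_pdev" are hypothetical names. */
static int example_cxl_one_irq(struct pci_dev *afu_pdev)
{
	int hwirq = pnv_cxl_alloc_hwirqs(afu_pdev, 1);

	if (hwirq < 0)
		return hwirq;
	/* ... map and request the IRQ here ... */
	pnv_cxl_release_hwirqs(afu_pdev, hwirq, 1);
	return 0;
}
#endif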
2441 void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num)
2443 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2444 struct pnv_phb *phb = hose->private_data;
2446 msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num);
2448 EXPORT_SYMBOL(pnv_cxl_release_hwirqs);
2450 void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
2451 struct pci_dev *dev)
2453 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2454 struct pnv_phb *phb = hose->private_data;
2457 for (i = 1; i < CXL_IRQ_RANGES; i++) {
2458 if (!irqs->range[i])
2460 pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n",
2463 hwirq = irqs->offset[i] - phb->msi_base;
2464 msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq,
2468 EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges);
2470 int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
2471 struct pci_dev *dev, int num)
2473 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2474 struct pnv_phb *phb = hose->private_data;
2477 memset(irqs, 0, sizeof(struct cxl_irq_ranges));
2479 /* 0 is reserved for the multiplexed PSL DSI interrupt */
2480 for (i = 1; i < CXL_IRQ_RANGES && num; i++) {
2483 hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try);
2491 irqs->offset[i] = phb->msi_base + hwirq;
2492 irqs->range[i] = try;
2493 pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n",
2494 i, irqs->offset[i], irqs->range[i]);
2502 pnv_cxl_release_hwirq_ranges(irqs, dev);
2505 EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges);
2507 int pnv_cxl_get_irq_count(struct pci_dev *dev)
2509 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2510 struct pnv_phb *phb = hose->private_data;
2512 return phb->msi_bmp.irq_count;
2514 EXPORT_SYMBOL(pnv_cxl_get_irq_count);
2516 int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
2519 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2520 struct pnv_phb *phb = hose->private_data;
2521 unsigned int xive_num = hwirq - phb->msi_base;
2522 struct pnv_ioda_pe *pe;
2525 if (!(pe = pnv_ioda_get_pe(dev)))
2528 /* Assign XIVE to PE */
2529 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
2531 pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x "
2532 "hwirq 0x%x XIVE 0x%x PE\n",
2533 pci_name(dev), rc, phb->msi_base, hwirq, xive_num);
2536 set_msi_irq_chip(phb, virq);
2540 EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);
2543 static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
2544 unsigned int hwirq, unsigned int virq,
2545 unsigned int is_64, struct msi_msg *msg)
2547 struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
2548 unsigned int xive_num = hwirq - phb->msi_base;
2552 /* No PE assigned ? bail out ... no MSI for you ! */
2556 /* Check if we have an MVE */
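/* The MVE is what the PHB uses to validate incoming MSI writes
 * against the owning PE; without one assigned we cannot deliver
 * MSIs for this PE, so bail out. */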
2557 if (pe->mve_number < 0)
2560 /* Force 32-bit MSI on some broken devices */
2561 if (dev->no_64bit_msi)
2564 /* Assign XIVE to PE */
2565 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
2567 pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
2568 pci_name(dev), rc, xive_num);
2575 rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
2578 pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
2582 msg->address_hi = be64_to_cpu(addr64) >> 32;
2583 msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
2587 rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
2590 pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
2594 msg->address_hi = 0;
2595 msg->address_lo = be32_to_cpu(addr32);
2597 msg->data = be32_to_cpu(data);
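/* At this point the message carries the PHB's MSI doorbell address
 * plus the per-interrupt data word; the device triggers hwirq by
 * writing "data" to that address. */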
2599 set_msi_irq_chip(phb, virq);
2601 pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
2602 " address=%x_%08x data=%x PE# %d\n",
2603 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
2604 msg->address_hi, msg->address_lo, data, pe->pe_number);
2609 static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
2612 const __be32 *prop = of_get_property(phb->hose->dn,
2613 "ibm,opal-msi-ranges", NULL);
2616 prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
2621 phb->msi_base = be32_to_cpup(prop);
2622 count = be32_to_cpup(prop + 1);
2623 if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
2624 pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
2625 phb->hose->global_number);
2629 phb->msi_setup = pnv_pci_ioda_msi_setup;
2630 phb->msi32_support = 1;
2631 pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
2632 count, phb->msi_base);
2635 static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
2636 #endif /* CONFIG_PCI_MSI */
2638 #ifdef CONFIG_PCI_IOV
2639 static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
2641 struct pci_controller *hose;
2642 struct pnv_phb *phb;
2643 struct resource *res;
2645 resource_size_t size;
2649 if (!pdev->is_physfn || pdev->is_added)
2652 hose = pci_bus_to_host(pdev->bus);
2653 phb = hose->private_data;
2655 pdn = pci_get_pdn(pdev);
2656 pdn->vfs_expanded = 0;
2658 total_vfs = pci_sriov_get_totalvfs(pdev);
2659 pdn->m64_per_iov = 1;
2660 mul = phb->ioda.total_pe;
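/* By default every VF BAR is expanded to total_pe copies of the
 * per-VF size, so each possible VF lands in its own M64 segment
 * and can therefore be given its own PE. */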
2662 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
2663 res = &pdev->resource[i + PCI_IOV_RESOURCES];
2664 if (!res->flags || res->parent)
2666 if (!pnv_pci_is_mem_pref_64(res->flags)) {
2667 dev_warn(&pdev->dev, " non M64 VF BAR%d: %pR\n",
2672 size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
2674 /* bigger than 64M */
2675 if (size > (1 << 26)) {
2676 dev_info(&pdev->dev, "PowerNV: VF BAR%d: %pR IOV size is bigger than 64M, rounding up to a power of two\n",
2678 pdn->m64_per_iov = M64_PER_IOV;
2679 mul = roundup_pow_of_two(total_vfs);
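/* Expanding a BAR bigger than 64M total_pe times would burn too
 * much M64 space, so only round the VF count up to a power of
 * two and accept a coarser VF-to-PE mapping instead. */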
2684 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
2685 res = &pdev->resource[i + PCI_IOV_RESOURCES];
2686 if (!res->flags || res->parent)
2688 if (!pnv_pci_is_mem_pref_64(res->flags)) {
2689 dev_warn(&pdev->dev, "Skipping expanding VF BAR%d: %pR\n",
2694 dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res);
2695 size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
2696 res->end = res->start + size * mul - 1;
2697 dev_dbg(&pdev->dev, " %pR\n", res);
2698 dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)\n",
2701 pdn->vfs_expanded = mul;
2703 #endif /* CONFIG_PCI_IOV */
2706 * This function is supposed to be called on a per-PE basis, from
2707 * top to bottom, so that the I/O or MMIO segments assigned to a
2708 * parent PE can be overridden by its child PEs if necessary. */
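/* For example, assuming a 16MB m32_segsize, a bus resource starting
 * 48MB into the M32 window maps to segment index 3, and every 16MB
 * segment the resource spans is bound to this PE through
 * opal_pci_map_pe_mmio_window() in the loop below. */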
2710 static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
2711 struct pnv_ioda_pe *pe)
2713 struct pnv_phb *phb = hose->private_data;
2714 struct pci_bus_region region;
2715 struct resource *res;
2720 * NOTE: We only care about PCI-bus-based PEs for now. PCI-device-
2721 * based PEs, for example those of SR-IOV VFs, will
2722 * be figured out later. */
2724 BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));
2726 pci_bus_for_each_resource(pe->pbus, res, i) {
2727 if (!res || !res->flags ||
2728 res->start > res->end)
2731 if (res->flags & IORESOURCE_IO) {
2732 region.start = res->start - phb->ioda.io_pci_base;
2733 region.end = res->end - phb->ioda.io_pci_base;
2734 index = region.start / phb->ioda.io_segsize;
2736 while (index < phb->ioda.total_pe &&
2737 region.start <= region.end) {
2738 phb->ioda.io_segmap[index] = pe->pe_number;
2739 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
2740 pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
2741 if (rc != OPAL_SUCCESS) {
2742 pr_err("%s: OPAL error %d when mapping IO "
2743 "segment #%d to PE#%d\n",
2744 __func__, rc, index, pe->pe_number);
2748 region.start += phb->ioda.io_segsize;
2751 } else if ((res->flags & IORESOURCE_MEM) &&
2752 !pnv_pci_is_mem_pref_64(res->flags)) {
2753 region.start = res->start -
2754 hose->mem_offset[0] -
2755 phb->ioda.m32_pci_base;
2756 region.end = res->end -
2757 hose->mem_offset[0] -
2758 phb->ioda.m32_pci_base;
2759 index = region.start / phb->ioda.m32_segsize;
2761 while (index < phb->ioda.total_pe &&
2762 region.start <= region.end) {
2763 phb->ioda.m32_segmap[index] = pe->pe_number;
2764 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
2765 pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
2766 if (rc != OPAL_SUCCESS) {
2767 pr_err("%s: OPAL error %d when mapping M32 "
2768 "segment#%d to PE#%d",
2769 __func__, rc, index, pe->pe_number);
2773 region.start += phb->ioda.m32_segsize;
2780 static void pnv_pci_ioda_setup_seg(void)
2782 struct pci_controller *tmp, *hose;
2783 struct pnv_phb *phb;
2784 struct pnv_ioda_pe *pe;
2786 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
2787 phb = hose->private_data;
2788 list_for_each_entry(pe, &phb->ioda.pe_list, list) {
2789 pnv_ioda_setup_pe_seg(hose, pe);
2794 static void pnv_pci_ioda_setup_DMA(void)
2796 struct pci_controller *hose, *tmp;
2797 struct pnv_phb *phb;
2799 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
2800 pnv_ioda_setup_dma(hose->private_data);
2802 /* Mark the PHB initialization done */
2803 phb = hose->private_data;
2804 phb->initialized = 1;
2808 static void pnv_pci_ioda_create_dbgfs(void)
2810 #ifdef CONFIG_DEBUG_FS
2811 struct pci_controller *hose, *tmp;
2812 struct pnv_phb *phb;
2815 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
2816 phb = hose->private_data;
2818 sprintf(name, "PCI%04x", hose->global_number);
2819 phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
2821 pr_warning("%s: Failed to create debugfs directory for PHB#%x\n",
2822 __func__, hose->global_number);
2824 #endif /* CONFIG_DEBUG_FS */
2827 static void pnv_pci_ioda_fixup(void)
2829 pnv_pci_ioda_setup_PEs();
2830 pnv_pci_ioda_setup_seg();
2831 pnv_pci_ioda_setup_DMA();
2833 pnv_pci_ioda_create_dbgfs();
2837 eeh_addr_cache_build();
2842 * Returns the alignment for I/O or memory windows of P2P
2843 * bridges. That actually depends on how PEs are segmented.
2844 * For now, we return the I/O or M32 segment size for PE-sensitive
2845 * P2P bridges. Otherwise, the default values (4KiB for I/O,
2846 * 1MiB for memory) are returned.
2848 * The current PCI bus might be put into one PE, which was
2849 * created against the parent PCI bridge. In that case, we
2850 * needn't enlarge the alignment, which saves some resources. */
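/* For example, a 1.5MB non-prefetchable window behind a PE-sensitive
 * bridge gets aligned up to a full M32 segment (m32_segsize) so it
 * never shares a segment, and hence a PE, with a neighbouring bus. */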
2853 static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
2856 struct pci_dev *bridge;
2857 struct pci_controller *hose = pci_bus_to_host(bus);
2858 struct pnv_phb *phb = hose->private_data;
2859 int num_pci_bridges = 0;
2863 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
2865 if (num_pci_bridges >= 2)
2869 bridge = bridge->bus->self;
2872 /* We fall back to M32 if M64 isn't supported */
2873 if (phb->ioda.m64_segsize &&
2874 pnv_pci_is_mem_pref_64(type))
2875 return phb->ioda.m64_segsize;
2876 if (type & IORESOURCE_MEM)
2877 return phb->ioda.m32_segsize;
2879 return phb->ioda.io_segsize;
2882 #ifdef CONFIG_PCI_IOV
2883 static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
2886 struct pci_dn *pdn = pci_get_pdn(pdev);
2887 resource_size_t align, iov_align;
2889 iov_align = resource_size(&pdev->resource[resno]);
2893 align = pci_iov_resource_size(pdev, resno);
2894 if (pdn->vfs_expanded)
2895 return pdn->vfs_expanded * align;
2899 #endif /* CONFIG_PCI_IOV */
2901 /* Prevent enabling devices for which we couldn't properly assign a PE */
2904 static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
2906 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2907 struct pnv_phb *phb = hose->private_data;
2910 /* The function is probably called while the PEs have not
2911 * been created yet, e.g. during resource reassignment in the
2912 * PCI probe period. Just skip the check if the PEs aren't ready. */
2915 if (!phb->initialized)
2918 pdn = pci_get_pdn(dev);
2919 if (!pdn || pdn->pe_number == IODA_INVALID_PE)
2925 static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
2928 return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
2931 static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
2933 struct pnv_phb *phb = hose->private_data;
2935 opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE,
2939 static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
2940 .dma_dev_setup = pnv_pci_dma_dev_setup,
2941 #ifdef CONFIG_PCI_MSI
2942 .setup_msi_irqs = pnv_setup_msi_irqs,
2943 .teardown_msi_irqs = pnv_teardown_msi_irqs,
2945 .enable_device_hook = pnv_pci_enable_device_hook,
2946 .window_alignment = pnv_pci_window_alignment,
2947 .reset_secondary_bus = pnv_pci_reset_secondary_bus,
2948 .dma_set_mask = pnv_pci_ioda_dma_set_mask,
2949 .shutdown = pnv_pci_ioda_shutdown,
2952 static void __init pnv_pci_init_ioda_phb(struct device_node *np,
2953 u64 hub_id, int ioda_type)
2955 struct pci_controller *hose;
2956 struct pnv_phb *phb;
2957 unsigned long size, m32map_off, pemap_off, iomap_off = 0;
2958 const __be64 *prop64;
2959 const __be32 *prop32;
2965 pr_info("Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);
2967 prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
2969 pr_err(" Missing \"ibm,opal-phbid\" property !\n");
2972 phb_id = be64_to_cpup(prop64);
2973 pr_debug(" PHB-ID : 0x%016llx\n", phb_id);
2975 phb = memblock_virt_alloc(sizeof(struct pnv_phb), 0);
2977 /* Allocate PCI controller */
2978 phb->hose = hose = pcibios_alloc_controller(np);
2980 pr_err(" Can't allocate PCI controller for %s\n",
2982 memblock_free(__pa(phb), sizeof(struct pnv_phb));
2986 spin_lock_init(&phb->lock);
2987 prop32 = of_get_property(np, "bus-range", &len);
2988 if (prop32 && len == 8) {
2989 hose->first_busno = be32_to_cpu(prop32[0]);
2990 hose->last_busno = be32_to_cpu(prop32[1]);
2992 pr_warn(" Broken <bus-range> on %s\n", np->full_name);
2993 hose->first_busno = 0;
2994 hose->last_busno = 0xff;
2996 hose->private_data = phb;
2997 phb->hub_id = hub_id;
2998 phb->opal_id = phb_id;
2999 phb->type = ioda_type;
3000 mutex_init(&phb->ioda.pe_alloc_mutex);
3002 /* Detect specific models for error handling */
3003 if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
3004 phb->model = PNV_PHB_MODEL_P7IOC;
3005 else if (of_device_is_compatible(np, "ibm,power8-pciex"))
3006 phb->model = PNV_PHB_MODEL_PHB3;
3008 phb->model = PNV_PHB_MODEL_UNKNOWN;
3010 /* Parse 32-bit and IO ranges (if any) */
3011 pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
3014 phb->regs = of_iomap(np, 0);
3015 if (phb->regs == NULL)
3016 pr_err(" Failed to map registers !\n");
3018 /* Initialize more IODA stuff */
3019 phb->ioda.total_pe = 1;
3020 prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
3022 phb->ioda.total_pe = be32_to_cpup(prop32);
3023 prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
3025 phb->ioda.reserved_pe = be32_to_cpup(prop32);
3027 /* Parse 64-bit MMIO range */
3028 pnv_ioda_parse_m64_window(phb);
3030 phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
3031 /* Firmware has already carved the top 64K of M32 space (MSI space) off; add it back */
3032 phb->ioda.m32_size += 0x10000;
3034 phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
3035 phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
3036 phb->ioda.io_size = hose->pci_io_size;
3037 phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
3038 phb->ioda.io_pci_base = 0; /* XXX calculate this ? */
3040 /* Allocate aux data & arrays. We don't have IO ports on PHB3 */
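/* The single "aux" allocation below packs, in order: the PE
 * allocation bitmap, the M32 segment map, the IO segment map
 * (IODA1 only, since PHB3 has no IO ports) and the PE array;
 * the *map_off/pemap_off variables record each piece's offset. */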
3041 size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
3043 size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
3044 if (phb->type == PNV_PHB_IODA1) {
3046 size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
3049 size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
3050 aux = memblock_virt_alloc(size, 0);
3051 phb->ioda.pe_alloc = aux;
3052 phb->ioda.m32_segmap = aux + m32map_off;
3053 if (phb->type == PNV_PHB_IODA1)
3054 phb->ioda.io_segmap = aux + iomap_off;
3055 phb->ioda.pe_array = aux + pemap_off;
3056 set_bit(phb->ioda.reserved_pe, phb->ioda.pe_alloc);
3058 INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
3059 INIT_LIST_HEAD(&phb->ioda.pe_list);
3060 mutex_init(&phb->ioda.pe_list_mutex);
3062 /* Calculate how many 32-bit TCE segments we have */
3063 phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;
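/* Each 32-bit TCE segment covers 256MB (hence the shift by 28), so
 * the DMA space below m32_pci_base yields that many segments to
 * hand out in pnv_ioda_setup_dma(). */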
3065 #if 0 /* We should really do that ... */
3066 rc = opal_pci_set_phb_mem_window(opal->phb_id,
3069 starting_real_address,
3070 starting_pci_address,
3074 pr_info(" %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n",
3075 phb->ioda.total_pe, phb->ioda.reserved_pe,
3076 phb->ioda.m32_size, phb->ioda.m32_segsize);
3077 if (phb->ioda.m64_size)
3078 pr_info(" M64: 0x%lx [segment=0x%lx]\n",
3079 phb->ioda.m64_size, phb->ioda.m64_segsize);
3080 if (phb->ioda.io_size)
3081 pr_info(" IO: 0x%x [segment=0x%x]\n",
3082 phb->ioda.io_size, phb->ioda.io_segsize);
3085 phb->hose->ops = &pnv_pci_ops;
3086 phb->get_pe_state = pnv_ioda_get_pe_state;
3087 phb->freeze_pe = pnv_ioda_freeze_pe;
3088 phb->unfreeze_pe = pnv_ioda_unfreeze_pe;
3090 /* Setup RID -> PE mapping function */
3091 phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;
3094 phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
3095 phb->dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask;
3097 /* Setup MSI support */
3098 pnv_pci_init_ioda_msis(phb);
3101 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
3102 * to let the PCI core do resource assignment. The expectation
3103 * is that the PCI core will apply the correct I/O and MMIO
3104 * alignment for the P2P bridge BARs so that each PCI bus (excluding
3105 * the child P2P bridges) can form an individual PE. */
3107 ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
3108 hose->controller_ops = pnv_pci_ioda_controller_ops;
3110 #ifdef CONFIG_PCI_IOV
3111 ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
3112 ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
3115 pci_add_flags(PCI_REASSIGN_ALL_RSRC);
3117 /* Reset IODA tables to a clean state */
3118 rc = opal_pci_reset(phb_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET);
3120 pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc);
3122 /* If we're running in a kdump kernel, the previous kernel never
3123 * shut down PCI devices correctly. The IODA table has already
3124 * been cleaned out, so we have to issue a PHB reset to stop all
3125 * PCI transactions from the previous kernel. */
3127 if (is_kdump_kernel()) {
3128 pr_info(" Issue PHB reset ...\n");
3129 pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
3130 pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
3133 /* Remove M64 resource if we can't configure it successfully */
3134 if (!phb->init_m64 || phb->init_m64(phb))
3135 hose->mem_resources[1].flags = 0;
3138 void __init pnv_pci_init_ioda2_phb(struct device_node *np)
3140 pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
3143 void __init pnv_pci_init_ioda_hub(struct device_node *np)
3145 struct device_node *phbn;
3146 const __be64 *prop64;
3149 pr_info("Probing IODA IO-Hub %s\n", np->full_name);
3151 prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
3153 pr_err(" Missing \"ibm,opal-hubid\" property !\n");
3156 hub_id = be64_to_cpup(prop64);
3157 pr_devel(" HUB-ID : 0x%016llx\n", hub_id);
3159 /* Count child PHBs */
3160 for_each_child_of_node(np, phbn) {
3161 /* Look for IODA1 PHBs */
3162 if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
3163 pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);