 * This file implements the platform-dependent EEH operations for the
 * powernv platform.
6 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
14 #include <linux/atomic.h>
15 #include <linux/debugfs.h>
16 #include <linux/delay.h>
17 #include <linux/export.h>
18 #include <linux/init.h>
19 #include <linux/interrupt.h>
20 #include <linux/list.h>
21 #include <linux/msi.h>
23 #include <linux/pci.h>
24 #include <linux/proc_fs.h>
25 #include <linux/rbtree.h>
26 #include <linux/sched.h>
27 #include <linux/seq_file.h>
28 #include <linux/spinlock.h>
31 #include <asm/eeh_event.h>
32 #include <asm/firmware.h>
34 #include <asm/iommu.h>
35 #include <asm/machdep.h>
36 #include <asm/msi_bitmap.h>
38 #include <asm/ppc-pci.h>
39 #include <asm/pnv-pci.h>
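/*
 * pnv_eeh_nb_init records whether the OPAL PCI error event interrupt has
 * already been requested; eeh_event_irq caches the Linux IRQ number that
 * pnv_eeh_post_init() maps for OPAL_EVENT_PCI_ERROR.
 */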
44 static bool pnv_eeh_nb_init = false;
45 static int eeh_event_irq = -EINVAL;
47 static int pnv_eeh_init(void)
49 struct pci_controller *hose;
52 if (!firmware_has_feature(FW_FEATURE_OPAL)) {
53 pr_warn("%s: OPAL is required !\n",
59 eeh_add_flag(EEH_PROBE_MODE_DEV);
 * P7IOC blocks PCI config access to a frozen PE, but PHB3
 * doesn't. So we have to selectively enable I/O prior to
 * collecting the error log.
66 list_for_each_entry(hose, &hose_list, list_node) {
67 phb = hose->private_data;
69 if (phb->model == PNV_PHB_MODEL_P7IOC)
70 eeh_add_flag(EEH_ENABLE_IO_FOR_LOG);
 * PE#0 should be regarded as valid by the EEH core
 * if it's not the reserved one. Currently, we have
 * the reserved PE#255 and PE#127 for PHB3 and P7IOC
 * respectively, so PE#0 should be regarded as valid
 * for both PHB3 and P7IOC.
79 if (phb->ioda.reserved_pe_idx != 0)
80 eeh_add_flag(EEH_VALID_PE_ZERO);
88 static irqreturn_t pnv_eeh_event(int irq, void *data)
91 * We simply send a special EEH event if EEH has been
92 * enabled. We don't care about EEH events until we've
93 * finished processing the outstanding ones. Event processing
94 * gets unmasked in next_error() if EEH is enabled.
96 disable_irq_nosync(irq);
99 eeh_send_failure_event(NULL);
104 #ifdef CONFIG_DEBUG_FS
105 static ssize_t pnv_eeh_ei_write(struct file *filp,
106 const char __user *user_buf,
107 size_t count, loff_t *ppos)
109 struct pci_controller *hose = filp->private_data;
110 struct eeh_dev *edev;
112 int pe_no, type, func;
113 unsigned long addr, mask;
117 if (!eeh_ops || !eeh_ops->err_inject)
120 /* Copy over argument buffer */
121 ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
125 /* Retrieve parameters */
126 ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
127 &pe_no, &type, &func, &addr, &mask);
132 edev = kzalloc(sizeof(*edev), GFP_KERNEL);
136 edev->pe_config_addr = pe_no;
137 pe = eeh_pe_get(edev);
142 /* Do error injection */
143 ret = eeh_ops->err_inject(pe, type, func, addr, mask);
144 return ret < 0 ? ret : count;
147 static const struct file_operations pnv_eeh_ei_fops = {
150 .write = pnv_eeh_ei_write,
153 static int pnv_eeh_dbgfs_set(void *data, int offset, u64 val)
155 struct pci_controller *hose = data;
156 struct pnv_phb *phb = hose->private_data;
158 out_be64(phb->regs + offset, val);
162 static int pnv_eeh_dbgfs_get(void *data, int offset, u64 *val)
164 struct pci_controller *hose = data;
165 struct pnv_phb *phb = hose->private_data;
167 *val = in_be64(phb->regs + offset);
171 #define PNV_EEH_DBGFS_ENTRY(name, reg) \
172 static int pnv_eeh_dbgfs_set_##name(void *data, u64 val) \
174 return pnv_eeh_dbgfs_set(data, reg, val); \
177 static int pnv_eeh_dbgfs_get_##name(void *data, u64 *val) \
179 return pnv_eeh_dbgfs_get(data, reg, val); \
182 DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_dbgfs_ops_##name, \
183 pnv_eeh_dbgfs_get_##name, \
184 pnv_eeh_dbgfs_set_##name, \
187 PNV_EEH_DBGFS_ENTRY(outb, 0xD10);
188 PNV_EEH_DBGFS_ENTRY(inbA, 0xD90);
189 PNV_EEH_DBGFS_ENTRY(inbB, 0xE10);
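/*
 * Each PNV_EEH_DBGFS_ENTRY() expansion defines a get/set pair backed by a
 * single 64-bit PHB register, so the outb (0xD10), inbA (0xD90) and inbB
 * (0xE10) attributes back the err_injct_* debugfs files created in
 * pnv_eeh_post_init().
 */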
191 #endif /* CONFIG_DEBUG_FS */
194 * pnv_eeh_post_init - EEH platform dependent post initialization
 * EEH platform-dependent post initialization on powernv. When
 * this function is called, the EEH PEs and devices should have
 * been built. Once the I/O cache has been set up, EEH is
 * ready to provide service.
201 static int pnv_eeh_post_init(void)
203 struct pci_controller *hose;
207 /* Register OPAL event notifier */
208 if (!pnv_eeh_nb_init) {
209 eeh_event_irq = opal_event_request(ilog2(OPAL_EVENT_PCI_ERROR));
210 if (eeh_event_irq < 0) {
211 pr_err("%s: Can't register OPAL event interrupt (%d)\n",
212 __func__, eeh_event_irq);
213 return eeh_event_irq;
216 ret = request_irq(eeh_event_irq, pnv_eeh_event,
217 IRQ_TYPE_LEVEL_HIGH, "opal-eeh", NULL);
219 irq_dispose_mapping(eeh_event_irq);
220 pr_err("%s: Can't request OPAL event interrupt (%d)\n",
221 __func__, eeh_event_irq);
225 pnv_eeh_nb_init = true;
229 disable_irq(eeh_event_irq);
231 list_for_each_entry(hose, &hose_list, list_node) {
232 phb = hose->private_data;
 * If EEH is enabled, we're going to rely on that.
 * Otherwise, we fall back to the conventional mechanism
 * of clearing frozen PEs during PCI config access.
240 phb->flags |= PNV_PHB_FLAG_EEH;
242 phb->flags &= ~PNV_PHB_FLAG_EEH;
244 /* Create debugfs entries */
245 #ifdef CONFIG_DEBUG_FS
246 if (phb->has_dbgfs || !phb->dbgfs)
250 debugfs_create_file("err_injct", 0200,
254 debugfs_create_file("err_injct_outbound", 0600,
256 &pnv_eeh_dbgfs_ops_outb);
257 debugfs_create_file("err_injct_inboundA", 0600,
259 &pnv_eeh_dbgfs_ops_inbA);
260 debugfs_create_file("err_injct_inboundB", 0600,
262 &pnv_eeh_dbgfs_ops_inbB);
263 #endif /* CONFIG_DEBUG_FS */
269 static int pnv_eeh_find_cap(struct pci_dn *pdn, int cap)
271 int pos = PCI_CAPABILITY_LIST;
int cnt = 48;	/* Maximum number of capabilities */
278 /* Check if the device supports capabilities */
279 pnv_pci_cfg_read(pdn, PCI_STATUS, 2, &status);
280 if (!(status & PCI_STATUS_CAP_LIST))
284 pnv_pci_cfg_read(pdn, pos, 1, &pos);
289 pnv_pci_cfg_read(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
298 pos += PCI_CAP_LIST_NEXT;
304 static int pnv_eeh_find_ecap(struct pci_dn *pdn, int cap)
306 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
308 int pos = 256, ttl = (4096 - 256) / 8;
310 if (!edev || !edev->pcie_cap)
312 if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
318 if (PCI_EXT_CAP_ID(header) == cap && pos)
321 pos = PCI_EXT_CAP_NEXT(header);
325 if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
333 * pnv_eeh_probe - Do probe on PCI device
334 * @pdn: PCI device node
 * When the EEH module is installed during system boot, all PCI devices
 * are checked one by one to see whether they support EEH. This function
 * is introduced for that purpose. By default, EEH is enabled on all PCI
 * devices, so we only need to do the necessary initialization on the
 * corresponding eeh device and create the PE accordingly.
 *
 * Note that it's unsafe to retrieve the EEH device through the
 * corresponding PCI device: during a PCI device hotplug, possibly
 * triggered by the EEH core, the binding between the EEH device
 * and the PCI device hasn't been built yet.
349 static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
351 struct pci_controller *hose = pdn->phb;
352 struct pnv_phb *phb = hose->private_data;
353 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
 * When probing the root bridge, which doesn't have any
 * subordinate PCI devices, we don't have an OF node for
 * it. So it's not reasonable to continue the probe.
363 if (!edev || edev->pe)
366 /* Skip for PCI-ISA bridge */
367 if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
370 /* Initialize eeh device */
371 edev->class_code = pdn->class_code;
372 edev->mode &= 0xFFFFFF00;
373 edev->pcix_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
374 edev->pcie_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
375 edev->af_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_AF);
376 edev->aer_cap = pnv_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
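/*
 * The capability offsets cached above are reused later, e.g. by
 * pnv_eeh_do_flr(), pnv_eeh_do_af_flr() and pnv_eeh_restore_vf_config(),
 * so the error paths don't need to re-walk the capability lists.
 */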
377 if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
378 edev->mode |= EEH_DEV_BRIDGE;
379 if (edev->pcie_cap) {
380 pnv_pci_cfg_read(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
382 pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
383 if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
384 edev->mode |= EEH_DEV_ROOT_PORT;
385 else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
386 edev->mode |= EEH_DEV_DS_PORT;
390 edev->config_addr = (pdn->busno << 8) | (pdn->devfn);
391 edev->pe_config_addr = phb->ioda.pe_rmap[edev->config_addr];
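/*
 * pe_rmap[] reverse-maps the bus/devfn (config_addr) to the owning PE
 * number; eeh_add_to_parent_pe() below uses it to attach this device to
 * its parent PE.
 */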
394 ret = eeh_add_to_parent_pe(edev);
396 pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%x)\n",
397 __func__, hose->global_number, pdn->busno,
398 PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn), ret);
 * If the PE contains any one of the following adapters, its
 * PCI config space can't be accessed when dumping the EEH log;
 * otherwise, we would run into a fenced PHB caused by a shortage
 * of outbound credits in the adapter. PCI config access should
 * be blocked until the PE is reset. MMIO access is dropped by
 * the hardware anyway. In order to drop PCI config requests,
 * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which
 * is checked in the backend during PE state retrieval. If
 * the PE becomes frozen for the first time and the flag has
 * been set for the PE, we will set EEH_PE_CFG_BLOCKED for
 * that PE to block its config space.
 *
 * Broadcom Austin 4-port NICs (14e4:1657)
 * Broadcom Shiner 4-port 1G NICs (14e4:168a)
 * Broadcom Shiner 2-port 10G NICs (14e4:168e)
419 if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
420 pdn->device_id == 0x1657) ||
421 (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
422 pdn->device_id == 0x168a) ||
423 (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
424 pdn->device_id == 0x168e))
425 edev->pe->state |= EEH_PE_CFG_RESTRICTED;
428 * Cache the PE primary bus, which can't be fetched when
429 * full hotplug is in progress. In that case, all child
 * PCI devices of the PE are expected to be removed prior to PE reset.
433 if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
434 edev->pe->bus = pci_find_bus(hose->global_number,
437 edev->pe->state |= EEH_PE_PRI_BUS;
441 * Enable EEH explicitly so that we will do EEH check
442 * while accessing I/O stuff
444 eeh_add_flag(EEH_ENABLED);
446 /* Save memory bars */
453 * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
455 * @option: operation to be issued
 * The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA.
461 static int pnv_eeh_set_option(struct eeh_pe *pe, int option)
463 struct pci_controller *hose = pe->phb;
464 struct pnv_phb *phb = hose->private_data;
465 bool freeze_pe = false;
470 case EEH_OPT_DISABLE:
474 case EEH_OPT_THAW_MMIO:
475 opt = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO;
477 case EEH_OPT_THAW_DMA:
478 opt = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA;
480 case EEH_OPT_FREEZE_PE:
482 opt = OPAL_EEH_ACTION_SET_FREEZE_ALL;
485 pr_warn("%s: Invalid option %d\n", __func__, option);
489 /* Freeze master and slave PEs if PHB supports compound PEs */
491 if (phb->freeze_pe) {
492 phb->freeze_pe(phb, pe->addr);
496 rc = opal_pci_eeh_freeze_set(phb->opal_id, pe->addr, opt);
497 if (rc != OPAL_SUCCESS) {
498 pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
499 __func__, rc, phb->hose->global_number,
507 /* Unfreeze master and slave PEs if PHB supports */
508 if (phb->unfreeze_pe)
509 return phb->unfreeze_pe(phb, pe->addr, opt);
511 rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe->addr, opt);
512 if (rc != OPAL_SUCCESS) {
513 pr_warn("%s: Failure %lld enable %d for PHB#%x-PE#%x\n",
514 __func__, rc, option, phb->hose->global_number,
523 * pnv_eeh_get_pe_addr - Retrieve PE address
 * Retrieve the PE address according to the given traditional
 * PCI BDF (Bus/Device/Function) address.
529 static int pnv_eeh_get_pe_addr(struct eeh_pe *pe)
534 static void pnv_eeh_get_phb_diag(struct eeh_pe *pe)
536 struct pnv_phb *phb = pe->phb->private_data;
539 rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data,
540 PNV_PCI_DIAG_BUF_SIZE);
541 if (rc != OPAL_SUCCESS)
542 pr_warn("%s: Failure %lld getting PHB#%x diag-data\n",
543 __func__, rc, pe->phb->global_number);
546 static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
548 struct pnv_phb *phb = pe->phb->private_data;
554 rc = opal_pci_eeh_freeze_status(phb->opal_id,
559 if (rc != OPAL_SUCCESS) {
560 pr_warn("%s: Failure %lld getting PHB#%x state\n",
561 __func__, rc, phb->hose->global_number);
562 return EEH_STATE_NOT_SUPPORT;
 * Check the PHB state. If the PHB is frozen for the
 * first time, dump the PHB diag-data.
569 if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
570 result = (EEH_STATE_MMIO_ACTIVE |
571 EEH_STATE_DMA_ACTIVE |
572 EEH_STATE_MMIO_ENABLED |
573 EEH_STATE_DMA_ENABLED);
574 } else if (!(pe->state & EEH_PE_ISOLATED)) {
575 eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
576 pnv_eeh_get_phb_diag(pe);
578 if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
579 pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
585 static int pnv_eeh_get_pe_state(struct eeh_pe *pe)
587 struct pnv_phb *phb = pe->phb->private_data;
594 * We don't clobber hardware frozen state until PE
595 * reset is completed. In order to keep EEH core
596 * moving forward, we have to return operational
597 * state during PE reset.
599 if (pe->state & EEH_PE_RESET) {
600 result = (EEH_STATE_MMIO_ACTIVE |
601 EEH_STATE_DMA_ACTIVE |
602 EEH_STATE_MMIO_ENABLED |
603 EEH_STATE_DMA_ENABLED);
608 * Fetch PE state from hardware. If the PHB
609 * supports compound PE, let it handle that.
611 if (phb->get_pe_state) {
612 fstate = phb->get_pe_state(phb, pe->addr);
614 rc = opal_pci_eeh_freeze_status(phb->opal_id,
619 if (rc != OPAL_SUCCESS) {
620 pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n",
621 __func__, rc, phb->hose->global_number,
623 return EEH_STATE_NOT_SUPPORT;
627 /* Figure out state */
629 case OPAL_EEH_STOPPED_NOT_FROZEN:
630 result = (EEH_STATE_MMIO_ACTIVE |
631 EEH_STATE_DMA_ACTIVE |
632 EEH_STATE_MMIO_ENABLED |
633 EEH_STATE_DMA_ENABLED);
635 case OPAL_EEH_STOPPED_MMIO_FREEZE:
636 result = (EEH_STATE_DMA_ACTIVE |
637 EEH_STATE_DMA_ENABLED);
639 case OPAL_EEH_STOPPED_DMA_FREEZE:
640 result = (EEH_STATE_MMIO_ACTIVE |
641 EEH_STATE_MMIO_ENABLED);
643 case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
646 case OPAL_EEH_STOPPED_RESET:
647 result = EEH_STATE_RESET_ACTIVE;
649 case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
650 result = EEH_STATE_UNAVAILABLE;
652 case OPAL_EEH_STOPPED_PERM_UNAVAIL:
653 result = EEH_STATE_NOT_SUPPORT;
656 result = EEH_STATE_NOT_SUPPORT;
657 pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n",
658 __func__, phb->hose->global_number,
 * If the PHB supports compound PEs, freeze all
 * slave PEs for consistency.
 *
 * If the PE is switching to the frozen state for the
 * first time, dump the PHB diag-data.
669 if (!(result & EEH_STATE_NOT_SUPPORT) &&
670 !(result & EEH_STATE_UNAVAILABLE) &&
671 !(result & EEH_STATE_MMIO_ACTIVE) &&
672 !(result & EEH_STATE_DMA_ACTIVE) &&
673 !(pe->state & EEH_PE_ISOLATED)) {
675 phb->freeze_pe(phb, pe->addr);
677 eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
678 pnv_eeh_get_phb_diag(pe);
680 if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
681 pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
688 * pnv_eeh_get_state - Retrieve PE state
690 * @delay: delay while PE state is temporarily unavailable
 * Retrieve the state of the specified PE. On an IODA-compatible
 * platform, the state should be retrieved from the IODA table,
 * so we prefer to let the hardware-specific implementation handle it.
697 static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
701 if (pe->type & EEH_PE_PHB)
702 ret = pnv_eeh_get_phb_state(pe);
704 ret = pnv_eeh_get_pe_state(pe);
 * If the PE state is temporarily unavailable,
 * inform the EEH core to delay for the default period.
715 if (ret & EEH_STATE_UNAVAILABLE)
721 static s64 pnv_eeh_poll(unsigned long id)
723 s64 rc = OPAL_HARDWARE;
726 rc = opal_pci_poll(id);
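/*
 * A positive return value from opal_pci_poll() is the number of
 * milliseconds the firmware asks us to wait before polling again.
 */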
730 if (system_state < SYSTEM_RUNNING)
739 int pnv_eeh_phb_reset(struct pci_controller *hose, int option)
741 struct pnv_phb *phb = hose->private_data;
742 s64 rc = OPAL_HARDWARE;
744 pr_debug("%s: Reset PHB#%x, option=%d\n",
745 __func__, hose->global_number, option);
747 /* Issue PHB complete reset request */
748 if (option == EEH_RESET_FUNDAMENTAL ||
749 option == EEH_RESET_HOT)
750 rc = opal_pci_reset(phb->opal_id,
751 OPAL_RESET_PHB_COMPLETE,
753 else if (option == EEH_RESET_DEACTIVATE)
754 rc = opal_pci_reset(phb->opal_id,
755 OPAL_RESET_PHB_COMPLETE,
756 OPAL_DEASSERT_RESET);
 * Poll the state of the PHB until the request completes
 * successfully. The PHB reset is usually a PHB complete
 * reset followed by a hot reset on the root bus, so we
 * also need the PCI bus settlement delay.
767 rc = pnv_eeh_poll(phb->opal_id);
768 if (option == EEH_RESET_DEACTIVATE) {
769 if (system_state < SYSTEM_RUNNING)
770 udelay(1000 * EEH_PE_RST_SETTLE_TIME);
772 msleep(EEH_PE_RST_SETTLE_TIME);
775 if (rc != OPAL_SUCCESS)
781 static int pnv_eeh_root_reset(struct pci_controller *hose, int option)
783 struct pnv_phb *phb = hose->private_data;
784 s64 rc = OPAL_HARDWARE;
786 pr_debug("%s: Reset PHB#%x, option=%d\n",
787 __func__, hose->global_number, option);
 * During the reset deassert phase, we needn't care about
 * the reset scope, because the firmware does nothing for
 * fundamental or hot resets during that phase.
794 if (option == EEH_RESET_FUNDAMENTAL)
795 rc = opal_pci_reset(phb->opal_id,
796 OPAL_RESET_PCI_FUNDAMENTAL,
798 else if (option == EEH_RESET_HOT)
799 rc = opal_pci_reset(phb->opal_id,
802 else if (option == EEH_RESET_DEACTIVATE)
803 rc = opal_pci_reset(phb->opal_id,
805 OPAL_DEASSERT_RESET);
809 /* Poll state of the PHB until the request is done */
811 rc = pnv_eeh_poll(phb->opal_id);
812 if (option == EEH_RESET_DEACTIVATE)
813 msleep(EEH_PE_RST_SETTLE_TIME);
815 if (rc != OPAL_SUCCESS)
821 static int __pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
823 struct pci_dn *pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
824 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
825 int aer = edev ? edev->aer_cap : 0;
828 pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n",
829 __func__, pci_domain_nr(dev->bus),
830 dev->bus->number, option);
833 case EEH_RESET_FUNDAMENTAL:
835 /* Don't report linkDown event */
837 eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
839 ctrl |= PCI_ERR_UNC_SURPDN;
840 eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
844 eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
845 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
846 eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);
848 msleep(EEH_PE_RST_HOLD_TIME);
850 case EEH_RESET_DEACTIVATE:
851 eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
852 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
853 eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);
855 msleep(EEH_PE_RST_SETTLE_TIME);
857 /* Continue reporting linkDown event */
859 eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
861 ctrl &= ~PCI_ERR_UNC_SURPDN;
862 eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
872 static int pnv_eeh_bridge_reset(struct pci_dev *pdev, int option)
874 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
875 struct pnv_phb *phb = hose->private_data;
876 struct device_node *dn = pci_device_to_OF_node(pdev);
877 uint64_t id = PCI_SLOT_ID(phb->opal_id,
878 (pdev->bus->number << 8) | pdev->devfn);
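/*
 * PCI_SLOT_ID() combines the PHB's OPAL id with the bridge's bus/devfn so
 * that a firmware-managed slot reset can be targeted at this bridge.
 */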
/* Fall back to a hot reset on the bus if the firmware can't handle it */
883 if (!dn || !of_get_property(dn, "ibm,reset-by-firmware", NULL))
884 return __pnv_eeh_bridge_reset(pdev, option);
887 case EEH_RESET_FUNDAMENTAL:
888 scope = OPAL_RESET_PCI_FUNDAMENTAL;
891 scope = OPAL_RESET_PCI_HOT;
893 case EEH_RESET_DEACTIVATE:
896 dev_dbg(&pdev->dev, "%s: Unsupported reset %d\n",
901 rc = opal_pci_reset(id, scope, OPAL_ASSERT_RESET);
902 if (rc <= OPAL_SUCCESS)
905 rc = pnv_eeh_poll(id);
907 return (rc == OPAL_SUCCESS) ? 0 : -EIO;
910 void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
912 struct pci_controller *hose;
914 if (pci_is_root_bus(dev->bus)) {
915 hose = pci_bus_to_host(dev->bus);
916 pnv_eeh_root_reset(hose, EEH_RESET_HOT);
917 pnv_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
919 pnv_eeh_bridge_reset(dev, EEH_RESET_HOT);
920 pnv_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
924 static void pnv_eeh_wait_for_pending(struct pci_dn *pdn, const char *type,
927 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
930 /* Wait for Transaction Pending bit to be cleared */
931 for (i = 0; i < 4; i++) {
932 eeh_ops->read_config(pdn, pos, 2, &status);
933 if (!(status & mask))
936 msleep((1 << i) * 100);
939 pr_warn("%s: Pending transaction while issuing %sFLR to %04x:%02x:%02x.%01x\n",
941 edev->phb->global_number, pdn->busno,
942 PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
945 static int pnv_eeh_do_flr(struct pci_dn *pdn, int option)
947 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
950 if (WARN_ON(!edev->pcie_cap))
953 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCAP, 4, ®);
954 if (!(reg & PCI_EXP_DEVCAP_FLR))
959 case EEH_RESET_FUNDAMENTAL:
960 pnv_eeh_wait_for_pending(pdn, "",
961 edev->pcie_cap + PCI_EXP_DEVSTA,
962 PCI_EXP_DEVSTA_TRPND);
963 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
965 reg |= PCI_EXP_DEVCTL_BCR_FLR;
966 eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
968 msleep(EEH_PE_RST_HOLD_TIME);
970 case EEH_RESET_DEACTIVATE:
971 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
973 reg &= ~PCI_EXP_DEVCTL_BCR_FLR;
974 eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
976 msleep(EEH_PE_RST_SETTLE_TIME);
983 static int pnv_eeh_do_af_flr(struct pci_dn *pdn, int option)
985 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
988 if (WARN_ON(!edev->af_cap))
991 eeh_ops->read_config(pdn, edev->af_cap + PCI_AF_CAP, 1, &cap);
992 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
997 case EEH_RESET_FUNDAMENTAL:
 * Wait for the Transaction Pending bit to clear. A
 * word-aligned test is used, so we use the control offset
 * rather than status and shift the test bit to match.
1003 pnv_eeh_wait_for_pending(pdn, "AF",
1004 edev->af_cap + PCI_AF_CTRL,
1005 PCI_AF_STATUS_TP << 8);
1006 eeh_ops->write_config(pdn, edev->af_cap + PCI_AF_CTRL,
1007 1, PCI_AF_CTRL_FLR);
1008 msleep(EEH_PE_RST_HOLD_TIME);
1010 case EEH_RESET_DEACTIVATE:
1011 eeh_ops->write_config(pdn, edev->af_cap + PCI_AF_CTRL, 1, 0);
1012 msleep(EEH_PE_RST_SETTLE_TIME);
1019 static int pnv_eeh_reset_vf_pe(struct eeh_pe *pe, int option)
1021 struct eeh_dev *edev;
1025 /* The VF PE should have only one child device */
1026 edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, list);
1027 pdn = eeh_dev_to_pdn(edev);
1031 ret = pnv_eeh_do_flr(pdn, option);
1035 return pnv_eeh_do_af_flr(pdn, option);
1039 * pnv_eeh_reset - Reset the specified PE
1041 * @option: reset option
 * Do a reset on the indicated PE. For a PCI-bus-sensitive PE,
 * we need to reset the parent p2p bridge. The PHB has to be
 * reinitialized if the p2p bridge is the root bridge. For a
 * PCI-device-sensitive PE, we will try to reset the device
 * through an FLR. For now, we don't have OPAL APIs to do a
 * HARD reset yet, so all resets are SOFT (HOT) resets.
1050 static int pnv_eeh_reset(struct eeh_pe *pe, int option)
1052 struct pci_controller *hose = pe->phb;
1053 struct pnv_phb *phb;
1054 struct pci_bus *bus;
 * For a PHB reset, we always do a complete reset. For those PEs whose
 * primary bus is derived from the root complex (root bus) or the root
 * port (usually bus#1), we apply a hot or fundamental reset on the root
 * port. For other PEs, we always do a hot reset on the PE's primary bus.
 *
 * Here, we have a different design from pHyp, which always clears the
 * frozen state during PE reset. However, the good idea here, from benh,
 * is to keep the frozen state until the PE reset is done completely
 * (until BAR restore). With the frozen state, HW drops illegal IO
 * or MMIO accesses, which could otherwise incur a recursive frozen PE
 * during the PE reset. The side effect is that the EEH core has to clear
 * the frozen state explicitly after the BAR restore.
1071 if (pe->type & EEH_PE_PHB)
1072 return pnv_eeh_phb_reset(hose, option);
 * The frozen PE might be caused by the PAPR error injection
 * registers, which are expected to be cleared after hitting
 * a frozen PE as stated in the hardware spec. Unfortunately,
 * that's not true on P7IOC, so we have to clear them manually
 * to avoid recursive EEH errors during recovery.
1081 phb = hose->private_data;
1082 if (phb->model == PNV_PHB_MODEL_P7IOC &&
1083 (option == EEH_RESET_HOT ||
1084 option == EEH_RESET_FUNDAMENTAL)) {
1085 rc = opal_pci_reset(phb->opal_id,
1086 OPAL_RESET_PHB_ERROR,
1088 if (rc != OPAL_SUCCESS) {
1089 pr_warn("%s: Failure %lld clearing error injection registers\n",
1095 if (pe->type & EEH_PE_VF)
1096 return pnv_eeh_reset_vf_pe(pe, option);
1098 bus = eeh_pe_bus_get(pe);
1100 pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
1101 __func__, pe->phb->global_number, pe->addr);
1106 * If dealing with the root bus (or the bus underneath the
1107 * root port), we reset the bus underneath the root port.
1109 * The cxl driver depends on this behaviour for bi-modal card
1112 if (pci_is_root_bus(bus) ||
1113 pci_is_root_bus(bus->parent))
1114 return pnv_eeh_root_reset(hose, option);
1116 return pnv_eeh_bridge_reset(bus->self, option);
1120 * pnv_eeh_wait_state - Wait for PE state
1122 * @max_wait: maximal period in millisecond
 * Wait for the state of the associated PE. It might take some
 * time to retrieve the PE's state.
1127 static int pnv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
1133 ret = pnv_eeh_get_state(pe, &mwait);
1136 * If the PE's state is temporarily unavailable,
1137 * we have to wait for the specified time. Otherwise,
1138 * the PE's state will be returned immediately.
1140 if (ret != EEH_STATE_UNAVAILABLE)
1143 if (max_wait <= 0) {
1144 pr_warn("%s: Timeout getting PE#%x's state (%d)\n",
1145 __func__, pe->addr, max_wait);
1146 return EEH_STATE_NOT_SUPPORT;
1153 return EEH_STATE_NOT_SUPPORT;
1157 * pnv_eeh_get_log - Retrieve error log
1159 * @severity: temporary or permanent error log
1160 * @drv_log: driver log to be combined with retrieved error log
1161 * @len: length of driver log
 * Retrieve the temporary or permanent error log from the PE.
1165 static int pnv_eeh_get_log(struct eeh_pe *pe, int severity,
1166 char *drv_log, unsigned long len)
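/*
 * When EEH_EARLY_DUMP_LOG is set, the PHB diag-data was already dumped at
 * detection time, so it is only dumped here otherwise.
 */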
1168 if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
1169 pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
1175 * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE can be recovered
1182 static int pnv_eeh_configure_bridge(struct eeh_pe *pe)
 * pnv_eeh_err_inject - Inject the specified error into the indicated PE
1189 * @pe: the indicated PE
1191 * @func: specific error type
1193 * @mask: address mask
 * The routine is called to inject the specified error, which is
 * determined by @type and @func, into the indicated PE for
1199 static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
1200 unsigned long addr, unsigned long mask)
1202 struct pci_controller *hose = pe->phb;
1203 struct pnv_phb *phb = hose->private_data;
1206 if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
1207 type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
1208 pr_warn("%s: Invalid error type %d\n",
1213 if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
1214 func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
1215 pr_warn("%s: Invalid error function %d\n",
/* Does the firmware support error injection? */
1221 if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
1222 pr_warn("%s: Firmware doesn't support error injection\n",
1227 /* Do error injection */
1228 rc = opal_pci_err_inject(phb->opal_id, pe->addr,
1229 type, func, addr, mask);
1230 if (rc != OPAL_SUCCESS) {
1231 pr_warn("%s: Failure %lld injecting error "
1232 "%d-%d to PHB#%x-PE#%x\n",
1233 __func__, rc, type, func,
1234 hose->global_number, pe->addr);
1241 static inline bool pnv_eeh_cfg_blocked(struct pci_dn *pdn)
1243 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
1245 if (!edev || !edev->pe)
 * We will issue an FLR or AF FLR to all VFs, which are contained
 * in the VF PE. This relies on the EEH PCI config accessors, so
 * we can't block them during that window.
1253 if (edev->physfn && (edev->pe->state & EEH_PE_RESET))
1256 if (edev->pe->state & EEH_PE_CFG_BLOCKED)
1262 static int pnv_eeh_read_config(struct pci_dn *pdn,
1263 int where, int size, u32 *val)
1266 return PCIBIOS_DEVICE_NOT_FOUND;
1268 if (pnv_eeh_cfg_blocked(pdn)) {
1270 return PCIBIOS_SET_FAILED;
1273 return pnv_pci_cfg_read(pdn, where, size, val);
1276 static int pnv_eeh_write_config(struct pci_dn *pdn,
1277 int where, int size, u32 val)
1280 return PCIBIOS_DEVICE_NOT_FOUND;
1282 if (pnv_eeh_cfg_blocked(pdn))
1283 return PCIBIOS_SET_FAILED;
1285 return pnv_pci_cfg_write(pdn, where, size, val);
1288 static void pnv_eeh_dump_hub_diag_common(struct OpalIoP7IOCErrorData *data)
1291 if (data->gemXfir || data->gemRfir ||
1292 data->gemRirqfir || data->gemMask || data->gemRwof)
1293 pr_info(" GEM: %016llx %016llx %016llx %016llx %016llx\n",
1294 be64_to_cpu(data->gemXfir),
1295 be64_to_cpu(data->gemRfir),
1296 be64_to_cpu(data->gemRirqfir),
1297 be64_to_cpu(data->gemMask),
1298 be64_to_cpu(data->gemRwof));
1301 if (data->lemFir || data->lemErrMask ||
1302 data->lemAction0 || data->lemAction1 || data->lemWof)
1303 pr_info(" LEM: %016llx %016llx %016llx %016llx %016llx\n",
1304 be64_to_cpu(data->lemFir),
1305 be64_to_cpu(data->lemErrMask),
1306 be64_to_cpu(data->lemAction0),
1307 be64_to_cpu(data->lemAction1),
1308 be64_to_cpu(data->lemWof));
1311 static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose)
1313 struct pnv_phb *phb = hose->private_data;
1314 struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
1317 rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
1318 if (rc != OPAL_SUCCESS) {
1319 pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n",
1320 __func__, phb->hub_id, rc);
1324 switch (be16_to_cpu(data->type)) {
1325 case OPAL_P7IOC_DIAG_TYPE_RGC:
1326 pr_info("P7IOC diag-data for RGC\n\n");
1327 pnv_eeh_dump_hub_diag_common(data);
1328 if (data->rgc.rgcStatus || data->rgc.rgcLdcp)
1329 pr_info(" RGC: %016llx %016llx\n",
1330 be64_to_cpu(data->rgc.rgcStatus),
1331 be64_to_cpu(data->rgc.rgcLdcp));
1333 case OPAL_P7IOC_DIAG_TYPE_BI:
1334 pr_info("P7IOC diag-data for BI %s\n\n",
1335 data->bi.biDownbound ? "Downbound" : "Upbound");
1336 pnv_eeh_dump_hub_diag_common(data);
1337 if (data->bi.biLdcp0 || data->bi.biLdcp1 ||
1338 data->bi.biLdcp2 || data->bi.biFenceStatus)
1339 pr_info(" BI: %016llx %016llx %016llx %016llx\n",
1340 be64_to_cpu(data->bi.biLdcp0),
1341 be64_to_cpu(data->bi.biLdcp1),
1342 be64_to_cpu(data->bi.biLdcp2),
1343 be64_to_cpu(data->bi.biFenceStatus));
1345 case OPAL_P7IOC_DIAG_TYPE_CI:
1346 pr_info("P7IOC diag-data for CI Port %d\n\n",
1348 pnv_eeh_dump_hub_diag_common(data);
1349 if (data->ci.ciPortStatus || data->ci.ciPortLdcp)
1350 pr_info(" CI: %016llx %016llx\n",
1351 be64_to_cpu(data->ci.ciPortStatus),
1352 be64_to_cpu(data->ci.ciPortLdcp));
1354 case OPAL_P7IOC_DIAG_TYPE_MISC:
1355 pr_info("P7IOC diag-data for MISC\n\n");
1356 pnv_eeh_dump_hub_diag_common(data);
1358 case OPAL_P7IOC_DIAG_TYPE_I2C:
1359 pr_info("P7IOC diag-data for I2C\n\n");
1360 pnv_eeh_dump_hub_diag_common(data);
1363 pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n",
1364 __func__, phb->hub_id, data->type);
1368 static int pnv_eeh_get_pe(struct pci_controller *hose,
1369 u16 pe_no, struct eeh_pe **pe)
1371 struct pnv_phb *phb = hose->private_data;
1372 struct pnv_ioda_pe *pnv_pe;
1373 struct eeh_pe *dev_pe;
1374 struct eeh_dev edev;
 * If the PHB supports compound PEs, fetch
 * the master PE, because the slave PE is invisible
1381 pnv_pe = &phb->ioda.pe_array[pe_no];
1382 if (pnv_pe->flags & PNV_IODA_PE_SLAVE) {
1383 pnv_pe = pnv_pe->master;
1385 !(pnv_pe->flags & PNV_IODA_PE_MASTER));
1386 pe_no = pnv_pe->pe_number;
1389 /* Find the PE according to PE# */
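/*
 * A temporary on-stack eeh_dev is filled in with just enough information
 * for eeh_pe_get() to look up the PE by number on this PHB.
 */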
1390 memset(&edev, 0, sizeof(struct eeh_dev));
1392 edev.pe_config_addr = pe_no;
1393 dev_pe = eeh_pe_get(&edev);
1397 /* Freeze the (compound) PE */
1399 if (!(dev_pe->state & EEH_PE_ISOLATED))
1400 phb->freeze_pe(phb, pe_no);
 * At this point, we're sure the (compound) PE should
 * have been frozen. However, we still need to poke upward
 * until we hit the frozen PE at the top level.
1407 dev_pe = dev_pe->parent;
1408 while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) {
1410 int active_flags = (EEH_STATE_MMIO_ACTIVE |
1411 EEH_STATE_DMA_ACTIVE);
1413 ret = eeh_ops->get_state(dev_pe, NULL);
1414 if (ret <= 0 || (ret & active_flags) == active_flags) {
1415 dev_pe = dev_pe->parent;
1419 /* Frozen parent PE */
1421 if (!(dev_pe->state & EEH_PE_ISOLATED))
1422 phb->freeze_pe(phb, dev_pe->addr);
1425 dev_pe = dev_pe->parent;
1432 * pnv_eeh_next_error - Retrieve next EEH error to handle
 * The function is expected to be called by the EEH core when it gets a
 * special EEH event (one without a bound PE). The function calls OPAL
 * APIs for the next error to handle. Informational errors are handled
 * internally by the platform. However, a dead IOC, dead PHB, fenced PHB
 * or frozen PE should eventually be handled by the EEH core.
1441 static int pnv_eeh_next_error(struct eeh_pe **pe)
1443 struct pci_controller *hose;
1444 struct pnv_phb *phb;
1445 struct eeh_pe *phb_pe, *parent_pe;
1446 __be64 frozen_pe_no;
1447 __be16 err_type, severity;
1448 int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
1450 int state, ret = EEH_NEXT_ERR_NONE;
1453 * While running here, it's safe to purge the event queue. The
1454 * event should still be masked.
1456 eeh_remove_event(NULL, false);
1458 list_for_each_entry(hose, &hose_list, list_node) {
 * If the subordinate PCI buses of the PHB have been
 * removed or are already under error recovery, we
 * needn't take care of them any more.
1464 phb = hose->private_data;
1465 phb_pe = eeh_phb_pe_get(hose);
1466 if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
1469 rc = opal_pci_next_error(phb->opal_id,
1470 &frozen_pe_no, &err_type, &severity);
1471 if (rc != OPAL_SUCCESS) {
1472 pr_devel("%s: Invalid return value on "
1473 "PHB#%x (0x%lx) from opal_pci_next_error",
1474 __func__, hose->global_number, rc);
1478 /* If the PHB doesn't have error, stop processing */
1479 if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
1480 be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
1481 pr_devel("%s: No error found on PHB#%x\n",
1482 __func__, hose->global_number);
 * Process the error. We expect the error with the
 * highest priority to be reported when there are multiple errors on the
1491 pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
1492 __func__, be16_to_cpu(err_type),
1493 be16_to_cpu(severity), be64_to_cpu(frozen_pe_no),
1494 hose->global_number);
1495 switch (be16_to_cpu(err_type)) {
1496 case OPAL_EEH_IOC_ERROR:
1497 if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
1498 pr_err("EEH: dead IOC detected\n");
1499 ret = EEH_NEXT_ERR_DEAD_IOC;
1500 } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
1501 pr_info("EEH: IOC informative error "
1503 pnv_eeh_get_and_dump_hub_diag(hose);
1504 ret = EEH_NEXT_ERR_NONE;
1508 case OPAL_EEH_PHB_ERROR:
1509 if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
1511 pr_err("EEH: dead PHB#%x detected, "
1513 hose->global_number,
1514 eeh_pe_loc_get(phb_pe));
1515 ret = EEH_NEXT_ERR_DEAD_PHB;
1516 } else if (be16_to_cpu(severity) ==
1517 OPAL_EEH_SEV_PHB_FENCED) {
1519 pr_err("EEH: Fenced PHB#%x detected, "
1521 hose->global_number,
1522 eeh_pe_loc_get(phb_pe));
1523 ret = EEH_NEXT_ERR_FENCED_PHB;
1524 } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
1525 pr_info("EEH: PHB#%x informative error "
1526 "detected, location: %s\n",
1527 hose->global_number,
1528 eeh_pe_loc_get(phb_pe));
1529 pnv_eeh_get_phb_diag(phb_pe);
1530 pnv_pci_dump_phb_diag_data(hose, phb_pe->data);
1531 ret = EEH_NEXT_ERR_NONE;
1535 case OPAL_EEH_PE_ERROR:
1537 * If we can't find the corresponding PE, we
1538 * just try to unfreeze.
1540 if (pnv_eeh_get_pe(hose,
1541 be64_to_cpu(frozen_pe_no), pe)) {
1542 pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
1543 hose->global_number, be64_to_cpu(frozen_pe_no));
1544 pr_info("EEH: PHB location: %s\n",
1545 eeh_pe_loc_get(phb_pe));
1547 /* Dump PHB diag-data */
1548 rc = opal_pci_get_phb_diag_data2(phb->opal_id,
1549 phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
1550 if (rc == OPAL_SUCCESS)
1551 pnv_pci_dump_phb_diag_data(hose,
1554 /* Try best to clear it */
1555 opal_pci_eeh_freeze_clear(phb->opal_id,
1556 be64_to_cpu(frozen_pe_no),
1557 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
1558 ret = EEH_NEXT_ERR_NONE;
1559 } else if ((*pe)->state & EEH_PE_ISOLATED ||
1560 eeh_pe_passed(*pe)) {
1561 ret = EEH_NEXT_ERR_NONE;
1563 pr_err("EEH: Frozen PE#%x "
1564 "on PHB#%x detected\n",
1566 (*pe)->phb->global_number);
1567 pr_err("EEH: PE location: %s, "
1568 "PHB location: %s\n",
1569 eeh_pe_loc_get(*pe),
1570 eeh_pe_loc_get(phb_pe));
1571 ret = EEH_NEXT_ERR_FROZEN_PE;
1576 pr_warn("%s: Unexpected error type %d\n",
1577 __func__, be16_to_cpu(err_type));
 * The EEH core will try to recover from a fenced PHB or
 * a frozen PE. For a frozen PE, the EEH core enables the
 * IO path before collecting logs, but doing so disturbs
 * the error site. So we have to dump the log in advance
 * here.
1587 if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
1588 ret == EEH_NEXT_ERR_FENCED_PHB) &&
1589 !((*pe)->state & EEH_PE_ISOLATED)) {
1590 eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
1591 pnv_eeh_get_phb_diag(*pe);
1593 if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
1594 pnv_pci_dump_phb_diag_data((*pe)->phb,
 * We probably have a frozen parent PE out there, and
 * we have to handle the frozen parent PE first.
1602 if (ret == EEH_NEXT_ERR_FROZEN_PE) {
1603 parent_pe = (*pe)->parent;
1605 /* Hit the ceiling ? */
1606 if (parent_pe->type & EEH_PE_PHB)
1609 /* Frozen parent PE ? */
1610 state = eeh_ops->get_state(parent_pe, NULL);
1612 (state & active_flags) != active_flags)
1615 /* Next parent level */
1616 parent_pe = parent_pe->parent;
1619 /* We possibly migrate to another PE */
1620 eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
 * If we have no errors on this PHB, or only an
 * informational error, we continue poking it.
 * Otherwise, we need actions to be taken by the upper
1629 if (ret > EEH_NEXT_ERR_INF)
/* Unmask the event, re-enabling the IRQ masked in pnv_eeh_event() */
1634 if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
1635 enable_irq(eeh_event_irq);
1640 static int pnv_eeh_restore_vf_config(struct pci_dn *pdn)
1642 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
1643 u32 devctl, cmd, cap2, aer_capctl;
1646 if (edev->pcie_cap) {
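/*
 * pdn->mps caches the payload size in bytes; (ffs(mps) - 8) << 5 converts
 * it back to the PCI_EXP_DEVCTL_PAYLOAD encoding (128 << n, in bits 7:5).
 */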
1648 old_mps = (ffs(pdn->mps) - 8) << 5;
1649 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
1651 devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
1653 eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
1656 /* Disable Completion Timeout */
1657 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCAP2,
1660 eeh_ops->read_config(pdn,
1661 edev->pcie_cap + PCI_EXP_DEVCTL2,
1664 eeh_ops->write_config(pdn,
1665 edev->pcie_cap + PCI_EXP_DEVCTL2,
1670 /* Enable SERR and parity checking */
1671 eeh_ops->read_config(pdn, PCI_COMMAND, 2, &cmd);
1672 cmd |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1673 eeh_ops->write_config(pdn, PCI_COMMAND, 2, cmd);
/* Enable reporting of various errors */
1676 if (edev->pcie_cap) {
1677 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
1679 devctl &= ~PCI_EXP_DEVCTL_CERE;
1680 devctl |= (PCI_EXP_DEVCTL_NFERE |
1681 PCI_EXP_DEVCTL_FERE |
1682 PCI_EXP_DEVCTL_URRE);
1683 eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
1687 /* Enable ECRC generation and check */
1688 if (edev->pcie_cap && edev->aer_cap) {
1689 eeh_ops->read_config(pdn, edev->aer_cap + PCI_ERR_CAP,
1691 aer_capctl |= (PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
1692 eeh_ops->write_config(pdn, edev->aer_cap + PCI_ERR_CAP,
1699 static int pnv_eeh_restore_config(struct pci_dn *pdn)
1701 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
1702 struct pnv_phb *phb;
 * We have to restore the PCI config space after reset since the
 * firmware can't see SRIOV VFs.
 *
 * FIXME: The MPS, error routing rules and timeout settings are
 * worth exporting from the firmware in an extensible way.
1716 ret = pnv_eeh_restore_vf_config(pdn);
1718 phb = edev->phb->private_data;
1719 ret = opal_pci_reinit(phb->opal_id,
1720 OPAL_REINIT_PCI_DEV, edev->config_addr);
1724 pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
1725 __func__, edev->config_addr, ret);
1732 static struct eeh_ops pnv_eeh_ops = {
1734 .init = pnv_eeh_init,
1735 .post_init = pnv_eeh_post_init,
1736 .probe = pnv_eeh_probe,
1737 .set_option = pnv_eeh_set_option,
1738 .get_pe_addr = pnv_eeh_get_pe_addr,
1739 .get_state = pnv_eeh_get_state,
1740 .reset = pnv_eeh_reset,
1741 .wait_state = pnv_eeh_wait_state,
1742 .get_log = pnv_eeh_get_log,
1743 .configure_bridge = pnv_eeh_configure_bridge,
1744 .err_inject = pnv_eeh_err_inject,
1745 .read_config = pnv_eeh_read_config,
1746 .write_config = pnv_eeh_write_config,
1747 .next_error = pnv_eeh_next_error,
1748 .restore_config = pnv_eeh_restore_config
1751 void pcibios_bus_add_device(struct pci_dev *pdev)
1753 struct pci_dn *pdn = pci_get_pdn(pdev);
1755 if (!pdev->is_virtfn)
 * The following operations will fail if the VF's sysfs files
 * aren't created or its resources aren't finalized.
1762 eeh_add_device_early(pdn);
1763 eeh_add_device_late(pdev);
1764 eeh_sysfs_add_device(pdev);
1767 #ifdef CONFIG_PCI_IOV
1768 static void pnv_pci_fixup_vf_mps(struct pci_dev *pdev)
1770 struct pci_dn *pdn = pci_get_pdn(pdev);
1773 if (!pdev->is_virtfn)
1776 /* Synchronize MPS for VF and PF */
1777 parent_mps = pcie_get_mps(pdev->physfn);
1778 if ((128 << pdev->pcie_mpss) >= parent_mps)
1779 pcie_set_mps(pdev, parent_mps);
1780 pdn->mps = pcie_get_mps(pdev);
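/* Cached so pnv_eeh_restore_vf_config() can restore the VF's MPS after reset. */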
1782 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pnv_pci_fixup_vf_mps);
1783 #endif /* CONFIG_PCI_IOV */
1786 * eeh_powernv_init - Register platform dependent EEH operations
 * EEH initialization on the powernv platform. This function should be
 * called before any EEH-related functions.
1791 static int __init eeh_powernv_init(void)
1795 eeh_set_pe_aux_size(PNV_PCI_DIAG_BUF_SIZE);
1796 ret = eeh_ops_register(&pnv_eeh_ops);
1798 pr_info("EEH: PowerNV platform initialized\n");
1800 pr_info("EEH: Failed to initialize PowerNV platform (%d)\n", ret);
1804 machine_early_initcall(powernv, eeh_powernv_init);