powerpc/powernv: Reset PHB in kdump kernel
arch/powerpc/platforms/powernv/eeh-ioda.c
/*
 * This file implements the functions needed by EEH on IODA
 * compliant chips. Most of the EEH support here is built on
 * top of the OPAL APIs.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/string.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/msi_bitmap.h>
#include <asm/opal.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/tce.h>

#include "powernv.h"
#include "pci.h"

static int ioda_eeh_nb_init = 0;

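/*
 * OPAL event notifier callback. When a PCI error event is raised and
 * EEH has been enabled, queue a special EEH event (without a bound PE)
 * for the EEH core; otherwise clear the pending event bit so the error
 * can be picked up once EEH is enabled.
 */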
static int ioda_eeh_event(struct notifier_block *nb,
                          unsigned long events, void *change)
{
        uint64_t changed_evts = (uint64_t)change;

        /*
         * We simply send a special EEH event if EEH has
         * been enabled, or clear the pending events in
         * case EEH is enabled soon.
         */
        if (!(changed_evts & OPAL_EVENT_PCI_ERROR) ||
            !(events & OPAL_EVENT_PCI_ERROR))
                return 0;

        if (eeh_enabled())
                eeh_send_failure_event(NULL);
        else
                opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);

        return 0;
}

static struct notifier_block ioda_eeh_nb = {
        .notifier_call  = ioda_eeh_event,
        .next           = NULL,
        .priority       = 0
};

#ifdef CONFIG_DEBUG_FS
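/*
 * The helpers below expose three PHB registers through debugfs for
 * error injection: writes land at fixed offsets into the PHB register
 * space (0xD10 for outbound, 0xD90 and 0xE10 for the two inbound
 * paths) and reads return the current register contents. The exact
 * semantics of the injected errors are chip specific.
 */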
static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val)
{
        struct pci_controller *hose = data;
        struct pnv_phb *phb = hose->private_data;

        out_be64(phb->regs + offset, val);
        return 0;
}

static int ioda_eeh_dbgfs_get(void *data, int offset, u64 *val)
{
        struct pci_controller *hose = data;
        struct pnv_phb *phb = hose->private_data;

        *val = in_be64(phb->regs + offset);
        return 0;
}

static int ioda_eeh_outb_dbgfs_set(void *data, u64 val)
{
        return ioda_eeh_dbgfs_set(data, 0xD10, val);
}

static int ioda_eeh_outb_dbgfs_get(void *data, u64 *val)
{
        return ioda_eeh_dbgfs_get(data, 0xD10, val);
}

static int ioda_eeh_inbA_dbgfs_set(void *data, u64 val)
{
        return ioda_eeh_dbgfs_set(data, 0xD90, val);
}

static int ioda_eeh_inbA_dbgfs_get(void *data, u64 *val)
{
        return ioda_eeh_dbgfs_get(data, 0xD90, val);
}

static int ioda_eeh_inbB_dbgfs_set(void *data, u64 val)
{
        return ioda_eeh_dbgfs_set(data, 0xE10, val);
}

static int ioda_eeh_inbB_dbgfs_get(void *data, u64 *val)
{
        return ioda_eeh_dbgfs_get(data, 0xE10, val);
}

DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_outb_dbgfs_ops, ioda_eeh_outb_dbgfs_get,
                        ioda_eeh_outb_dbgfs_set, "0x%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbA_dbgfs_ops, ioda_eeh_inbA_dbgfs_get,
                        ioda_eeh_inbA_dbgfs_set, "0x%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
                        ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
#endif /* CONFIG_DEBUG_FS */

/**
 * ioda_eeh_post_init - Chip dependent post initialization
 * @hose: PCI controller
 *
 * The function will be called after EEH PEs and devices
 * have been built. That means the EEH core is ready to
 * provide service, with the I/O address cache in place.
 */
static int ioda_eeh_post_init(struct pci_controller *hose)
{
        struct pnv_phb *phb = hose->private_data;
        int ret;

        /* Register OPAL event notifier */
        if (!ioda_eeh_nb_init) {
                ret = opal_notifier_register(&ioda_eeh_nb);
                if (ret) {
                        pr_err("%s: Can't register OPAL event notifier (%d)\n",
                               __func__, ret);
                        return ret;
                }

                ioda_eeh_nb_init = 1;
        }

#ifdef CONFIG_DEBUG_FS
        if (!phb->has_dbgfs && phb->dbgfs) {
                phb->has_dbgfs = 1;

                debugfs_create_file("err_injct_outbound", 0600,
                                    phb->dbgfs, hose,
                                    &ioda_eeh_outb_dbgfs_ops);
                debugfs_create_file("err_injct_inboundA", 0600,
                                    phb->dbgfs, hose,
                                    &ioda_eeh_inbA_dbgfs_ops);
                debugfs_create_file("err_injct_inboundB", 0600,
                                    phb->dbgfs, hose,
                                    &ioda_eeh_inbB_dbgfs_ops);
        }
#endif

        /*
         * If EEH is enabled, we're going to rely on that.
         * Otherwise, we fall back to the conventional mechanism
         * of clearing frozen PEs during PCI config accesses.
         */
        if (eeh_enabled())
                phb->flags |= PNV_PHB_FLAG_EEH;
        else
                phb->flags &= ~PNV_PHB_FLAG_EEH;

        return 0;
}

/**
 * ioda_eeh_set_option - Set EEH operation or I/O setting
 * @pe: EEH PE
 * @option: options
 *
 * Enable or disable EEH for the indicated PE. The function
 * can also be used to enable I/O or DMA for the PE.
 */
static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
{
        s64 ret;
        u32 pe_no;
        struct pci_controller *hose = pe->phb;
        struct pnv_phb *phb = hose->private_data;

        /* Check on PE number */
        if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
                pr_err("%s: PE address %x out of range [0, %x] "
                       "on PHB#%x\n",
                        __func__, pe->addr, phb->ioda.total_pe,
                        hose->global_number);
                return -EINVAL;
        }

        pe_no = pe->addr;
        switch (option) {
        case EEH_OPT_DISABLE:
                ret = -EEXIST;
                break;
        case EEH_OPT_ENABLE:
                ret = 0;
                break;
        case EEH_OPT_THAW_MMIO:
                ret = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
                                OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO);
                if (ret) {
                        pr_warning("%s: Failed to enable MMIO for "
                                   "PHB#%x-PE#%x, err=%lld\n",
                                __func__, hose->global_number, pe_no, ret);
                        return -EIO;
                }

                break;
        case EEH_OPT_THAW_DMA:
                ret = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
                                OPAL_EEH_ACTION_CLEAR_FREEZE_DMA);
                if (ret) {
                        pr_warning("%s: Failed to enable DMA for "
                                   "PHB#%x-PE#%x, err=%lld\n",
                                __func__, hose->global_number, pe_no, ret);
                        return -EIO;
                }

                break;
        default:
                pr_warning("%s: Invalid option %d\n", __func__, option);
                return -EINVAL;
        }

        return ret;
}

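/*
 * Retrieve the diag-data blob for the PHB from OPAL and dump it
 * through the generic powernv PCI diag-data dumper.
 */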
static void ioda_eeh_phb_diag(struct pci_controller *hose)
{
        struct pnv_phb *phb = hose->private_data;
        long rc;

        rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
                                         PNV_PCI_DIAG_BUF_SIZE);
        if (rc != OPAL_SUCCESS) {
                pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
                            __func__, hose->global_number, rc);
                return;
        }

        pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
}

/**
 * ioda_eeh_get_state - Retrieve the state of PE
 * @pe: EEH PE
 *
 * The PE's state should be retrieved from the PEEV and PEST
 * IODA tables. Since OPAL already exports a call for that,
 * we simply use it.
 */
static int ioda_eeh_get_state(struct eeh_pe *pe)
{
        s64 ret = 0;
        u8 fstate;
        u16 pcierr;
        u32 pe_no;
        int result;
        struct pci_controller *hose = pe->phb;
        struct pnv_phb *phb = hose->private_data;

        /*
         * Sanity check on PE address. The PHB PE address should
         * be zero.
         */
        if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
                pr_err("%s: PE address %x out of range [0, %x] "
                       "on PHB#%x\n",
                       __func__, pe->addr, phb->ioda.total_pe,
                       hose->global_number);
                return EEH_STATE_NOT_SUPPORT;
        }

        /*
         * If we're in the middle of a PE reset, return normal
         * state to keep the EEH core going. For a PHB reset, we
         * still expect the fenced PHB to be cleared by the
         * PHB reset itself.
         */
        if (!(pe->type & EEH_PE_PHB) &&
            (pe->state & EEH_PE_RESET)) {
                result = (EEH_STATE_MMIO_ACTIVE |
                          EEH_STATE_DMA_ACTIVE |
                          EEH_STATE_MMIO_ENABLED |
                          EEH_STATE_DMA_ENABLED);
                return result;
        }

        /* Retrieve PE status through OPAL */
        pe_no = pe->addr;
        ret = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
                        &fstate, &pcierr, NULL);
        if (ret) {
                pr_err("%s: Failed to get EEH status on "
                       "PHB#%x-PE#%x, err=%lld\n",
                       __func__, hose->global_number, pe_no, ret);
                return EEH_STATE_NOT_SUPPORT;
        }

        /* Check PHB status */
        if (pe->type & EEH_PE_PHB) {
                result = 0;
                result &= ~EEH_STATE_RESET_ACTIVE;

                if (pcierr != OPAL_EEH_PHB_ERROR) {
                        result |= EEH_STATE_MMIO_ACTIVE;
                        result |= EEH_STATE_DMA_ACTIVE;
                        result |= EEH_STATE_MMIO_ENABLED;
                        result |= EEH_STATE_DMA_ENABLED;
                } else if (!(pe->state & EEH_PE_ISOLATED)) {
                        eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
                        ioda_eeh_phb_diag(hose);
                }

                return result;
        }

        /* Parse result out */
        result = 0;
        switch (fstate) {
        case OPAL_EEH_STOPPED_NOT_FROZEN:
                result &= ~EEH_STATE_RESET_ACTIVE;
                result |= EEH_STATE_MMIO_ACTIVE;
                result |= EEH_STATE_DMA_ACTIVE;
                result |= EEH_STATE_MMIO_ENABLED;
                result |= EEH_STATE_DMA_ENABLED;
                break;
        case OPAL_EEH_STOPPED_MMIO_FREEZE:
                result &= ~EEH_STATE_RESET_ACTIVE;
                result |= EEH_STATE_DMA_ACTIVE;
                result |= EEH_STATE_DMA_ENABLED;
                break;
        case OPAL_EEH_STOPPED_DMA_FREEZE:
                result &= ~EEH_STATE_RESET_ACTIVE;
                result |= EEH_STATE_MMIO_ACTIVE;
                result |= EEH_STATE_MMIO_ENABLED;
                break;
        case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
                result &= ~EEH_STATE_RESET_ACTIVE;
                break;
        case OPAL_EEH_STOPPED_RESET:
                result |= EEH_STATE_RESET_ACTIVE;
                break;
        case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
                result |= EEH_STATE_UNAVAILABLE;
                break;
        case OPAL_EEH_STOPPED_PERM_UNAVAIL:
                result |= EEH_STATE_NOT_SUPPORT;
                break;
        default:
                pr_warning("%s: Unexpected EEH status 0x%x "
                           "on PHB#%x-PE#%x\n",
                           __func__, fstate, hose->global_number, pe_no);
        }

        /* Dump PHB diag-data for frozen PE */
        if (result != EEH_STATE_NOT_SUPPORT &&
            (result & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) !=
            (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE) &&
            !(pe->state & EEH_PE_ISOLATED)) {
                eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
                ioda_eeh_phb_diag(hose);
        }

        return result;
}

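/*
 * Poll the PHB until the pending OPAL operation (e.g. a reset)
 * completes. opal_pci_poll() returns the suggested delay in
 * milliseconds before the next poll, or a value <= 0 once the
 * operation has finished or failed. Before the system is fully up,
 * busy-wait with udelay() since msleep() isn't usable that early.
 */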
static s64 ioda_eeh_phb_poll(struct pnv_phb *phb)
{
        s64 rc = OPAL_HARDWARE;

        while (1) {
                rc = opal_pci_poll(phb->opal_id);
                if (rc <= 0)
                        break;

                if (system_state < SYSTEM_RUNNING)
                        udelay(1000 * rc);
                else
                        msleep(rc);
        }

        return rc;
}

int ioda_eeh_phb_reset(struct pci_controller *hose, int option)
{
        struct pnv_phb *phb = hose->private_data;
        s64 rc = OPAL_HARDWARE;

        pr_debug("%s: Reset PHB#%x, option=%d\n",
                 __func__, hose->global_number, option);

        /* Issue PHB complete reset request */
        if (option == EEH_RESET_FUNDAMENTAL ||
            option == EEH_RESET_HOT)
                rc = opal_pci_reset(phb->opal_id,
                                OPAL_PHB_COMPLETE,
                                OPAL_ASSERT_RESET);
        else if (option == EEH_RESET_DEACTIVATE)
                rc = opal_pci_reset(phb->opal_id,
                                OPAL_PHB_COMPLETE,
                                OPAL_DEASSERT_RESET);
        if (rc < 0)
                goto out;

        /*
         * Poll the state of the PHB until the request completes
         * successfully. The PHB reset is usually a PHB complete
         * reset followed by a hot reset on the root bus, so we
         * also need the PCI bus settlement delay.
         */
        rc = ioda_eeh_phb_poll(phb);
        if (option == EEH_RESET_DEACTIVATE) {
                if (system_state < SYSTEM_RUNNING)
                        udelay(1000 * EEH_PE_RST_SETTLE_TIME);
                else
                        msleep(EEH_PE_RST_SETTLE_TIME);
        }
out:
        if (rc != OPAL_SUCCESS)
                return -EIO;

        return 0;
}

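/*
 * Reset the root port of the PHB. Assert a fundamental or hot reset
 * through OPAL depending on the requested option, or deassert the
 * reset, then poll until the operation completes and give the bus
 * time to settle after deassert.
 */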
static int ioda_eeh_root_reset(struct pci_controller *hose, int option)
{
        struct pnv_phb *phb = hose->private_data;
        s64 rc = OPAL_SUCCESS;

        pr_debug("%s: Reset PHB#%x, option=%d\n",
                 __func__, hose->global_number, option);

        /*
         * During the reset deassert time, we need not care about
         * the reset scope, because the firmware does nothing for
         * fundamental or hot resets during the deassert phase.
         */
        if (option == EEH_RESET_FUNDAMENTAL)
                rc = opal_pci_reset(phb->opal_id,
                                OPAL_PCI_FUNDAMENTAL_RESET,
                                OPAL_ASSERT_RESET);
        else if (option == EEH_RESET_HOT)
                rc = opal_pci_reset(phb->opal_id,
                                OPAL_PCI_HOT_RESET,
                                OPAL_ASSERT_RESET);
        else if (option == EEH_RESET_DEACTIVATE)
                rc = opal_pci_reset(phb->opal_id,
                                OPAL_PCI_HOT_RESET,
                                OPAL_DEASSERT_RESET);
        if (rc < 0)
                goto out;

        /* Poll the state of the PHB until the request completes */
        rc = ioda_eeh_phb_poll(phb);
        if (option == EEH_RESET_DEACTIVATE)
                msleep(EEH_PE_RST_SETTLE_TIME);
out:
        if (rc != OPAL_SUCCESS)
                return -EIO;

        return 0;
}

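/*
 * Reset the secondary bus under a PCI bridge by toggling the
 * Secondary Bus Reset bit in the bridge control register. While the
 * reset is asserted, Surprise Down reporting is masked in the AER
 * uncorrectable error mask (if AER is present) so the induced link
 * down isn't reported as an error.
 */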
static int ioda_eeh_bridge_reset(struct pci_dev *dev, int option)
{
        struct device_node *dn = pci_device_to_OF_node(dev);
        struct eeh_dev *edev = of_node_to_eeh_dev(dn);
        int aer = edev ? edev->aer_cap : 0;
        u32 ctrl;

        pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n",
                 __func__, pci_domain_nr(dev->bus),
                 dev->bus->number, option);

        switch (option) {
        case EEH_RESET_FUNDAMENTAL:
        case EEH_RESET_HOT:
                /* Don't report linkDown event */
                if (aer) {
                        eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK,
                                             4, &ctrl);
                        ctrl |= PCI_ERR_UNC_SURPDN;
                        eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK,
                                              4, ctrl);
                }

                eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl);
                ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
                eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl);
                msleep(EEH_PE_RST_HOLD_TIME);

                break;
        case EEH_RESET_DEACTIVATE:
                eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl);
                ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
                eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl);
                msleep(EEH_PE_RST_SETTLE_TIME);

                /* Continue reporting linkDown event */
                if (aer) {
                        eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK,
                                             4, &ctrl);
                        ctrl &= ~PCI_ERR_UNC_SURPDN;
                        eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK,
                                              4, ctrl);
                }

                break;
        }

        return 0;
}

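/*
 * Reset the bus below the given bridge device. For the root bus this
 * goes through the OPAL root port reset, otherwise the bridge's
 * secondary bus reset is used; in both cases the reset is asserted
 * and then deasserted.
 */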
void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
{
        struct pci_controller *hose;

        if (pci_is_root_bus(dev->bus)) {
                hose = pci_bus_to_host(dev->bus);
                ioda_eeh_root_reset(hose, EEH_RESET_HOT);
                ioda_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
        } else {
                ioda_eeh_bridge_reset(dev, EEH_RESET_HOT);
                ioda_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
        }
}

/**
 * ioda_eeh_reset - Reset the indicated PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Do a reset on the indicated PE. For a PCI-bus-sensitive PE,
 * we need to reset the parent p2p bridge. The PHB has to be
 * reinitialized if the p2p bridge is the root bridge. For a
 * PCI-device-sensitive PE, we would try to reset the device
 * through FLR. For now, we don't have OPAL APIs to do a HARD
 * reset yet, so all resets are SOFT (HOT) resets.
 */
static int ioda_eeh_reset(struct eeh_pe *pe, int option)
{
        struct pci_controller *hose = pe->phb;
        struct pci_bus *bus;
        int ret;

        /*
         * For a PHB reset, we always do a complete reset. For PEs whose
         * primary bus is derived from the root complex (root bus) or the
         * root port (usually bus#1), we apply a hot or fundamental reset
         * on the root port. For other PEs, we always do a hot reset on
         * the PE's primary bus.
         *
         * Our design here differs from pHyp, which always clears the
         * frozen state during PE reset. The idea (from benh) is to keep
         * the frozen state until the PE reset is done completely (until
         * BAR restore). With the frozen state, the HW drops illegal IO
         * or MMIO accesses, which could otherwise cause a recursive
         * frozen PE during the reset. The side effect is that the EEH
         * core has to clear the frozen state explicitly after BAR
         * restore.
         */
        if (pe->type & EEH_PE_PHB) {
                ret = ioda_eeh_phb_reset(hose, option);
        } else {
                bus = eeh_pe_bus_get(pe);
                if (pci_is_root_bus(bus) ||
                    pci_is_root_bus(bus->parent))
                        ret = ioda_eeh_root_reset(hose, option);
                else
                        ret = ioda_eeh_bridge_reset(bus->self, option);
        }

        return ret;
}

/**
 * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE
 * @pe: EEH PE
 *
 * A particular PE might include PCI bridges. In order for the PE to
 * work properly, those PCI bridges should be configured correctly.
 * However, nothing needs to be done on P7IOC since the reset function
 * already takes care of everything this function would cover.
 */
static int ioda_eeh_configure_bridge(struct eeh_pe *pe)
{
        return 0;
}

static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
{
        /* GEM */
        pr_info("  GEM XFIR:        %016llx\n", data->gemXfir);
        pr_info("  GEM RFIR:        %016llx\n", data->gemRfir);
        pr_info("  GEM RIRQFIR:     %016llx\n", data->gemRirqfir);
        pr_info("  GEM Mask:        %016llx\n", data->gemMask);
        pr_info("  GEM RWOF:        %016llx\n", data->gemRwof);

        /* LEM */
        pr_info("  LEM FIR:         %016llx\n", data->lemFir);
        pr_info("  LEM Error Mask:  %016llx\n", data->lemErrMask);
        pr_info("  LEM Action 0:    %016llx\n", data->lemAction0);
        pr_info("  LEM Action 1:    %016llx\n", data->lemAction1);
        pr_info("  LEM WOF:         %016llx\n", data->lemWof);
}

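/*
 * Fetch and dump the P7IOC hub diag-data from OPAL. The common GEM
 * and LEM registers are printed first, followed by the fields that
 * are specific to the reported diag-data type (RGC, BI, CI, etc.).
 */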
static void ioda_eeh_hub_diag(struct pci_controller *hose)
{
        struct pnv_phb *phb = hose->private_data;
        struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
        long rc;

        rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
        if (rc != OPAL_SUCCESS) {
                pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
                           __func__, phb->hub_id, rc);
                return;
        }

        switch (data->type) {
        case OPAL_P7IOC_DIAG_TYPE_RGC:
                pr_info("P7IOC diag-data for RGC\n\n");
                ioda_eeh_hub_diag_common(data);
                pr_info("  RGC Status:      %016llx\n", data->rgc.rgcStatus);
                pr_info("  RGC LDCP:        %016llx\n", data->rgc.rgcLdcp);
                break;
        case OPAL_P7IOC_DIAG_TYPE_BI:
                pr_info("P7IOC diag-data for BI %s\n\n",
                        data->bi.biDownbound ? "Downbound" : "Upbound");
                ioda_eeh_hub_diag_common(data);
                pr_info("  BI LDCP 0:       %016llx\n", data->bi.biLdcp0);
                pr_info("  BI LDCP 1:       %016llx\n", data->bi.biLdcp1);
                pr_info("  BI LDCP 2:       %016llx\n", data->bi.biLdcp2);
                pr_info("  BI Fence Status: %016llx\n", data->bi.biFenceStatus);
                break;
        case OPAL_P7IOC_DIAG_TYPE_CI:
                pr_info("P7IOC diag-data for CI Port %d\n\n",
                        data->ci.ciPort);
                ioda_eeh_hub_diag_common(data);
                pr_info("  CI Port Status:  %016llx\n", data->ci.ciPortStatus);
                pr_info("  CI Port LDCP:    %016llx\n", data->ci.ciPortLdcp);
                break;
        case OPAL_P7IOC_DIAG_TYPE_MISC:
                pr_info("P7IOC diag-data for MISC\n\n");
                ioda_eeh_hub_diag_common(data);
                break;
        case OPAL_P7IOC_DIAG_TYPE_I2C:
                pr_info("P7IOC diag-data for I2C\n\n");
                ioda_eeh_hub_diag_common(data);
                break;
        default:
                pr_warning("%s: Invalid type of HUB#%llx diag-data (%d)\n",
                           __func__, phb->hub_id, data->type);
        }
}

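/*
 * Look up the EEH PE on the given PHB from a PE number: first locate
 * the PHB PE, then search for the device PE whose config address
 * matches the PE number. Returns 0 and sets *pe on success.
 */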
static int ioda_eeh_get_pe(struct pci_controller *hose,
                           u16 pe_no, struct eeh_pe **pe)
{
        struct eeh_pe *phb_pe, *dev_pe;
        struct eeh_dev dev;

        /* Find the PHB PE */
        phb_pe = eeh_phb_pe_get(hose);
        if (!phb_pe)
                return -EEXIST;

        /* Find the PE according to PE# */
        memset(&dev, 0, sizeof(struct eeh_dev));
        dev.phb = hose;
        dev.pe_config_addr = pe_no;
        dev_pe = eeh_pe_get(&dev);
        if (!dev_pe)
                return -EEXIST;

        *pe = dev_pe;
        return 0;
}

/**
 * ioda_eeh_next_error - Retrieve next error for EEH core to handle
 * @pe: The affected PE
 *
 * The function is expected to be called by the EEH core when it gets
 * a special EEH event (without a bound PE). The function calls the
 * OPAL APIs to retrieve the next error to handle. Informational
 * errors are handled internally by the platform. However, dead IOCs,
 * dead PHBs, fenced PHBs and frozen PEs should be handled by the EEH
 * core eventually.
 */
static int ioda_eeh_next_error(struct eeh_pe **pe)
{
        struct pci_controller *hose;
        struct pnv_phb *phb;
        struct eeh_pe *phb_pe;
        u64 frozen_pe_no;
        u16 err_type, severity;
        long rc;
        int ret = EEH_NEXT_ERR_NONE;

        /*
         * While running here, it's safe to purge the event queue.
         * We should also keep the cached OPAL notifier event
         * synchronized between the kernel and the firmware.
         */
        eeh_remove_event(NULL);
        opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);

        list_for_each_entry(hose, &hose_list, list_node) {
                /*
                 * If the subordinate PCI buses of the PHB have been
                 * removed or are already under error recovery, we
                 * need not take care of it any more.
                 */
                phb = hose->private_data;
                phb_pe = eeh_phb_pe_get(hose);
                if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
                        continue;

                rc = opal_pci_next_error(phb->opal_id,
                                &frozen_pe_no, &err_type, &severity);

                /* If the OPAL API returns error, we needn't proceed */
                if (rc != OPAL_SUCCESS) {
                        pr_devel("%s: Invalid return value on "
                                 "PHB#%x (0x%lx) from opal_pci_next_error\n",
                                 __func__, hose->global_number, rc);
                        continue;
                }

                /* If the PHB doesn't have an error, stop processing */
                if (err_type == OPAL_EEH_NO_ERROR ||
                    severity == OPAL_EEH_SEV_NO_ERROR) {
                        pr_devel("%s: No error found on PHB#%x\n",
                                 __func__, hose->global_number);
                        continue;
                }

                /*
                 * Process the error. If multiple errors are pending
                 * on the specific PHB, we expect the one with the
                 * highest priority to be reported first.
                 */
                pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
                         __func__, err_type, severity,
                         frozen_pe_no, hose->global_number);
                switch (err_type) {
                case OPAL_EEH_IOC_ERROR:
                        if (severity == OPAL_EEH_SEV_IOC_DEAD) {
                                pr_err("EEH: dead IOC detected\n");
                                ret = EEH_NEXT_ERR_DEAD_IOC;
                        } else if (severity == OPAL_EEH_SEV_INF) {
                                pr_info("EEH: IOC informative error "
                                        "detected\n");
                                ioda_eeh_hub_diag(hose);
                                ret = EEH_NEXT_ERR_NONE;
                        }

                        break;
                case OPAL_EEH_PHB_ERROR:
                        if (severity == OPAL_EEH_SEV_PHB_DEAD) {
                                *pe = phb_pe;
                                pr_err("EEH: dead PHB#%x detected\n",
                                        hose->global_number);
                                ret = EEH_NEXT_ERR_DEAD_PHB;
                        } else if (severity == OPAL_EEH_SEV_PHB_FENCED) {
                                *pe = phb_pe;
                                pr_err("EEH: fenced PHB#%x detected\n",
                                        hose->global_number);
                                ret = EEH_NEXT_ERR_FENCED_PHB;
                        } else if (severity == OPAL_EEH_SEV_INF) {
                                pr_info("EEH: PHB#%x informative error "
                                        "detected\n",
                                        hose->global_number);
                                ioda_eeh_phb_diag(hose);
                                ret = EEH_NEXT_ERR_NONE;
                        }

                        break;
                case OPAL_EEH_PE_ERROR:
                        /*
                         * If we can't find the corresponding PE, the
                         * PEEV / PEST would be messy. So we force a
                         * fenced PHB so that it can be recovered.
                         *
                         * If the PE has been marked as isolated, it
                         * either has been removed permanently or is
                         * already in the middle of recovery. We
                         * needn't report it again.
                         */
                        if (ioda_eeh_get_pe(hose, frozen_pe_no, pe)) {
                                *pe = phb_pe;
                                pr_err("EEH: Escalated fenced PHB#%x "
                                       "detected for PE#%llx\n",
                                        hose->global_number,
                                        frozen_pe_no);
                                ret = EEH_NEXT_ERR_FENCED_PHB;
                        } else if ((*pe)->state & EEH_PE_ISOLATED) {
                                ret = EEH_NEXT_ERR_NONE;
                        } else {
                                pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
                                        (*pe)->addr, (*pe)->phb->global_number);
                                ret = EEH_NEXT_ERR_FROZEN_PE;
                        }

                        break;
                default:
                        pr_warn("%s: Unexpected error type %d\n",
                                __func__, err_type);
                }

                /*
                 * The EEH core will try to recover from a fenced PHB
                 * or a frozen PE. For a frozen PE, the EEH core
                 * enables the IO path before collecting logs, which
                 * disturbs the error site. So we have to dump the log
                 * in advance here.
                 */
                if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
                    ret == EEH_NEXT_ERR_FENCED_PHB) &&
                    !((*pe)->state & EEH_PE_ISOLATED)) {
                        eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
                        ioda_eeh_phb_diag(hose);
                }

                /*
                 * If we have no error on the specific PHB or only an
                 * informational error there, we continue poking it.
                 * Otherwise, we need actions to be taken by the upper
                 * layer.
                 */
                if (ret > EEH_NEXT_ERR_INF)
                        break;
        }

        return ret;
}

struct pnv_eeh_ops ioda_eeh_ops = {
        .post_init              = ioda_eeh_post_init,
        .set_option             = ioda_eeh_set_option,
        .get_state              = ioda_eeh_get_state,
        .reset                  = ioda_eeh_reset,
        .configure_bridge       = ioda_eeh_configure_bridge,
        .next_error             = ioda_eeh_next_error
};