]> git.karo-electronics.de Git - karo-tx-linux.git/blob - arch/powerpc/platforms/powernv/eeh-powernv.c
Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
[karo-tx-linux.git] / arch / powerpc / platforms / powernv / eeh-powernv.c
1 /*
2  * The file intends to implement the platform dependent EEH operations on
3  * powernv platform. Actually, the powernv was created in order to fully
4  * hypervisor support.
5  *
6  * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  */
13
14 #include <linux/atomic.h>
15 #include <linux/debugfs.h>
16 #include <linux/delay.h>
17 #include <linux/export.h>
18 #include <linux/init.h>
19 #include <linux/interrupt.h>
20 #include <linux/list.h>
21 #include <linux/msi.h>
22 #include <linux/of.h>
23 #include <linux/pci.h>
24 #include <linux/proc_fs.h>
25 #include <linux/rbtree.h>
26 #include <linux/sched.h>
27 #include <linux/seq_file.h>
28 #include <linux/spinlock.h>
29
30 #include <asm/eeh.h>
31 #include <asm/eeh_event.h>
32 #include <asm/firmware.h>
33 #include <asm/io.h>
34 #include <asm/iommu.h>
35 #include <asm/machdep.h>
36 #include <asm/msi_bitmap.h>
37 #include <asm/opal.h>
38 #include <asm/ppc-pci.h>
39 #include <asm/pnv-pci.h>
40
41 #include "powernv.h"
42 #include "pci.h"
43
/* Whether the OPAL PCI-error event interrupt below has been registered */
static bool pnv_eeh_nb_init = false;
/* IRQ mapped for OPAL_EVENT_PCI_ERROR; stays -EINVAL until registered */
static int eeh_event_irq = -EINVAL;
46
/*
 * Platform-dependent EEH initialization for powernv: requires OPAL
 * firmware, selects device-based probing, and tunes global EEH core
 * flags based on the first PHB's model and reserved PE number.
 *
 * Returns 0 on success or -EINVAL when OPAL is unavailable.
 */
static int pnv_eeh_init(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;

	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
		pr_warn("%s: OPAL is required !\n",
			__func__);
		return -EINVAL;
	}

	/* Set probe mode */
	eeh_add_flag(EEH_PROBE_MODE_DEV);

	/*
	 * P7IOC blocks PCI config access to frozen PE, but PHB3
	 * doesn't do that. So we have to selectively enable I/O
	 * prior to collecting error log.
	 */
	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		if (phb->model == PNV_PHB_MODEL_P7IOC)
			eeh_add_flag(EEH_ENABLE_IO_FOR_LOG);

		/*
		 * PE#0 should be regarded as valid by EEH core
		 * if it's not the reserved one. Currently, we
		 * have the reserved PE#255 and PE#127 for PHB3
		 * and P7IOC separately. So we should regard
		 * PE#0 as valid for PHB3 and P7IOC.
		 */
		if (phb->ioda.reserved_pe_idx != 0)
			eeh_add_flag(EEH_VALID_PE_ZERO);

		/*
		 * Only the first PHB is inspected: the flags above
		 * are global, so all PHBs are presumably of the same
		 * model — TODO confirm this holds on mixed systems.
		 */
		break;
	}

	return 0;
}
87
/*
 * Interrupt handler for the OPAL PCI error event: masks further
 * occurrences of the interrupt and, when EEH is enabled, queues one
 * special (PHB-wide, pe == NULL) failure event for the EEH core.
 */
static irqreturn_t pnv_eeh_event(int irq, void *data)
{
	/*
	 * We simply send a special EEH event if EEH has been
	 * enabled. We don't care about EEH events until we've
	 * finished processing the outstanding ones. Event processing
	 * gets unmasked in next_error() if EEH is enabled.
	 */
	disable_irq_nosync(irq);

	if (eeh_enabled())
		eeh_send_failure_event(NULL);

	return IRQ_HANDLED;
}
103
104 #ifdef CONFIG_DEBUG_FS
105 static ssize_t pnv_eeh_ei_write(struct file *filp,
106                                 const char __user *user_buf,
107                                 size_t count, loff_t *ppos)
108 {
109         struct pci_controller *hose = filp->private_data;
110         struct eeh_dev *edev;
111         struct eeh_pe *pe;
112         int pe_no, type, func;
113         unsigned long addr, mask;
114         char buf[50];
115         int ret;
116
117         if (!eeh_ops || !eeh_ops->err_inject)
118                 return -ENXIO;
119
120         /* Copy over argument buffer */
121         ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
122         if (!ret)
123                 return -EFAULT;
124
125         /* Retrieve parameters */
126         ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
127                      &pe_no, &type, &func, &addr, &mask);
128         if (ret != 5)
129                 return -EINVAL;
130
131         /* Retrieve PE */
132         edev = kzalloc(sizeof(*edev), GFP_KERNEL);
133         if (!edev)
134                 return -ENOMEM;
135         edev->phb = hose;
136         edev->pe_config_addr = pe_no;
137         pe = eeh_pe_get(edev);
138         kfree(edev);
139         if (!pe)
140                 return -ENODEV;
141
142         /* Do error injection */
143         ret = eeh_ops->err_inject(pe, type, func, addr, mask);
144         return ret < 0 ? ret : count;
145 }
146
/* File operations for the "err_injct" debugfs entry (write-only) */
static const struct file_operations pnv_eeh_ei_fops = {
	.open   = simple_open,
	.llseek = no_llseek,
	.write  = pnv_eeh_ei_write,
};
152
153 static int pnv_eeh_dbgfs_set(void *data, int offset, u64 val)
154 {
155         struct pci_controller *hose = data;
156         struct pnv_phb *phb = hose->private_data;
157
158         out_be64(phb->regs + offset, val);
159         return 0;
160 }
161
162 static int pnv_eeh_dbgfs_get(void *data, int offset, u64 *val)
163 {
164         struct pci_controller *hose = data;
165         struct pnv_phb *phb = hose->private_data;
166
167         *val = in_be64(phb->regs + offset);
168         return 0;
169 }
170
/*
 * Generate the debugfs accessor pair and a DEFINE_SIMPLE_ATTRIBUTE
 * fops for one error-injection register. @reg is the register
 * offset within the PHB register space, read/written via the
 * helpers above.
 */
#define PNV_EEH_DBGFS_ENTRY(name, reg)                          \
static int pnv_eeh_dbgfs_set_##name(void *data, u64 val)        \
{                                                               \
        return pnv_eeh_dbgfs_set(data, reg, val);               \
}                                                               \
                                                                \
static int pnv_eeh_dbgfs_get_##name(void *data, u64 *val)       \
{                                                               \
        return pnv_eeh_dbgfs_get(data, reg, val);               \
}                                                               \
                                                                \
DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_dbgfs_ops_##name,               \
                        pnv_eeh_dbgfs_get_##name,               \
                        pnv_eeh_dbgfs_set_##name,               \
                        "0x%llx\n")

/* Outbound and the two inbound error-injection register entries */
PNV_EEH_DBGFS_ENTRY(outb, 0xD10);
PNV_EEH_DBGFS_ENTRY(inbA, 0xD90);
PNV_EEH_DBGFS_ENTRY(inbB, 0xE10);
190
191 #endif /* CONFIG_DEBUG_FS */
192
/**
 * pnv_eeh_post_init - EEH platform dependent post initialization
 *
 * EEH platform dependent post initialization on powernv. When
 * the function is called, the EEH PEs and devices should have
 * been built. This registers the OPAL PCI-error interrupt (once),
 * propagates the EEH-enabled state into each PHB's flags, and
 * creates the per-PHB error-injection debugfs entries.
 */
static int pnv_eeh_post_init(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	int ret = 0;

	/* Register OPAL event notifier */
	if (!pnv_eeh_nb_init) {
		eeh_event_irq = opal_event_request(ilog2(OPAL_EVENT_PCI_ERROR));
		if (eeh_event_irq < 0) {
			pr_err("%s: Can't register OPAL event interrupt (%d)\n",
			       __func__, eeh_event_irq);
			return eeh_event_irq;
		}

		/*
		 * NOTE(review): IRQ_TYPE_LEVEL_HIGH is an irq *type*
		 * constant passed where request_irq() expects IRQF_*
		 * flags — presumably it shares the value of
		 * IRQF_TRIGGER_HIGH; confirm and consider spelling
		 * the IRQF_ constant explicitly.
		 */
		ret = request_irq(eeh_event_irq, pnv_eeh_event,
				IRQ_TYPE_LEVEL_HIGH, "opal-eeh", NULL);
		if (ret < 0) {
			/* Undo the mapping created by opal_event_request() */
			irq_dispose_mapping(eeh_event_irq);
			pr_err("%s: Can't request OPAL event interrupt (%d)\n",
			       __func__, eeh_event_irq);
			return ret;
		}

		pnv_eeh_nb_init = true;
	}

	/* Keep error events masked until EEH is actually enabled */
	if (!eeh_enabled())
		disable_irq(eeh_event_irq);

	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		/*
		 * If EEH is enabled, we're going to rely on that.
		 * Otherwise, we restore to conventional mechanism
		 * to clear frozen PE during PCI config access.
		 */
		if (eeh_enabled())
			phb->flags |= PNV_PHB_FLAG_EEH;
		else
			phb->flags &= ~PNV_PHB_FLAG_EEH;

		/* Create debugfs entries (at most once per PHB) */
#ifdef CONFIG_DEBUG_FS
		if (phb->has_dbgfs || !phb->dbgfs)
			continue;

		phb->has_dbgfs = 1;
		debugfs_create_file("err_injct", 0200,
				    phb->dbgfs, hose,
				    &pnv_eeh_ei_fops);

		debugfs_create_file("err_injct_outbound", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_dbgfs_ops_outb);
		debugfs_create_file("err_injct_inboundA", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_dbgfs_ops_inbA);
		debugfs_create_file("err_injct_inboundB", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_dbgfs_ops_inbB);
#endif /* CONFIG_DEBUG_FS */
	}

	return ret;
}
268
269 static int pnv_eeh_find_cap(struct pci_dn *pdn, int cap)
270 {
271         int pos = PCI_CAPABILITY_LIST;
272         int cnt = 48;   /* Maximal number of capabilities */
273         u32 status, id;
274
275         if (!pdn)
276                 return 0;
277
278         /* Check if the device supports capabilities */
279         pnv_pci_cfg_read(pdn, PCI_STATUS, 2, &status);
280         if (!(status & PCI_STATUS_CAP_LIST))
281                 return 0;
282
283         while (cnt--) {
284                 pnv_pci_cfg_read(pdn, pos, 1, &pos);
285                 if (pos < 0x40)
286                         break;
287
288                 pos &= ~3;
289                 pnv_pci_cfg_read(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
290                 if (id == 0xff)
291                         break;
292
293                 /* Found */
294                 if (id == cap)
295                         return pos;
296
297                 /* Next one */
298                 pos += PCI_CAP_LIST_NEXT;
299         }
300
301         return 0;
302 }
303
/*
 * Walk the PCIe extended capability list of @pdn and return the
 * config-space position of extended capability @cap, or 0 when not
 * found. Requires edev->pcie_cap to have been discovered already
 * (extended capabilities only exist on PCIe devices). The walk is
 * bounded by ttl to avoid spinning on a malformed list.
 */
static int pnv_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 header;
	int pos = 256, ttl = (4096 - 256) / 8;

	if (!edev || !edev->pcie_cap)
		return 0;
	/* An unreadable or all-zero first header means no ext-cap list */
	if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
		return 0;
	else if (!header)
		return 0;

	while (ttl-- > 0) {
		/* pos is always >= 256 here, so the && pos guard never fires */
		if (PCI_EXT_CAP_ID(header) == cap && pos)
			return pos;

		/* Next pointer below 256 (incl. 0) terminates the list */
		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 256)
			break;

		if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
331
332 /**
333  * pnv_eeh_probe - Do probe on PCI device
334  * @pdn: PCI device node
335  * @data: unused
336  *
337  * When EEH module is installed during system boot, all PCI devices
338  * are checked one by one to see if it supports EEH. The function
339  * is introduced for the purpose. By default, EEH has been enabled
340  * on all PCI devices. That's to say, we only need do necessary
341  * initialization on the corresponding eeh device and create PE
342  * accordingly.
343  *
344  * It's notable that's unsafe to retrieve the EEH device through
345  * the corresponding PCI device. During the PCI device hotplug, which
346  * was possiblly triggered by EEH core, the binding between EEH device
347  * and the PCI device isn't built yet.
348  */
349 static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
350 {
351         struct pci_controller *hose = pdn->phb;
352         struct pnv_phb *phb = hose->private_data;
353         struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
354         uint32_t pcie_flags;
355         int ret;
356
357         /*
358          * When probing the root bridge, which doesn't have any
359          * subordinate PCI devices. We don't have OF node for
360          * the root bridge. So it's not reasonable to continue
361          * the probing.
362          */
363         if (!edev || edev->pe)
364                 return NULL;
365
366         /* Skip for PCI-ISA bridge */
367         if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
368                 return NULL;
369
370         /* Initialize eeh device */
371         edev->class_code = pdn->class_code;
372         edev->mode      &= 0xFFFFFF00;
373         edev->pcix_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
374         edev->pcie_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
375         edev->af_cap   = pnv_eeh_find_cap(pdn, PCI_CAP_ID_AF);
376         edev->aer_cap  = pnv_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
377         if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
378                 edev->mode |= EEH_DEV_BRIDGE;
379                 if (edev->pcie_cap) {
380                         pnv_pci_cfg_read(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
381                                          2, &pcie_flags);
382                         pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
383                         if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
384                                 edev->mode |= EEH_DEV_ROOT_PORT;
385                         else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
386                                 edev->mode |= EEH_DEV_DS_PORT;
387                 }
388         }
389
390         edev->config_addr    = (pdn->busno << 8) | (pdn->devfn);
391         edev->pe_config_addr = phb->ioda.pe_rmap[edev->config_addr];
392
393         /* Create PE */
394         ret = eeh_add_to_parent_pe(edev);
395         if (ret) {
396                 pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%x)\n",
397                         __func__, hose->global_number, pdn->busno,
398                         PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn), ret);
399                 return NULL;
400         }
401
402         /*
403          * If the PE contains any one of following adapters, the
404          * PCI config space can't be accessed when dumping EEH log.
405          * Otherwise, we will run into fenced PHB caused by shortage
406          * of outbound credits in the adapter. The PCI config access
407          * should be blocked until PE reset. MMIO access is dropped
408          * by hardware certainly. In order to drop PCI config requests,
409          * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which
410          * will be checked in the backend for PE state retrival. If
411          * the PE becomes frozen for the first time and the flag has
412          * been set for the PE, we will set EEH_PE_CFG_BLOCKED for
413          * that PE to block its config space.
414          *
415          * Broadcom BCM5718 2-ports NICs (14e4:1656)
416          * Broadcom Austin 4-ports NICs (14e4:1657)
417          * Broadcom Shiner 4-ports 1G NICs (14e4:168a)
418          * Broadcom Shiner 2-ports 10G NICs (14e4:168e)
419          */
420         if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
421              pdn->device_id == 0x1656) ||
422             (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
423              pdn->device_id == 0x1657) ||
424             (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
425              pdn->device_id == 0x168a) ||
426             (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
427              pdn->device_id == 0x168e))
428                 edev->pe->state |= EEH_PE_CFG_RESTRICTED;
429
430         /*
431          * Cache the PE primary bus, which can't be fetched when
432          * full hotplug is in progress. In that case, all child
433          * PCI devices of the PE are expected to be removed prior
434          * to PE reset.
435          */
436         if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
437                 edev->pe->bus = pci_find_bus(hose->global_number,
438                                              pdn->busno);
439                 if (edev->pe->bus)
440                         edev->pe->state |= EEH_PE_PRI_BUS;
441         }
442
443         /*
444          * Enable EEH explicitly so that we will do EEH check
445          * while accessing I/O stuff
446          */
447         eeh_add_flag(EEH_ENABLED);
448
449         /* Save memory bars */
450         eeh_save_bars(edev);
451
452         return NULL;
453 }
454
/**
 * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, following options are support according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA.
 *
 * Returns 0 on success, -EPERM for an attempt to disable EEH,
 * -EINVAL for an unknown option, -EIO on OPAL failure, or the
 * result of the PHB's own unfreeze_pe() callback.
 */
static int pnv_eeh_set_option(struct eeh_pe *pe, int option)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;
	bool freeze_pe = false;
	int opt;
	s64 rc;

	switch (option) {
	case EEH_OPT_DISABLE:
		/* EEH cannot be disabled on this platform */
		return -EPERM;
	case EEH_OPT_ENABLE:
		/* Already enabled; nothing to do */
		return 0;
	case EEH_OPT_THAW_MMIO:
		opt = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO;
		break;
	case EEH_OPT_THAW_DMA:
		opt = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA;
		break;
	case EEH_OPT_FREEZE_PE:
		freeze_pe = true;
		opt = OPAL_EEH_ACTION_SET_FREEZE_ALL;
		break;
	default:
		pr_warn("%s: Invalid option %d\n", __func__, option);
		return -EINVAL;
	}

	/* Freeze master and slave PEs if PHB supports compound PEs */
	if (freeze_pe) {
		if (phb->freeze_pe) {
			phb->freeze_pe(phb, pe->addr);
			return 0;
		}

		/* Fall back to the plain OPAL freeze call */
		rc = opal_pci_eeh_freeze_set(phb->opal_id, pe->addr, opt);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe->addr);
			return -EIO;
		}

		return 0;
	}

	/* Unfreeze master and slave PEs if PHB supports */
	if (phb->unfreeze_pe)
		return phb->unfreeze_pe(phb, pe->addr, opt);

	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe->addr, opt);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld enable %d for PHB#%x-PE#%x\n",
			__func__, rc, option, phb->hose->global_number,
			pe->addr);
		return -EIO;
	}

	return 0;
}
524
525 /**
526  * pnv_eeh_get_pe_addr - Retrieve PE address
527  * @pe: EEH PE
528  *
529  * Retrieve the PE address according to the given tranditional
530  * PCI BDF (Bus/Device/Function) address.
531  */
532 static int pnv_eeh_get_pe_addr(struct eeh_pe *pe)
533 {
534         return pe->addr;
535 }
536
537 static void pnv_eeh_get_phb_diag(struct eeh_pe *pe)
538 {
539         struct pnv_phb *phb = pe->phb->private_data;
540         s64 rc;
541
542         rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data,
543                                          PNV_PCI_DIAG_BUF_SIZE);
544         if (rc != OPAL_SUCCESS)
545                 pr_warn("%s: Failure %lld getting PHB#%x diag-data\n",
546                         __func__, rc, pe->phb->global_number);
547 }
548
/*
 * Retrieve the EEH state of a PHB-level PE as a bitmask of
 * EEH_STATE_* flags: fully operational when no PHB-level error is
 * pending, otherwise 0 (frozen). On the first transition into the
 * error state the PE is marked isolated and the PHB diag-data is
 * captured (and dumped immediately if early dumping is enabled).
 */
static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	u8 fstate;
	__be16 pcierr;
	s64 rc;
	int result = 0;

	rc = opal_pci_eeh_freeze_status(phb->opal_id,
					pe->addr,
					&fstate,
					&pcierr,
					NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting PHB#%x state\n",
			__func__, rc, phb->hose->global_number);
		return EEH_STATE_NOT_SUPPORT;
	}

	/*
	 * Check PHB state. If the PHB is frozen for the
	 * first time, to dump the PHB diag-data.
	 */
	if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
		result = (EEH_STATE_MMIO_ACTIVE  |
			  EEH_STATE_DMA_ACTIVE   |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
	} else if (!(pe->state & EEH_PE_ISOLATED)) {
		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
		pnv_eeh_get_phb_diag(pe);

		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
	}

	return result;
}
587
/*
 * Retrieve the EEH state of a (non-PHB) PE as a bitmask of
 * EEH_STATE_* flags. While the PE is under reset a fully
 * operational state is reported so the EEH core keeps making
 * progress. On the first transition into a frozen state the PE is
 * marked isolated, slave PEs are frozen for consistency on
 * compound-PE PHBs, and the PHB diag-data is captured.
 */
static int pnv_eeh_get_pe_state(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	u8 fstate;
	__be16 pcierr;	/* filled only on the OPAL path; not consumed here */
	s64 rc;
	int result;

	/*
	 * We don't clobber hardware frozen state until PE
	 * reset is completed. In order to keep EEH core
	 * moving forward, we have to return operational
	 * state during PE reset.
	 */
	if (pe->state & EEH_PE_RESET) {
		result = (EEH_STATE_MMIO_ACTIVE  |
			  EEH_STATE_DMA_ACTIVE   |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
		return result;
	}

	/*
	 * Fetch PE state from hardware. If the PHB
	 * supports compound PE, let it handle that.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe->addr);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe->addr,
						&fstate,
						&pcierr,
						NULL);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n",
				__func__, rc, phb->hose->global_number,
				pe->addr);
			return EEH_STATE_NOT_SUPPORT;
		}
	}

	/* Map the OPAL freeze state onto EEH core state flags */
	switch (fstate) {
	case OPAL_EEH_STOPPED_NOT_FROZEN:
		result = (EEH_STATE_MMIO_ACTIVE  |
			  EEH_STATE_DMA_ACTIVE   |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
		break;
	case OPAL_EEH_STOPPED_MMIO_FREEZE:
		result = (EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_DMA_ENABLED);
		break;
	case OPAL_EEH_STOPPED_DMA_FREEZE:
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_MMIO_ENABLED);
		break;
	case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
		result = 0;
		break;
	case OPAL_EEH_STOPPED_RESET:
		result = EEH_STATE_RESET_ACTIVE;
		break;
	case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
		result = EEH_STATE_UNAVAILABLE;
		break;
	case OPAL_EEH_STOPPED_PERM_UNAVAIL:
		result = EEH_STATE_NOT_SUPPORT;
		break;
	default:
		result = EEH_STATE_NOT_SUPPORT;
		pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n",
			__func__, phb->hose->global_number,
			pe->addr, fstate);
	}

	/*
	 * If PHB supports compound PE, to freeze all
	 * slave PEs for consistency.
	 *
	 * If the PE is switching to frozen state for the
	 * first time, to dump the PHB diag-data.
	 */
	if (!(result & EEH_STATE_NOT_SUPPORT) &&
	    !(result & EEH_STATE_UNAVAILABLE) &&
	    !(result & EEH_STATE_MMIO_ACTIVE) &&
	    !(result & EEH_STATE_DMA_ACTIVE)  &&
	    !(pe->state & EEH_PE_ISOLATED)) {
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe->addr);

		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
		pnv_eeh_get_phb_diag(pe);

		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
	}

	return result;
}
689
690 /**
691  * pnv_eeh_get_state - Retrieve PE state
692  * @pe: EEH PE
693  * @delay: delay while PE state is temporarily unavailable
694  *
695  * Retrieve the state of the specified PE. For IODA-compitable
696  * platform, it should be retrieved from IODA table. Therefore,
697  * we prefer passing down to hardware implementation to handle
698  * it.
699  */
700 static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
701 {
702         int ret;
703
704         if (pe->type & EEH_PE_PHB)
705                 ret = pnv_eeh_get_phb_state(pe);
706         else
707                 ret = pnv_eeh_get_pe_state(pe);
708
709         if (!delay)
710                 return ret;
711
712         /*
713          * If the PE state is temporarily unavailable,
714          * to inform the EEH core delay for default
715          * period (1 second)
716          */
717         *delay = 0;
718         if (ret & EEH_STATE_UNAVAILABLE)
719                 *delay = 1000;
720
721         return ret;
722 }
723
724 static s64 pnv_eeh_poll(unsigned long id)
725 {
726         s64 rc = OPAL_HARDWARE;
727
728         while (1) {
729                 rc = opal_pci_poll(id);
730                 if (rc <= 0)
731                         break;
732
733                 if (system_state < SYSTEM_RUNNING)
734                         udelay(1000 * rc);
735                 else
736                         msleep(rc);
737         }
738
739         return rc;
740 }
741
/*
 * Issue (or deassert) a complete reset of the given PHB via OPAL.
 * EEH_RESET_FUNDAMENTAL and EEH_RESET_HOT both assert the PHB
 * complete reset; EEH_RESET_DEACTIVATE deasserts it. Returns 0 on
 * success, -EIO on OPAL failure (including an unrecognized @option,
 * which leaves rc at its OPAL_HARDWARE initializer).
 */
int pnv_eeh_phb_reset(struct pci_controller *hose, int option)
{
	struct pnv_phb *phb = hose->private_data;
	s64 rc = OPAL_HARDWARE;

	pr_debug("%s: Reset PHB#%x, option=%d\n",
		 __func__, hose->global_number, option);

	/* Issue PHB complete reset request */
	if (option == EEH_RESET_FUNDAMENTAL ||
	    option == EEH_RESET_HOT)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PHB_COMPLETE,
				    OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_DEACTIVATE)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PHB_COMPLETE,
				    OPAL_DEASSERT_RESET);
	if (rc < 0)
		goto out;

	/*
	 * Poll state of the PHB until the request is done
	 * successfully. The PHB reset is usually PHB complete
	 * reset followed by hot reset on root bus. So we also
	 * need the PCI bus settlement delay.
	 */
	if (rc > 0)
		rc = pnv_eeh_poll(phb->opal_id);
	if (option == EEH_RESET_DEACTIVATE) {
		/* udelay() when msleep() isn't usable during early boot */
		if (system_state < SYSTEM_RUNNING)
			udelay(1000 * EEH_PE_RST_SETTLE_TIME);
		else
			msleep(EEH_PE_RST_SETTLE_TIME);
	}
out:
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
783
/*
 * Issue a reset on the root bus under @hose via OPAL. Fundamental
 * and hot resets are asserted with the matching scope; deassertion
 * always uses the hot-reset scope (see comment below). Returns 0 on
 * success, -EIO on OPAL failure or an unrecognized @option (rc
 * keeps its OPAL_HARDWARE initializer in that case).
 */
static int pnv_eeh_root_reset(struct pci_controller *hose, int option)
{
	struct pnv_phb *phb = hose->private_data;
	s64 rc = OPAL_HARDWARE;

	pr_debug("%s: Reset PHB#%x, option=%d\n",
		 __func__, hose->global_number, option);

	/*
	 * During the reset deassert time, we needn't care
	 * the reset scope because the firmware does nothing
	 * for fundamental or hot reset during deassert phase.
	 */
	if (option == EEH_RESET_FUNDAMENTAL)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PCI_FUNDAMENTAL,
				    OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_HOT)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PCI_HOT,
				    OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_DEACTIVATE)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PCI_HOT,
				    OPAL_DEASSERT_RESET);
	if (rc < 0)
		goto out;

	/* Poll state of the PHB until the request is done */
	if (rc > 0)
		rc = pnv_eeh_poll(phb->opal_id);
	/* Allow the downstream bus to settle after deassert */
	if (option == EEH_RESET_DEACTIVATE)
		msleep(EEH_PE_RST_SETTLE_TIME);
out:
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
823
/*
 * Reset the secondary bus of bridge @dev by toggling the bridge
 * control register directly (fallback when firmware can't perform
 * the reset). While the reset is asserted, the Surprise-Down
 * uncorrectable error is masked in AER (when the capability is
 * present) so that the expected link drop isn't reported as an
 * error. Always returns 0; unknown @option values are ignored.
 */
static int __pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
{
	struct pci_dn *pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	int aer = edev ? edev->aer_cap : 0;
	u32 ctrl;

	pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n",
		 __func__, pci_domain_nr(dev->bus),
		 dev->bus->number, option);

	switch (option) {
	case EEH_RESET_FUNDAMENTAL:
	case EEH_RESET_HOT:
		/* Don't report linkDown event */
		if (aer) {
			eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
					     4, &ctrl);
			ctrl |= PCI_ERR_UNC_SURPDN;
			eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
					      4, ctrl);
		}

		/* Assert secondary bus reset and hold it */
		eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
		ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
		eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);

		msleep(EEH_PE_RST_HOLD_TIME);
		break;
	case EEH_RESET_DEACTIVATE:
		/* Deassert the reset and let the bus settle */
		eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
		ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
		eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);

		msleep(EEH_PE_RST_SETTLE_TIME);

		/* Continue reporting linkDown event */
		if (aer) {
			eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
					     4, &ctrl);
			ctrl &= ~PCI_ERR_UNC_SURPDN;
			eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
					      4, ctrl);
		}

		break;
	}

	return 0;
}
874
/*
 * Reset the bus below the given bridge.  When the device-tree node
 * carries the "ibm,reset-by-firmware" property, the reset is delegated
 * to OPAL using a slot identifier built from the PHB's opal_id plus
 * bus/devfn; otherwise fall back to banging the bridge's config space
 * directly via __pnv_eeh_bridge_reset().
 *
 * Returns 0 on success, -EINVAL on an unsupported option, -EIO on
 * OPAL failure.
 */
static int pnv_eeh_bridge_reset(struct pci_dev *pdev, int option)
{
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
        struct device_node *dn = pci_device_to_OF_node(pdev);
        uint64_t id = PCI_SLOT_ID(phb->opal_id,
                                  (pdev->bus->number << 8) | pdev->devfn);
        uint8_t scope;
        int64_t rc;

        /* Hot reset to the bus if firmware cannot handle */
        if (!dn || !of_get_property(dn, "ibm,reset-by-firmware", NULL))
                return __pnv_eeh_bridge_reset(pdev, option);

        /* Map the EEH reset option onto an OPAL reset scope */
        switch (option) {
        case EEH_RESET_FUNDAMENTAL:
                scope = OPAL_RESET_PCI_FUNDAMENTAL;
                break;
        case EEH_RESET_HOT:
                scope = OPAL_RESET_PCI_HOT;
                break;
        case EEH_RESET_DEACTIVATE:
                /* Firmware completes the deassert phase itself */
                return 0;
        default:
                dev_dbg(&pdev->dev, "%s: Unsupported reset %d\n",
                        __func__, option);
                return -EINVAL;
        }

        /* A positive return code asks us to poll until completion */
        rc = opal_pci_reset(id, scope, OPAL_ASSERT_RESET);
        if (rc <= OPAL_SUCCESS)
                goto out;

        rc = pnv_eeh_poll(id);
out:
        return (rc == OPAL_SUCCESS) ? 0 : -EIO;
}
912
913 void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
914 {
915         struct pci_controller *hose;
916
917         if (pci_is_root_bus(dev->bus)) {
918                 hose = pci_bus_to_host(dev->bus);
919                 pnv_eeh_root_reset(hose, EEH_RESET_HOT);
920                 pnv_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
921         } else {
922                 pnv_eeh_bridge_reset(dev, EEH_RESET_HOT);
923                 pnv_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
924         }
925 }
926
/*
 * Poll the 16-bit config register at @pos until the bit(s) in @mask
 * clear, backing off exponentially (100/200/400/800 ms, ~1.5s total).
 * Used before issuing a (AF-)FLR; @type ("" or "AF") only labels the
 * warning printed if transactions are still pending at timeout.
 */
static void pnv_eeh_wait_for_pending(struct pci_dn *pdn, const char *type,
                                     int pos, u16 mask)
{
        struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
        int i, status = 0;

        /* Wait for Transaction Pending bit to be cleared */
        for (i = 0; i < 4; i++) {
                eeh_ops->read_config(pdn, pos, 2, &status);
                if (!(status & mask))
                        return;

                msleep((1 << i) * 100);
        }

        pr_warn("%s: Pending transaction while issuing %sFLR to %04x:%02x:%02x.%01x\n",
                __func__, type,
                edev->phb->global_number, pdn->busno,
                PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
}
947
/*
 * Drive a PCIe Function Level Reset on the device behind @pdn through
 * the EEH config accessors.
 *
 * For EEH_RESET_HOT/FUNDAMENTAL: wait for pending transactions to
 * drain, set the FLR bit in Device Control and hold.  For
 * EEH_RESET_DEACTIVATE: clear the bit and allow the settle time.
 *
 * Returns -ENOTTY if the device has no PCIe capability or does not
 * advertise FLR support, 0 otherwise.
 */
static int pnv_eeh_do_flr(struct pci_dn *pdn, int option)
{
        struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
        u32 reg = 0;

        if (WARN_ON(!edev->pcie_cap))
                return -ENOTTY;

        eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCAP, 4, &reg);
        if (!(reg & PCI_EXP_DEVCAP_FLR))
                return -ENOTTY;

        switch (option) {
        case EEH_RESET_HOT:
        case EEH_RESET_FUNDAMENTAL:
                pnv_eeh_wait_for_pending(pdn, "",
                                         edev->pcie_cap + PCI_EXP_DEVSTA,
                                         PCI_EXP_DEVSTA_TRPND);
                eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
                                     4, &reg);
                reg |= PCI_EXP_DEVCTL_BCR_FLR;
                eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
                                      4, reg);
                msleep(EEH_PE_RST_HOLD_TIME);
                break;
        case EEH_RESET_DEACTIVATE:
                eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
                                     4, &reg);
                reg &= ~PCI_EXP_DEVCTL_BCR_FLR;
                eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
                                      4, reg);
                msleep(EEH_PE_RST_SETTLE_TIME);
                break;
        }

        return 0;
}
985
/*
 * Drive an Advanced Features (AF) FLR on the device behind @pdn, the
 * fallback for devices without a PCIe FLR.  Same option semantics as
 * pnv_eeh_do_flr().
 *
 * Returns -ENOTTY if the device has no AF capability or does not
 * advertise both TP (transactions pending) and FLR, 0 otherwise.
 */
static int pnv_eeh_do_af_flr(struct pci_dn *pdn, int option)
{
        struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
        u32 cap = 0;

        if (WARN_ON(!edev->af_cap))
                return -ENOTTY;

        eeh_ops->read_config(pdn, edev->af_cap + PCI_AF_CAP, 1, &cap);
        if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
                return -ENOTTY;

        switch (option) {
        case EEH_RESET_HOT:
        case EEH_RESET_FUNDAMENTAL:
                /*
                 * Wait for Transaction Pending bit to clear. A word-aligned
                 * test is used, so we use the control offset rather than status
                 * and shift the test bit to match.
                 */
                pnv_eeh_wait_for_pending(pdn, "AF",
                                         edev->af_cap + PCI_AF_CTRL,
                                         PCI_AF_STATUS_TP << 8);
                eeh_ops->write_config(pdn, edev->af_cap + PCI_AF_CTRL,
                                      1, PCI_AF_CTRL_FLR);
                msleep(EEH_PE_RST_HOLD_TIME);
                break;
        case EEH_RESET_DEACTIVATE:
                /* Writing 0 clears the FLR bit; wait for the settle time */
                eeh_ops->write_config(pdn, edev->af_cap + PCI_AF_CTRL, 1, 0);
                msleep(EEH_PE_RST_SETTLE_TIME);
                break;
        }

        return 0;
}
1021
1022 static int pnv_eeh_reset_vf_pe(struct eeh_pe *pe, int option)
1023 {
1024         struct eeh_dev *edev;
1025         struct pci_dn *pdn;
1026         int ret;
1027
1028         /* The VF PE should have only one child device */
1029         edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, list);
1030         pdn = eeh_dev_to_pdn(edev);
1031         if (!pdn)
1032                 return -ENXIO;
1033
1034         ret = pnv_eeh_do_flr(pdn, option);
1035         if (!ret)
1036                 return ret;
1037
1038         return pnv_eeh_do_af_flr(pdn, option);
1039 }
1040
/**
 * pnv_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Do reset on the indicated PE. For PCI bus sensitive PE,
 * we need to reset the parent p2p bridge. The PHB has to
 * be reinitialized if the p2p bridge is root bridge. For
 * PCI device sensitive PE, we will try to reset the device
 * through FLR. For now, we don't have OPAL APIs to do HARD
 * reset yet, so all reset would be SOFT (HOT) reset.
 */
static int pnv_eeh_reset(struct eeh_pe *pe, int option)
{
        struct pci_controller *hose = pe->phb;
        struct pnv_phb *phb;
        struct pci_bus *bus;
        int64_t rc;

        /*
         * For PHB reset, we always have complete reset. For those PEs whose
         * primary bus derived from root complex (root bus) or root port
         * (usually bus#1), we apply hot or fundamental reset on the root port.
         * For other PEs, we always have hot reset on the PE primary bus.
         *
         * Here, we have different design to pHyp, which always clear the
         * frozen state during PE reset. However, the good idea here from
         * benh is to keep frozen state before we get PE reset done completely
         * (until BAR restore). With the frozen state, HW drops illegal IO
         * or MMIO access, which can incur recursive frozen PE during PE
         * reset. The side effect is that EEH core has to clear the frozen
         * state explicitly after BAR restore.
         */
        if (pe->type & EEH_PE_PHB)
                return pnv_eeh_phb_reset(hose, option);

        /*
         * The frozen PE might be caused by PAPR error injection
         * registers, which are expected to be cleared after hitting
         * frozen PE as stated in the hardware spec. Unfortunately,
         * that's not true on P7IOC. So we have to clear it manually
         * to avoid recursive EEH errors during recovery.
         */
        phb = hose->private_data;
        if (phb->model == PNV_PHB_MODEL_P7IOC &&
            (option == EEH_RESET_HOT ||
             option == EEH_RESET_FUNDAMENTAL)) {
                rc = opal_pci_reset(phb->opal_id,
                                    OPAL_RESET_PHB_ERROR,
                                    OPAL_ASSERT_RESET);
                if (rc != OPAL_SUCCESS) {
                        pr_warn("%s: Failure %lld clearing error injection registers\n",
                                __func__, rc);
                        return -EIO;
                }
        }

        /* VF PEs are reset by FLR on their single device */
        if (pe->type & EEH_PE_VF)
                return pnv_eeh_reset_vf_pe(pe, option);

        bus = eeh_pe_bus_get(pe);
        if (!bus) {
                pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
                        __func__, pe->phb->global_number, pe->addr);
                return -EIO;
        }

        /*
         * If dealing with the root bus (or the bus underneath the
         * root port), we reset the bus underneath the root port.
         *
         * The cxl driver depends on this behaviour for bi-modal card
         * switching.
         */
        if (pci_is_root_bus(bus) ||
            pci_is_root_bus(bus->parent))
                return pnv_eeh_root_reset(hose, option);

        return pnv_eeh_bridge_reset(bus->self, option);
}
1121
1122 /**
1123  * pnv_eeh_wait_state - Wait for PE state
1124  * @pe: EEH PE
1125  * @max_wait: maximal period in millisecond
1126  *
1127  * Wait for the state of associated PE. It might take some time
1128  * to retrieve the PE's state.
1129  */
1130 static int pnv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
1131 {
1132         int ret;
1133         int mwait;
1134
1135         while (1) {
1136                 ret = pnv_eeh_get_state(pe, &mwait);
1137
1138                 /*
1139                  * If the PE's state is temporarily unavailable,
1140                  * we have to wait for the specified time. Otherwise,
1141                  * the PE's state will be returned immediately.
1142                  */
1143                 if (ret != EEH_STATE_UNAVAILABLE)
1144                         return ret;
1145
1146                 if (max_wait <= 0) {
1147                         pr_warn("%s: Timeout getting PE#%x's state (%d)\n",
1148                                 __func__, pe->addr, max_wait);
1149                         return EEH_STATE_NOT_SUPPORT;
1150                 }
1151
1152                 max_wait -= mwait;
1153                 msleep(mwait);
1154         }
1155
1156         return EEH_STATE_NOT_SUPPORT;
1157 }
1158
1159 /**
1160  * pnv_eeh_get_log - Retrieve error log
1161  * @pe: EEH PE
1162  * @severity: temporary or permanent error log
1163  * @drv_log: driver log to be combined with retrieved error log
1164  * @len: length of driver log
1165  *
1166  * Retrieve the temporary or permanent error from the PE.
1167  */
1168 static int pnv_eeh_get_log(struct eeh_pe *pe, int severity,
1169                            char *drv_log, unsigned long len)
1170 {
1171         if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
1172                 pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
1173
1174         return 0;
1175 }
1176
/**
 * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE would be recovered
 * again. This is a no-op on powernv. Always returns 0.
 */
static int pnv_eeh_configure_bridge(struct eeh_pe *pe)
{
        return 0;
}
1189
1190 /**
1191  * pnv_pe_err_inject - Inject specified error to the indicated PE
1192  * @pe: the indicated PE
1193  * @type: error type
1194  * @func: specific error type
1195  * @addr: address
1196  * @mask: address mask
1197  *
1198  * The routine is called to inject specified error, which is
1199  * determined by @type and @func, to the indicated PE for
1200  * testing purpose.
1201  */
1202 static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
1203                               unsigned long addr, unsigned long mask)
1204 {
1205         struct pci_controller *hose = pe->phb;
1206         struct pnv_phb *phb = hose->private_data;
1207         s64 rc;
1208
1209         if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
1210             type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
1211                 pr_warn("%s: Invalid error type %d\n",
1212                         __func__, type);
1213                 return -ERANGE;
1214         }
1215
1216         if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
1217             func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
1218                 pr_warn("%s: Invalid error function %d\n",
1219                         __func__, func);
1220                 return -ERANGE;
1221         }
1222
1223         /* Firmware supports error injection ? */
1224         if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
1225                 pr_warn("%s: Firmware doesn't support error injection\n",
1226                         __func__);
1227                 return -ENXIO;
1228         }
1229
1230         /* Do error injection */
1231         rc = opal_pci_err_inject(phb->opal_id, pe->addr,
1232                                  type, func, addr, mask);
1233         if (rc != OPAL_SUCCESS) {
1234                 pr_warn("%s: Failure %lld injecting error "
1235                         "%d-%d to PHB#%x-PE#%x\n",
1236                         __func__, rc, type, func,
1237                         hose->global_number, pe->addr);
1238                 return -EIO;
1239         }
1240
1241         return 0;
1242 }
1243
1244 static inline bool pnv_eeh_cfg_blocked(struct pci_dn *pdn)
1245 {
1246         struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
1247
1248         if (!edev || !edev->pe)
1249                 return false;
1250
1251         /*
1252          * We will issue FLR or AF FLR to all VFs, which are contained
1253          * in VF PE. It relies on the EEH PCI config accessors. So we
1254          * can't block them during the window.
1255          */
1256         if (edev->physfn && (edev->pe->state & EEH_PE_RESET))
1257                 return false;
1258
1259         if (edev->pe->state & EEH_PE_CFG_BLOCKED)
1260                 return true;
1261
1262         return false;
1263 }
1264
1265 static int pnv_eeh_read_config(struct pci_dn *pdn,
1266                                int where, int size, u32 *val)
1267 {
1268         if (!pdn)
1269                 return PCIBIOS_DEVICE_NOT_FOUND;
1270
1271         if (pnv_eeh_cfg_blocked(pdn)) {
1272                 *val = 0xFFFFFFFF;
1273                 return PCIBIOS_SET_FAILED;
1274         }
1275
1276         return pnv_pci_cfg_read(pdn, where, size, val);
1277 }
1278
1279 static int pnv_eeh_write_config(struct pci_dn *pdn,
1280                                 int where, int size, u32 val)
1281 {
1282         if (!pdn)
1283                 return PCIBIOS_DEVICE_NOT_FOUND;
1284
1285         if (pnv_eeh_cfg_blocked(pdn))
1286                 return PCIBIOS_SET_FAILED;
1287
1288         return pnv_pci_cfg_write(pdn, where, size, val);
1289 }
1290
/*
 * Print the GEM and LEM register groups shared by every P7IOC hub
 * diag-data layout.  A group whose registers are all zero is skipped
 * to keep the log concise.  Values are big-endian in the diag blob
 * and converted for display.
 */
static void pnv_eeh_dump_hub_diag_common(struct OpalIoP7IOCErrorData *data)
{
        /* GEM */
        if (data->gemXfir || data->gemRfir ||
            data->gemRirqfir || data->gemMask || data->gemRwof)
                pr_info("  GEM: %016llx %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->gemXfir),
                        be64_to_cpu(data->gemRfir),
                        be64_to_cpu(data->gemRirqfir),
                        be64_to_cpu(data->gemMask),
                        be64_to_cpu(data->gemRwof));

        /* LEM */
        if (data->lemFir || data->lemErrMask ||
            data->lemAction0 || data->lemAction1 || data->lemWof)
                pr_info("  LEM: %016llx %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->lemFir),
                        be64_to_cpu(data->lemErrMask),
                        be64_to_cpu(data->lemAction0),
                        be64_to_cpu(data->lemAction1),
                        be64_to_cpu(data->lemWof));
}
1313
1314 static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose)
1315 {
1316         struct pnv_phb *phb = hose->private_data;
1317         struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
1318         long rc;
1319
1320         rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
1321         if (rc != OPAL_SUCCESS) {
1322                 pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n",
1323                         __func__, phb->hub_id, rc);
1324                 return;
1325         }
1326
1327         switch (be16_to_cpu(data->type)) {
1328         case OPAL_P7IOC_DIAG_TYPE_RGC:
1329                 pr_info("P7IOC diag-data for RGC\n\n");
1330                 pnv_eeh_dump_hub_diag_common(data);
1331                 if (data->rgc.rgcStatus || data->rgc.rgcLdcp)
1332                         pr_info("  RGC: %016llx %016llx\n",
1333                                 be64_to_cpu(data->rgc.rgcStatus),
1334                                 be64_to_cpu(data->rgc.rgcLdcp));
1335                 break;
1336         case OPAL_P7IOC_DIAG_TYPE_BI:
1337                 pr_info("P7IOC diag-data for BI %s\n\n",
1338                         data->bi.biDownbound ? "Downbound" : "Upbound");
1339                 pnv_eeh_dump_hub_diag_common(data);
1340                 if (data->bi.biLdcp0 || data->bi.biLdcp1 ||
1341                     data->bi.biLdcp2 || data->bi.biFenceStatus)
1342                         pr_info("  BI:  %016llx %016llx %016llx %016llx\n",
1343                                 be64_to_cpu(data->bi.biLdcp0),
1344                                 be64_to_cpu(data->bi.biLdcp1),
1345                                 be64_to_cpu(data->bi.biLdcp2),
1346                                 be64_to_cpu(data->bi.biFenceStatus));
1347                 break;
1348         case OPAL_P7IOC_DIAG_TYPE_CI:
1349                 pr_info("P7IOC diag-data for CI Port %d\n\n",
1350                         data->ci.ciPort);
1351                 pnv_eeh_dump_hub_diag_common(data);
1352                 if (data->ci.ciPortStatus || data->ci.ciPortLdcp)
1353                         pr_info("  CI:  %016llx %016llx\n",
1354                                 be64_to_cpu(data->ci.ciPortStatus),
1355                                 be64_to_cpu(data->ci.ciPortLdcp));
1356                 break;
1357         case OPAL_P7IOC_DIAG_TYPE_MISC:
1358                 pr_info("P7IOC diag-data for MISC\n\n");
1359                 pnv_eeh_dump_hub_diag_common(data);
1360                 break;
1361         case OPAL_P7IOC_DIAG_TYPE_I2C:
1362                 pr_info("P7IOC diag-data for I2C\n\n");
1363                 pnv_eeh_dump_hub_diag_common(data);
1364                 break;
1365         default:
1366                 pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n",
1367                         __func__, phb->hub_id, data->type);
1368         }
1369 }
1370
/*
 * Translate a PE number reported by OPAL into the EEH PE to operate
 * on, returned through @pe.  A slave PE of a compound PE is first
 * redirected to its master, because slave PEs are invisible to the
 * EEH core.  The matched PE is frozen (unless already isolated), and
 * the parent chain is walked so that any frozen ancestor found below
 * the PHB level replaces it as the PE to handle.
 *
 * Returns 0 on success, -EEXIST if no EEH PE matches the PE number.
 */
static int pnv_eeh_get_pe(struct pci_controller *hose,
                          u16 pe_no, struct eeh_pe **pe)
{
        struct pnv_phb *phb = hose->private_data;
        struct pnv_ioda_pe *pnv_pe;
        struct eeh_pe *dev_pe;
        struct eeh_dev edev;

        /*
         * If PHB supports compound PE, to fetch
         * the master PE because slave PE is invisible
         * to EEH core.
         */
        pnv_pe = &phb->ioda.pe_array[pe_no];
        if (pnv_pe->flags & PNV_IODA_PE_SLAVE) {
                pnv_pe = pnv_pe->master;
                WARN_ON(!pnv_pe ||
                        !(pnv_pe->flags & PNV_IODA_PE_MASTER));
                pe_no = pnv_pe->pe_number;
        }

        /* Find the PE according to PE# */
        memset(&edev, 0, sizeof(struct eeh_dev));
        edev.phb = hose;
        edev.pe_config_addr = pe_no;
        dev_pe = eeh_pe_get(&edev);
        if (!dev_pe)
                return -EEXIST;

        /* Freeze the (compound) PE */
        *pe = dev_pe;
        if (!(dev_pe->state & EEH_PE_ISOLATED))
                phb->freeze_pe(phb, pe_no);

        /*
         * At this point, we're sure the (compound) PE should
         * have been frozen. However, we still need poke until
         * hitting the frozen PE on top level.
         */
        dev_pe = dev_pe->parent;
        while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) {
                int ret;
                int active_flags = (EEH_STATE_MMIO_ACTIVE |
                                    EEH_STATE_DMA_ACTIVE);

                /* Fully active parent: not frozen, keep climbing */
                ret = eeh_ops->get_state(dev_pe, NULL);
                if (ret <= 0 || (ret & active_flags) == active_flags) {
                        dev_pe = dev_pe->parent;
                        continue;
                }

                /* Frozen parent PE */
                *pe = dev_pe;
                if (!(dev_pe->state & EEH_PE_ISOLATED))
                        phb->freeze_pe(phb, dev_pe->addr);

                /* Next one */
                dev_pe = dev_pe->parent;
        }

        return 0;
}
1433
1434 /**
1435  * pnv_eeh_next_error - Retrieve next EEH error to handle
1436  * @pe: Affected PE
1437  *
1438  * The function is expected to be called by EEH core while it gets
1439  * special EEH event (without binding PE). The function calls to
1440  * OPAL APIs for next error to handle. The informational error is
1441  * handled internally by platform. However, the dead IOC, dead PHB,
1442  * fenced PHB and frozen PE should be handled by EEH core eventually.
1443  */
1444 static int pnv_eeh_next_error(struct eeh_pe **pe)
1445 {
1446         struct pci_controller *hose;
1447         struct pnv_phb *phb;
1448         struct eeh_pe *phb_pe, *parent_pe;
1449         __be64 frozen_pe_no;
1450         __be16 err_type, severity;
1451         int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
1452         long rc;
1453         int state, ret = EEH_NEXT_ERR_NONE;
1454
1455         /*
1456          * While running here, it's safe to purge the event queue. The
1457          * event should still be masked.
1458          */
1459         eeh_remove_event(NULL, false);
1460
1461         list_for_each_entry(hose, &hose_list, list_node) {
1462                 /*
1463                  * If the subordinate PCI buses of the PHB has been
1464                  * removed or is exactly under error recovery, we
1465                  * needn't take care of it any more.
1466                  */
1467                 phb = hose->private_data;
1468                 phb_pe = eeh_phb_pe_get(hose);
1469                 if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
1470                         continue;
1471
1472                 rc = opal_pci_next_error(phb->opal_id,
1473                                          &frozen_pe_no, &err_type, &severity);
1474                 if (rc != OPAL_SUCCESS) {
1475                         pr_devel("%s: Invalid return value on "
1476                                  "PHB#%x (0x%lx) from opal_pci_next_error",
1477                                  __func__, hose->global_number, rc);
1478                         continue;
1479                 }
1480
1481                 /* If the PHB doesn't have error, stop processing */
1482                 if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
1483                     be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
1484                         pr_devel("%s: No error found on PHB#%x\n",
1485                                  __func__, hose->global_number);
1486                         continue;
1487                 }
1488
1489                 /*
1490                  * Processing the error. We're expecting the error with
1491                  * highest priority reported upon multiple errors on the
1492                  * specific PHB.
1493                  */
1494                 pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
1495                         __func__, be16_to_cpu(err_type),
1496                         be16_to_cpu(severity), be64_to_cpu(frozen_pe_no),
1497                         hose->global_number);
1498                 switch (be16_to_cpu(err_type)) {
1499                 case OPAL_EEH_IOC_ERROR:
1500                         if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
1501                                 pr_err("EEH: dead IOC detected\n");
1502                                 ret = EEH_NEXT_ERR_DEAD_IOC;
1503                         } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
1504                                 pr_info("EEH: IOC informative error "
1505                                         "detected\n");
1506                                 pnv_eeh_get_and_dump_hub_diag(hose);
1507                                 ret = EEH_NEXT_ERR_NONE;
1508                         }
1509
1510                         break;
1511                 case OPAL_EEH_PHB_ERROR:
1512                         if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
1513                                 *pe = phb_pe;
1514                                 pr_err("EEH: dead PHB#%x detected, "
1515                                        "location: %s\n",
1516                                         hose->global_number,
1517                                         eeh_pe_loc_get(phb_pe));
1518                                 ret = EEH_NEXT_ERR_DEAD_PHB;
1519                         } else if (be16_to_cpu(severity) ==
1520                                    OPAL_EEH_SEV_PHB_FENCED) {
1521                                 *pe = phb_pe;
1522                                 pr_err("EEH: Fenced PHB#%x detected, "
1523                                        "location: %s\n",
1524                                         hose->global_number,
1525                                         eeh_pe_loc_get(phb_pe));
1526                                 ret = EEH_NEXT_ERR_FENCED_PHB;
1527                         } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
1528                                 pr_info("EEH: PHB#%x informative error "
1529                                         "detected, location: %s\n",
1530                                         hose->global_number,
1531                                         eeh_pe_loc_get(phb_pe));
1532                                 pnv_eeh_get_phb_diag(phb_pe);
1533                                 pnv_pci_dump_phb_diag_data(hose, phb_pe->data);
1534                                 ret = EEH_NEXT_ERR_NONE;
1535                         }
1536
1537                         break;
1538                 case OPAL_EEH_PE_ERROR:
1539                         /*
1540                          * If we can't find the corresponding PE, we
1541                          * just try to unfreeze.
1542                          */
1543                         if (pnv_eeh_get_pe(hose,
1544                                 be64_to_cpu(frozen_pe_no), pe)) {
1545                                 pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
1546                                         hose->global_number, be64_to_cpu(frozen_pe_no));
1547                                 pr_info("EEH: PHB location: %s\n",
1548                                         eeh_pe_loc_get(phb_pe));
1549
1550                                 /* Dump PHB diag-data */
1551                                 rc = opal_pci_get_phb_diag_data2(phb->opal_id,
1552                                         phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
1553                                 if (rc == OPAL_SUCCESS)
1554                                         pnv_pci_dump_phb_diag_data(hose,
1555                                                         phb->diag.blob);
1556
1557                                 /* Try best to clear it */
1558                                 opal_pci_eeh_freeze_clear(phb->opal_id,
1559                                         be64_to_cpu(frozen_pe_no),
1560                                         OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
1561                                 ret = EEH_NEXT_ERR_NONE;
1562                         } else if ((*pe)->state & EEH_PE_ISOLATED ||
1563                                    eeh_pe_passed(*pe)) {
1564                                 ret = EEH_NEXT_ERR_NONE;
1565                         } else {
1566                                 pr_err("EEH: Frozen PE#%x "
1567                                        "on PHB#%x detected\n",
1568                                        (*pe)->addr,
1569                                         (*pe)->phb->global_number);
1570                                 pr_err("EEH: PE location: %s, "
1571                                        "PHB location: %s\n",
1572                                        eeh_pe_loc_get(*pe),
1573                                        eeh_pe_loc_get(phb_pe));
1574                                 ret = EEH_NEXT_ERR_FROZEN_PE;
1575                         }
1576
1577                         break;
1578                 default:
1579                         pr_warn("%s: Unexpected error type %d\n",
1580                                 __func__, be16_to_cpu(err_type));
1581                 }
1582
1583                 /*
1584                  * EEH core will try recover from fenced PHB or
1585                  * frozen PE. In the time for frozen PE, EEH core
1586                  * enable IO path for that before collecting logs,
1587                  * but it ruins the site. So we have to dump the
1588                  * log in advance here.
1589                  */
1590                 if ((ret == EEH_NEXT_ERR_FROZEN_PE  ||
1591                     ret == EEH_NEXT_ERR_FENCED_PHB) &&
1592                     !((*pe)->state & EEH_PE_ISOLATED)) {
1593                         eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
1594                         pnv_eeh_get_phb_diag(*pe);
1595
1596                         if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
1597                                 pnv_pci_dump_phb_diag_data((*pe)->phb,
1598                                                            (*pe)->data);
1599                 }
1600
1601                 /*
1602                  * We probably have the frozen parent PE out there and
1603                  * we need have to handle frozen parent PE firstly.
1604                  */
1605                 if (ret == EEH_NEXT_ERR_FROZEN_PE) {
1606                         parent_pe = (*pe)->parent;
1607                         while (parent_pe) {
1608                                 /* Hit the ceiling ? */
1609                                 if (parent_pe->type & EEH_PE_PHB)
1610                                         break;
1611
1612                                 /* Frozen parent PE ? */
1613                                 state = eeh_ops->get_state(parent_pe, NULL);
1614                                 if (state > 0 &&
1615                                     (state & active_flags) != active_flags)
1616                                         *pe = parent_pe;
1617
1618                                 /* Next parent level */
1619                                 parent_pe = parent_pe->parent;
1620                         }
1621
1622                         /* We possibly migrate to another PE */
1623                         eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
1624                 }
1625
1626                 /*
1627                  * If we have no errors on the specific PHB or only
1628                  * informative error there, we continue poking it.
1629                  * Otherwise, we need actions to be taken by upper
1630                  * layer.
1631                  */
1632                 if (ret > EEH_NEXT_ERR_INF)
1633                         break;
1634         }
1635
1636         /* Unmask the event */
1637         if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
1638                 enable_irq(eeh_event_irq);
1639
1640         return ret;
1641 }
1642
1643 static int pnv_eeh_restore_vf_config(struct pci_dn *pdn)
1644 {
1645         struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
1646         u32 devctl, cmd, cap2, aer_capctl;
1647         int old_mps;
1648
1649         if (edev->pcie_cap) {
1650                 /* Restore MPS */
1651                 old_mps = (ffs(pdn->mps) - 8) << 5;
1652                 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
1653                                      2, &devctl);
1654                 devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
1655                 devctl |= old_mps;
1656                 eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
1657                                       2, devctl);
1658
1659                 /* Disable Completion Timeout */
1660                 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCAP2,
1661                                      4, &cap2);
1662                 if (cap2 & 0x10) {
1663                         eeh_ops->read_config(pdn,
1664                                              edev->pcie_cap + PCI_EXP_DEVCTL2,
1665                                              4, &cap2);
1666                         cap2 |= 0x10;
1667                         eeh_ops->write_config(pdn,
1668                                               edev->pcie_cap + PCI_EXP_DEVCTL2,
1669                                               4, cap2);
1670                 }
1671         }
1672
1673         /* Enable SERR and parity checking */
1674         eeh_ops->read_config(pdn, PCI_COMMAND, 2, &cmd);
1675         cmd |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1676         eeh_ops->write_config(pdn, PCI_COMMAND, 2, cmd);
1677
1678         /* Enable report various errors */
1679         if (edev->pcie_cap) {
1680                 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
1681                                      2, &devctl);
1682                 devctl &= ~PCI_EXP_DEVCTL_CERE;
1683                 devctl |= (PCI_EXP_DEVCTL_NFERE |
1684                            PCI_EXP_DEVCTL_FERE |
1685                            PCI_EXP_DEVCTL_URRE);
1686                 eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
1687                                       2, devctl);
1688         }
1689
1690         /* Enable ECRC generation and check */
1691         if (edev->pcie_cap && edev->aer_cap) {
1692                 eeh_ops->read_config(pdn, edev->aer_cap + PCI_ERR_CAP,
1693                                      4, &aer_capctl);
1694                 aer_capctl |= (PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
1695                 eeh_ops->write_config(pdn, edev->aer_cap + PCI_ERR_CAP,
1696                                       4, aer_capctl);
1697         }
1698
1699         return 0;
1700 }
1701
1702 static int pnv_eeh_restore_config(struct pci_dn *pdn)
1703 {
1704         struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
1705         struct pnv_phb *phb;
1706         s64 ret;
1707
1708         if (!edev)
1709                 return -EEXIST;
1710
1711         /*
1712          * We have to restore the PCI config space after reset since the
1713          * firmware can't see SRIOV VFs.
1714          *
1715          * FIXME: The MPS, error routing rules, timeout setting are worthy
1716          * to be exported by firmware in extendible way.
1717          */
1718         if (edev->physfn) {
1719                 ret = pnv_eeh_restore_vf_config(pdn);
1720         } else {
1721                 phb = edev->phb->private_data;
1722                 ret = opal_pci_reinit(phb->opal_id,
1723                                       OPAL_REINIT_PCI_DEV, edev->config_addr);
1724         }
1725
1726         if (ret) {
1727                 pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
1728                         __func__, edev->config_addr, ret);
1729                 return -EIO;
1730         }
1731
1732         return 0;
1733 }
1734
/* Platform dependent EEH operations for the powernv platform */
static struct eeh_ops pnv_eeh_ops = {
	.name                   = "powernv",
	.init                   = pnv_eeh_init,
	.post_init              = pnv_eeh_post_init,
	.probe                  = pnv_eeh_probe,
	.set_option             = pnv_eeh_set_option,
	.get_pe_addr            = pnv_eeh_get_pe_addr,
	.get_state              = pnv_eeh_get_state,
	.reset                  = pnv_eeh_reset,
	.wait_state             = pnv_eeh_wait_state,
	.get_log                = pnv_eeh_get_log,
	.configure_bridge       = pnv_eeh_configure_bridge,
	.err_inject             = pnv_eeh_err_inject,
	.read_config            = pnv_eeh_read_config,
	.write_config           = pnv_eeh_write_config,
	.next_error             = pnv_eeh_next_error,
	.restore_config         = pnv_eeh_restore_config
};
1753
1754 void pcibios_bus_add_device(struct pci_dev *pdev)
1755 {
1756         struct pci_dn *pdn = pci_get_pdn(pdev);
1757
1758         if (!pdev->is_virtfn)
1759                 return;
1760
1761         /*
1762          * The following operations will fail if VF's sysfs files
1763          * aren't created or its resources aren't finalized.
1764          */
1765         eeh_add_device_early(pdn);
1766         eeh_add_device_late(pdev);
1767         eeh_sysfs_add_device(pdev);
1768 }
1769
#ifdef CONFIG_PCI_IOV
/*
 * Keep a VF's Max Payload Size in sync with its parent PF: adopt the
 * PF's MPS whenever the VF can support it.  The resulting MPS is cached
 * in pdn->mps so it can be restored after an EEH reset.
 */
static void pnv_pci_fixup_vf_mps(struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	int pf_mps;

	if (!pdev->is_virtfn)
		return;

	pf_mps = pcie_get_mps(pdev->physfn);
	/* 128 << pcie_mpss is the largest payload the VF supports */
	if (pf_mps <= (128 << pdev->pcie_mpss))
		pcie_set_mps(pdev, pf_mps);
	pdn->mps = pcie_get_mps(pdev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pnv_pci_fixup_vf_mps);
#endif /* CONFIG_PCI_IOV */
1787
1788 /**
1789  * eeh_powernv_init - Register platform dependent EEH operations
1790  *
1791  * EEH initialization on powernv platform. This function should be
1792  * called before any EEH related functions.
1793  */
1794 static int __init eeh_powernv_init(void)
1795 {
1796         int ret = -EINVAL;
1797
1798         eeh_set_pe_aux_size(PNV_PCI_DIAG_BUF_SIZE);
1799         ret = eeh_ops_register(&pnv_eeh_ops);
1800         if (!ret)
1801                 pr_info("EEH: PowerNV platform initialized\n");
1802         else
1803                 pr_info("EEH: Failed to initialize PowerNV platform (%d)\n", ret);
1804
1805         return ret;
1806 }
1807 machine_early_initcall(powernv, eeh_powernv_init);