/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <asm/opal.h>
#include <asm/msi_bitmap.h>
#include <asm/pnv-pci.h>
#include <asm/io.h>
#include <asm/reg.h>

#include "cxl.h"
#include <misc/cxl.h>
#define CXL_PCI_VSEC_ID	0x1280
#define CXL_VSEC_MIN_SIZE 0x80
#define CXL_READ_VSEC_LENGTH(dev, vsec, dest)			\
	{							\
		pci_read_config_word(dev, vsec + 0x6, dest);	\
		*(dest) >>= 4;					\
	}
#define CXL_READ_VSEC_NAFUS(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x8, dest)

#define CXL_READ_VSEC_STATUS(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x9, dest)
#define CXL_STATUS_SECOND_PORT  0x80
#define CXL_STATUS_MSI_X_FULL   0x40
#define CXL_STATUS_MSI_X_SINGLE 0x20
#define CXL_STATUS_FLASH_RW     0x08
#define CXL_STATUS_FLASH_RO     0x04
#define CXL_STATUS_LOADABLE_AFU 0x02
#define CXL_STATUS_LOADABLE_PSL 0x01
/* If we see these features we won't try to use the card */
#define CXL_UNSUPPORTED_FEATURES \
	(CXL_STATUS_MSI_X_FULL | CXL_STATUS_MSI_X_SINGLE)
#define CXL_READ_VSEC_MODE_CONTROL(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xa, dest)
#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
	pci_write_config_byte(dev, vsec + 0xa, val)
#define CXL_VSEC_PROTOCOL_MASK   0xe0
#define CXL_VSEC_PROTOCOL_1024TB 0x80
#define CXL_VSEC_PROTOCOL_512TB  0x40
#define CXL_VSEC_PROTOCOL_256TB  0x20 /* Power 8 uses this */
#define CXL_VSEC_PROTOCOL_ENABLE 0x01
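/*
 * Taken together: the mode control byte at vsec + 0xa selects the CXL
 * protocol/address range in its top three bits (CXL_VSEC_PROTOCOL_MASK)
 * and enables CXL mode via bit 0; switch_card_to_cxl() below clears the
 * mask and sets CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE.
 */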
#define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \
	pci_read_config_word(dev, vsec + 0xc, dest)

#define CXL_READ_VSEC_CAIA_MINOR(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xe, dest)
#define CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xf, dest)
#define CXL_READ_VSEC_BASE_IMAGE(dev, vsec, dest) \
	pci_read_config_word(dev, vsec + 0x10, dest)

#define CXL_READ_VSEC_IMAGE_STATE(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x13, dest)
#define CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, val) \
	pci_write_config_byte(dev, vsec + 0x13, val)
#define CXL_VSEC_USER_IMAGE_LOADED 0x80 /* RO */
#define CXL_VSEC_PERST_LOADS_IMAGE 0x20 /* RW */
#define CXL_VSEC_PERST_SELECT_USER 0x10 /* RW */
#define CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x20, dest)
#define CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x24, dest)
#define CXL_READ_VSEC_PS_OFF(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x28, dest)
#define CXL_READ_VSEC_PS_SIZE(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x2c, dest)
/* This works a little differently from the p1/p2 register accesses, to make it
 * easier to pull out individual fields */
#define AFUD_READ(afu, off)		in_be64(afu->native->afu_desc_mmio + off)
#define AFUD_READ_LE(afu, off)		in_le64(afu->native->afu_desc_mmio + off)
#define EXTRACT_PPC_BIT(val, bit)	(!!(val & PPC_BIT(bit)))
#define EXTRACT_PPC_BITS(val, bs, be)	((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))
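/*
 * Note that bit numbers here are big-endian: PPC_BIT(0) is the MSB of
 * the u64, so e.g. EXTRACT_PPC_BITS(val, 0, 15) returns the 16 most
 * significant bits. For val = 0x00A2000400000000ULL it yields 0xA2,
 * while EXTRACT_PPC_BITS(val, 16, 31) yields 0x4.
 */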
#define AFUD_READ_INFO(afu)		AFUD_READ(afu, 0x0)
#define   AFUD_NUM_INTS_PER_PROC(val)	EXTRACT_PPC_BITS(val, 0, 15)
#define   AFUD_NUM_PROCS(val)		EXTRACT_PPC_BITS(val, 16, 31)
#define   AFUD_NUM_CRS(val)		EXTRACT_PPC_BITS(val, 32, 47)
#define   AFUD_MULTIMODE(val)		EXTRACT_PPC_BIT(val, 48)
#define   AFUD_PUSH_BLOCK_TRANSFER(val)	EXTRACT_PPC_BIT(val, 55)
#define   AFUD_DEDICATED_PROCESS(val)	EXTRACT_PPC_BIT(val, 59)
#define   AFUD_AFU_DIRECTED(val)	EXTRACT_PPC_BIT(val, 61)
#define   AFUD_TIME_SLICED(val)		EXTRACT_PPC_BIT(val, 63)
#define AFUD_READ_CR(afu)		AFUD_READ(afu, 0x20)
#define   AFUD_CR_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_CR_OFF(afu)		AFUD_READ(afu, 0x28)
#define AFUD_READ_PPPSA(afu)		AFUD_READ(afu, 0x30)
#define   AFUD_PPPSA_PP(val)		EXTRACT_PPC_BIT(val, 6)
#define   AFUD_PPPSA_PSA(val)		EXTRACT_PPC_BIT(val, 7)
#define   AFUD_PPPSA_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_PPPSA_OFF(afu)	AFUD_READ(afu, 0x38)
#define AFUD_READ_EB(afu)		AFUD_READ(afu, 0x40)
#define   AFUD_EB_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_EB_OFF(afu)		AFUD_READ(afu, 0x48)
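/*
 * Taken together, the accessors above encode the AFU descriptor layout
 * this driver consumes: an info word at 0x0 (interrupt/process/CR counts
 * plus programming-mode bits), configuration record length and offset at
 * 0x20/0x28, the per-process problem state area at 0x30/0x38, and the
 * error buffer at 0x40/0x48.
 */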
static const struct pci_device_id cxl_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
	{ PCI_DEVICE_CLASS(0x120000, ~0), },

	{ }
};
MODULE_DEVICE_TABLE(pci, cxl_pci_tbl);
/*
 * Mostly using these wrappers to avoid confusion:
 * priv 1 is BAR2, while priv 2 is BAR0
 */
static inline resource_size_t p1_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 2);
}

static inline resource_size_t p1_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 2);
}

static inline resource_size_t p2_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 0);
}

static inline resource_size_t p2_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 0);
}
static int find_cxl_vsec(struct pci_dev *dev)
{
	int vsec = 0;
	u16 val;

	while ((vsec = pci_find_next_ext_capability(dev, vsec, PCI_EXT_CAP_ID_VNDR))) {
		pci_read_config_word(dev, vsec + 0x4, &val);
		if (val == CXL_PCI_VSEC_ID)
			return vsec;
	}
	return 0;
}
static void dump_cxl_config_space(struct pci_dev *dev)
{
	int vsec;
	u32 val;

	dev_info(&dev->dev, "dump_cxl_config_space\n");

	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val);
	dev_info(&dev->dev, "BAR0: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val);
	dev_info(&dev->dev, "BAR1: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val);
	dev_info(&dev->dev, "BAR2: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val);
	dev_info(&dev->dev, "BAR3: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val);
	dev_info(&dev->dev, "BAR4: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val);
	dev_info(&dev->dev, "BAR5: %#.8x\n", val);

	dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n",
		p1_base(dev), p1_size(dev));
	dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n",
		p2_base(dev), p2_size(dev));
	dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n",
		pci_resource_start(dev, 4), pci_resource_len(dev, 4));

	if (!(vsec = find_cxl_vsec(dev)))
		return;

#define show_reg(name, what) \
	dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what)
	pci_read_config_dword(dev, vsec + 0x0, &val);
	show_reg("Cap ID", (val >> 0) & 0xffff);
	show_reg("Cap Ver", (val >> 16) & 0xf);
	show_reg("Next Cap Ptr", (val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x4, &val);
	show_reg("VSEC ID", (val >> 0) & 0xffff);
	show_reg("VSEC Rev", (val >> 16) & 0xf);
	show_reg("VSEC Length", (val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x8, &val);
	show_reg("Num AFUs", (val >> 0) & 0xff);
	show_reg("Status", (val >> 8) & 0xff);
	show_reg("Mode Control", (val >> 16) & 0xff);
	show_reg("Reserved", (val >> 24) & 0xff);
	pci_read_config_dword(dev, vsec + 0xc, &val);
	show_reg("PSL Rev", (val >> 0) & 0xffff);
	show_reg("CAIA Ver", (val >> 16) & 0xffff);
	pci_read_config_dword(dev, vsec + 0x10, &val);
	show_reg("Base Image Rev", (val >> 0) & 0xffff);
	show_reg("Reserved", (val >> 16) & 0x0fff);
	show_reg("Image Control", (val >> 28) & 0x3);
	show_reg("Reserved", (val >> 30) & 0x1);
	show_reg("Image Loaded", (val >> 31) & 0x1);

	pci_read_config_dword(dev, vsec + 0x14, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x18, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x1c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x20, &val);
	show_reg("AFU Descriptor Offset", val);
	pci_read_config_dword(dev, vsec + 0x24, &val);
	show_reg("AFU Descriptor Size", val);
	pci_read_config_dword(dev, vsec + 0x28, &val);
	show_reg("Problem State Offset", val);
	pci_read_config_dword(dev, vsec + 0x2c, &val);
	show_reg("Problem State Size", val);

	pci_read_config_dword(dev, vsec + 0x30, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x34, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x38, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x3c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x40, &val);
	show_reg("PSL Programming Port", val);
	pci_read_config_dword(dev, vsec + 0x44, &val);
	show_reg("PSL Programming Control", val);

	pci_read_config_dword(dev, vsec + 0x48, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x4c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x50, &val);
	show_reg("Flash Address Register", val);
	pci_read_config_dword(dev, vsec + 0x54, &val);
	show_reg("Flash Size Register", val);
	pci_read_config_dword(dev, vsec + 0x58, &val);
	show_reg("Flash Status/Control Register", val);
	pci_read_config_dword(dev, vsec + 0x5c, &val);
	show_reg("Flash Data Port", val);

#undef show_reg
}
static void dump_afu_descriptor(struct cxl_afu *afu)
{
	u64 val, afu_cr_num, afu_cr_off, afu_cr_len;
	int i;

#define show_reg(name, what) \
	dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what)

	val = AFUD_READ_INFO(afu);
	show_reg("num_ints_per_process", AFUD_NUM_INTS_PER_PROC(val));
	show_reg("num_of_processes", AFUD_NUM_PROCS(val));
	show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val));
	show_reg("req_prog_mode", val & 0xffffULL);
	afu_cr_num = AFUD_NUM_CRS(val);

	val = AFUD_READ(afu, 0x8);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x10);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x18);
	show_reg("Reserved", val);

	val = AFUD_READ_CR(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_CR_len", AFUD_CR_LEN(val));
	afu_cr_len = AFUD_CR_LEN(val) * 256;

	val = AFUD_READ_CR_OFF(afu);
	afu_cr_off = val;
	show_reg("AFU_CR_offset", val);

	val = AFUD_READ_PPPSA(afu);
	show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff);
	show_reg("PerProcessPSA Length", AFUD_PPPSA_LEN(val));

	val = AFUD_READ_PPPSA_OFF(afu);
	show_reg("PerProcessPSA_offset", val);

	val = AFUD_READ_EB(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_EB_len", AFUD_EB_LEN(val));

	val = AFUD_READ_EB_OFF(afu);
	show_reg("AFU_EB_offset", val);

	for (i = 0; i < afu_cr_num; i++) {
		val = AFUD_READ_LE(afu, afu_cr_off + i * afu_cr_len);
		show_reg("CR Vendor", val & 0xffff);
		show_reg("CR Device", (val >> 16) & 0xffff);
	}
#undef show_reg
}
#define CAPP_UNIT0_ID 0xBA
#define CAPP_UNIT1_ID 0xBE

static u64 get_capp_unit_id(struct device_node *np)
{
	u32 phb_index;

	/*
	 * For chips other than POWER8NVL, we only have CAPP 0,
	 * irrespective of which PHB is used.
	 */
	if (!pvr_version_is(PVR_POWER8NVL))
		return CAPP_UNIT0_ID;

	/*
	 * For POWER8NVL, assume CAPP 0 is attached to PHB0 and
	 * CAPP 1 is attached to PHB1.
	 */
	if (of_property_read_u32(np, "ibm,phb-index", &phb_index))
		return 0;

	if (phb_index == 0)
		return CAPP_UNIT0_ID;

	if (phb_index == 1)
		return CAPP_UNIT1_ID;

	return 0;
}
static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id)
{
	struct device_node *np;
	const __be32 *prop;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return -ENODEV;

	while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
		np = of_get_next_parent(np);
	if (!np)
		return -ENODEV;
	*chipid = be32_to_cpup(prop);
	*capp_unit_id = get_capp_unit_id(np);
	of_node_put(np);
	if (!*capp_unit_id) {
		pr_err("cxl: invalid capp unit id\n");
		return -ENODEV;
	}

	return 0;
}
static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev)
{
	u64 psl_dsnctl;
	u64 chipid;
	u64 capp_unit_id;
	int rc;

	rc = calc_capp_routing(dev, &chipid, &capp_unit_id);
	if (rc)
		return rc;

	psl_dsnctl = 0x0000900000000000ULL; /* pteupd ttype, scdone */
	psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang pulse: 256 us */
	/* Tell PSL where to route data to */
	psl_dsnctl |= (chipid << (63-5));
	psl_dsnctl |= (capp_unit_id << (63-13));

	cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl);
	cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
	/* snoop write mask */
	cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
	/* set fir_accum */
	cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL);
	/* for debugging with trace arrays */
	cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);

	return 0;
}
static int init_implementation_adapter_xsl_regs(struct cxl *adapter, struct pci_dev *dev)
{
	u64 xsl_dsnctl;
	u64 chipid;
	u64 capp_unit_id;
	int rc;

	rc = calc_capp_routing(dev, &chipid, &capp_unit_id);
	if (rc)
		return rc;

	/* Tell XSL where to route data to */
	xsl_dsnctl = 0x0000600000000000ULL | (chipid << (63-5));
	xsl_dsnctl |= (capp_unit_id << (63-13));
	cxl_p1_write(adapter, CXL_XSL_DSNCTL, xsl_dsnctl);

	return 0;
}
/* PSL & XSL */
#define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3))
#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
/* For the PSL this is a multiple for 0 < n <= 7: */
#define PSL_2048_250MHZ_CYCLES 1

static void write_timebase_ctrl_psl(struct cxl *adapter)
{
	cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
		     TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES));
}
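/*
 * For scale: a count of 2 (2 * PSL_2048_250MHZ_CYCLES) presumably means
 * 2 * 2048 cycles at 250MHz, i.e. roughly 16us, which lines up with the
 * <= 16usecs convergence check in cxl_setup_psl_timebase() below.
 */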
/* XSL */
#define TBSYNC_ENA (1ULL << 63)
/* For the XSL this is 2**(n-1) * 2000 clocks for 0 < n <= 6: */
#define XSL_2000_CLOCKS 1
#define XSL_4000_CLOCKS 2
#define XSL_8000_CLOCKS 3

static void write_timebase_ctrl_xsl(struct cxl *adapter)
{
	cxl_p1_write(adapter, CXL_XSL_TB_CTLSTAT,
		     TBSYNC_ENA |
		     TBSYNC_CAL(3) |
		     TBSYNC_CNT(XSL_4000_CLOCKS));
}
static u64 timebase_read_psl(struct cxl *adapter)
{
	return cxl_p1_read(adapter, CXL_PSL_Timebase);
}

static u64 timebase_read_xsl(struct cxl *adapter)
{
	return cxl_p1_read(adapter, CXL_XSL_Timebase);
}
static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
{
	u64 psl_tb;
	int delta;
	unsigned int retry = 0;
	struct device_node *np;

	adapter->psl_timebase_synced = false;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return;

	/* Do not fail when CAPP timebase sync is not supported by OPAL */
	of_node_get(np);
	if (!of_get_property(np, "ibm,capp-timebase-sync", NULL)) {
		of_node_put(np);
		dev_info(&dev->dev, "PSL timebase inactive: OPAL support missing\n");
		return;
	}
	of_node_put(np);

	/*
	 * Setup PSL Timebase Control and Status register
	 * with the recommended Timebase Sync Count value
	 */
	adapter->native->sl_ops->write_timebase_ctrl(adapter);

	/* Enable PSL Timebase */
	cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000);
	cxl_p1_write(adapter, CXL_PSL_Control, CXL_PSL_Control_tb);

	/* Wait until CORE TB and PSL TB difference <= 16usecs */
	do {
		msleep(1);
		if (retry++ > 5) {
			dev_info(&dev->dev, "PSL timebase can't synchronize\n");
			return;
		}
		psl_tb = adapter->native->sl_ops->timebase_read(adapter);
		delta = mftb() - psl_tb;
		if (delta < 0)
			delta = -delta;
	} while (tb_to_ns(delta) > 16000);

	adapter->psl_timebase_synced = true;
}
static int init_implementation_afu_psl_regs(struct cxl_afu *afu)
{
	/* read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
	/* APC read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL);
	/* for debugging with trace arrays */
	cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL);
	cxl_p1n_write(afu, CXL_PSL_RXCTL_A, CXL_PSL_RXCTL_AFUHP_4S);

	return 0;
}
int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq,
			unsigned int virq)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
}
int cxl_update_image_control(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;
	int vsec;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
		dev_err(&dev->dev, "failed to read image state: %i\n", rc);
		return rc;
	}

	if (adapter->perst_loads_image)
		image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
	else
		image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;

	if (adapter->perst_select_user)
		image_state |= CXL_VSEC_PERST_SELECT_USER;
	else
		image_state &= ~CXL_VSEC_PERST_SELECT_USER;

	if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
		dev_err(&dev->dev, "failed to update image control: %i\n", rc);
		return rc;
	}

	return 0;
}
int cxl_pci_alloc_one_irq(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_alloc_hwirqs(dev, 1);
}

void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	pnv_cxl_release_hwirqs(dev, hwirq, 1);
}

int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
			struct cxl *adapter, unsigned int num)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num);
}

void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs,
			struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	pnv_cxl_release_hwirq_ranges(irqs, dev);
}
static int setup_cxl_bars(struct pci_dev *dev)
{
	/* Safety check in case we get backported to < 3.17 without M64 */
	if ((p1_base(dev) < 0x100000000ULL) ||
	    (p2_base(dev) < 0x100000000ULL)) {
		dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n");
		return -ENODEV;
	}

	/*
	 * BAR 4/5 has a special meaning for CXL and must be programmed with a
	 * special value corresponding to the CXL protocol address range.
	 * For POWER 8 that means bits 48:49 must be set to 10
	 */
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000);
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000);

	return 0;
}
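/*
 * Arithmetic behind the magic value: BAR4 is the low dword and BAR5 the
 * high dword of one 64-bit BAR, so the writes above form the 64-bit
 * value 0x0002000000000000 (only bit 2^49 set), which appears to be how
 * the "bits 48:49 = 10" requirement in the comment is encoded.
 */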
/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */
static int switch_card_to_cxl(struct pci_dev *dev)
{
	int vsec;
	u8 val;
	int rc;

	dev_info(&dev->dev, "switch card to CXL\n");

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) {
		dev_err(&dev->dev, "failed to read current mode control: %i", rc);
		return rc;
	}
	val &= ~CXL_VSEC_PROTOCOL_MASK;
	val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
	if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) {
		dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc);
		return rc;
	}
	/*
	 * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states
	 * we must wait 100ms after this mode switch before touching
	 * PCIe config space.
	 */
	msleep(100);

	return 0;
}
static int pci_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
	u64 p1n_base, p2n_base, afu_desc;
	const u64 p1n_size = 0x100;
	const u64 p2n_size = 0x1000;

	p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size);
	p2n_base = p2_base(dev) + (afu->slice * p2n_size);
	afu->psn_phys = p2_base(dev) + (adapter->native->ps_off + (afu->slice * adapter->ps_size));
	afu_desc = p2_base(dev) + adapter->native->afu_desc_off + (afu->slice * adapter->native->afu_desc_size);

	if (!(afu->native->p1n_mmio = ioremap(p1n_base, p1n_size)))
		goto err;
	if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size)))
		goto err1;
	if (adapter->native->afu_desc_off)
		if (!(afu->native->afu_desc_mmio = ioremap(afu_desc, adapter->native->afu_desc_size)))
			goto err2;

	return 0;
err2:
	iounmap(afu->p2n_mmio);
err1:
	iounmap(afu->native->p1n_mmio);
err:
	dev_err(&afu->dev, "Error mapping AFU MMIO regions\n");
	return -ENOMEM;
}
static void pci_unmap_slice_regs(struct cxl_afu *afu)
{
	if (afu->p2n_mmio) {
		iounmap(afu->p2n_mmio);
		afu->p2n_mmio = NULL;
	}
	if (afu->native->p1n_mmio) {
		iounmap(afu->native->p1n_mmio);
		afu->native->p1n_mmio = NULL;
	}
	if (afu->native->afu_desc_mmio) {
		iounmap(afu->native->afu_desc_mmio);
		afu->native->afu_desc_mmio = NULL;
	}
}
void cxl_pci_release_afu(struct device *dev)
{
	struct cxl_afu *afu = to_cxl_afu(dev);

	pr_devel("%s\n", __func__);

	idr_destroy(&afu->contexts_idr);
	cxl_release_spa(afu);

	kfree(afu->native);
	kfree(afu);
}
/* Expects AFU struct to have recently been zeroed out */
static int cxl_read_afu_descriptor(struct cxl_afu *afu)
{
	u64 val;

	val = AFUD_READ_INFO(afu);
	afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val);
	afu->max_procs_virtualised = AFUD_NUM_PROCS(val);
	afu->crs_num = AFUD_NUM_CRS(val);

	if (AFUD_AFU_DIRECTED(val))
		afu->modes_supported |= CXL_MODE_DIRECTED;
	if (AFUD_DEDICATED_PROCESS(val))
		afu->modes_supported |= CXL_MODE_DEDICATED;
	if (AFUD_TIME_SLICED(val))
		afu->modes_supported |= CXL_MODE_TIME_SLICED;

	val = AFUD_READ_PPPSA(afu);
	afu->pp_size = AFUD_PPPSA_LEN(val) * 4096;
	afu->psa = AFUD_PPPSA_PSA(val);
	if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
		afu->native->pp_offset = AFUD_READ_PPPSA_OFF(afu);

	val = AFUD_READ_CR(afu);
	afu->crs_len = AFUD_CR_LEN(val) * 256;
	afu->crs_offset = AFUD_READ_CR_OFF(afu);

	/* eb_len is in multiple of 4K */
	afu->eb_len = AFUD_EB_LEN(AFUD_READ_EB(afu)) * 4096;
	afu->eb_offset = AFUD_READ_EB_OFF(afu);

	/* eb_off is 4K aligned so lower 12 bits are always zero */
	if (EXTRACT_PPC_BITS(afu->eb_offset, 0, 11) != 0) {
		dev_warn(&afu->dev,
			 "Invalid AFU error buffer offset %Lx\n",
			 afu->eb_offset);
		dev_info(&afu->dev,
			 "Ignoring AFU error buffer in the descriptor\n");
		/* indicate that no afu buffer exists */
		afu->eb_len = 0;
	}

	return 0;
}
static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
{
	int i, rc;
	u32 val;

	if (afu->psa && afu->adapter->ps_size <
			(afu->native->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
		dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
		return -ENODEV;
	}

	if (afu->pp_psa && (afu->pp_size < PAGE_SIZE))
		dev_warn(&afu->dev, "AFU uses < PAGE_SIZE per-process PSA!");

	for (i = 0; i < afu->crs_num; i++) {
		rc = cxl_ops->afu_cr_read32(afu, i, 0, &val);
		if (rc || val == 0) {
			dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i);
			return -EINVAL;
		}
	}

	if ((afu->modes_supported & ~CXL_MODE_DEDICATED) && afu->max_procs_virtualised == 0) {
		/*
		 * We could also check this for the dedicated process model
		 * since the architecture indicates it should be set to 1, but
		 * in that case we ignore the value and I'd rather not risk
		 * breaking any existing dedicated process AFUs that left it as
		 * 0 (not that I'm aware of any). It is clearly an error for an
		 * AFU directed AFU to set this to 0, and would have previously
		 * triggered a bug resulting in the maximum not being enforced
		 * at all since idr_alloc treats 0 as no maximum.
		 */
		dev_err(&afu->dev, "AFU does not support any processes\n");
		return -EINVAL;
	}

	return 0;
}
static int sanitise_afu_regs(struct cxl_afu *afu)
{
	u64 reg;

	/*
	 * Clear out any regs that contain either an IVTE or address or may be
	 * waiting on an acknowledgement to try to be a bit safer as we bring
	 * it online
	 */
	reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
		if (cxl_ops->afu_reset(afu))
			return -EIO;
		if (cxl_afu_disable(afu))
			return -EIO;
		if (cxl_psl_purge(afu))
			return -EIO;
	}

	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_SPOffset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_HAURP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_CSRP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP0_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);
	reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
		if (reg & CXL_PSL_DSISR_TRANS)
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		else
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
	}
	if (afu->adapter->native->sl_ops->register_serr_irq) {
		reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
		if (reg) {
			dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
			cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
		}
	}
	reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
		cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
	}

	return 0;
}
#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE

/*
 * Called from sysfs and reads the afu error info buffer. The h/w only supports
 * 4/8 bytes aligned access. So in case the requested offset/count aren't 8 byte
 * aligned the function uses a bounce buffer which can be max PAGE_SIZE.
 */
ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
				loff_t off, size_t count)
{
	loff_t aligned_start, aligned_end;
	size_t aligned_length;
	void *tbuf;
	const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset;

	if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
		return 0;

	/* calculate aligned read window */
	count = min((size_t)(afu->eb_len - off), count);
	aligned_start = round_down(off, 8);
	aligned_end = round_up(off + count, 8);
	aligned_length = aligned_end - aligned_start;

	/* max we can copy in one read is PAGE_SIZE */
	if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) {
		aligned_length = ERR_BUFF_MAX_COPY_SIZE;
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	}

	/* use bounce buffer for copy */
	tbuf = (void *)__get_free_page(GFP_TEMPORARY);
	if (!tbuf)
		return -ENOMEM;

	/* perform aligned read from the mmio region */
	memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length);
	memcpy(buf, tbuf + (off & 0x7), count);

	free_page((unsigned long)tbuf);

	return count;
}
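/*
 * Worked example of the alignment logic above: off = 5, count = 7 reads
 * the 16-byte window [0, 16) from the MMIO buffer (round_down(5, 8) = 0,
 * round_up(12, 8) = 16) into tbuf, then copies the 7 requested bytes out
 * of tbuf starting at offset 5 (off & 0x7).
 */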
static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
	int rc;

	if ((rc = pci_map_slice_regs(afu, adapter, dev)))
		return rc;

	if ((rc = sanitise_afu_regs(afu)))
		goto err1;

	/* We need to reset the AFU before we can read the AFU descriptor */
	if ((rc = cxl_ops->afu_reset(afu)))
		goto err1;

	if (cxl_verbose)
		dump_afu_descriptor(afu);

	if ((rc = cxl_read_afu_descriptor(afu)))
		goto err1;

	if ((rc = cxl_afu_descriptor_looks_ok(afu)))
		goto err1;

	if (adapter->native->sl_ops->afu_regs_init)
		if ((rc = adapter->native->sl_ops->afu_regs_init(afu)))
			goto err1;

	if (adapter->native->sl_ops->register_serr_irq)
		if ((rc = adapter->native->sl_ops->register_serr_irq(afu)))
			goto err1;

	if ((rc = cxl_native_register_psl_irq(afu)))
		goto err2;

	return 0;

err2:
	if (adapter->native->sl_ops->release_serr_irq)
		adapter->native->sl_ops->release_serr_irq(afu);
err1:
	pci_unmap_slice_regs(afu);
	return rc;
}

static void pci_deconfigure_afu(struct cxl_afu *afu)
{
	cxl_native_release_psl_irq(afu);
	if (afu->adapter->native->sl_ops->release_serr_irq)
		afu->adapter->native->sl_ops->release_serr_irq(afu);
	pci_unmap_slice_regs(afu);
}
static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
{
	struct cxl_afu *afu;
	int rc = -ENOMEM;

	afu = cxl_alloc_afu(adapter, slice);
	if (!afu)
		return -ENOMEM;

	afu->native = kzalloc(sizeof(struct cxl_afu_native), GFP_KERNEL);
	if (!afu->native)
		goto err_free_afu;

	mutex_init(&afu->native->spa_mutex);

	rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice);
	if (rc)
		goto err_free_native;

	rc = pci_configure_afu(afu, adapter, dev);
	if (rc)
		goto err_free_native;

	/* Don't care if this fails */
	cxl_debugfs_afu_add(afu);

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	adapter->afu[afu->slice] = afu;

	if ((rc = cxl_pci_vphb_add(afu)))
		dev_info(&afu->dev, "Can't register vPHB\n");

	return 0;

err_put1:
	pci_deconfigure_afu(afu);
	cxl_debugfs_afu_remove(afu);
	device_unregister(&afu->dev);
	return rc;

err_free_native:
	kfree(afu->native);
err_free_afu:
	kfree(afu);
	return rc;
}
static void cxl_pci_remove_afu(struct cxl_afu *afu)
{
	pr_devel("%s\n", __func__);

	if (!afu)
		return;

	cxl_pci_vphb_remove(afu);
	cxl_sysfs_afu_remove(afu);
	cxl_debugfs_afu_remove(afu);

	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_ops->afu_deactivate_mode(afu, afu->current_mode);

	pci_deconfigure_afu(afu);
	device_unregister(&afu->dev);
}
int cxl_pci_reset(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;

	if (adapter->perst_same_image) {
		dev_warn(&dev->dev,
			 "cxl: refusing to reset/reflash when perst_reloads_same_image is set.\n");
		return -EINVAL;
	}

	dev_info(&dev->dev, "CXL reset\n");

	/* pcie_warm_reset requests a fundamental pci reset which includes a
	 * PERST assert/deassert. PERST triggers a loading of the image
	 * if "user" or "factory" is selected in sysfs */
	if ((rc = pci_set_pcie_reset_state(dev, pcie_warm_reset))) {
		dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n");
		return rc;
	}

	return rc;
}
static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
	if (pci_request_region(dev, 2, "priv 1 regs"))
		goto err1;
	if (pci_request_region(dev, 0, "priv 2 regs"))
		goto err2;

	pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
			p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));

	if (!(adapter->native->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
		goto err3;

	if (!(adapter->native->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
		goto err4;

	return 0;

err4:
	iounmap(adapter->native->p1_mmio);
	adapter->native->p1_mmio = NULL;
err3:
	pci_release_region(dev, 0);
err2:
	pci_release_region(dev, 2);
err1:
	return -ENOMEM;
}

static void cxl_unmap_adapter_regs(struct cxl *adapter)
{
	if (adapter->native->p1_mmio) {
		iounmap(adapter->native->p1_mmio);
		adapter->native->p1_mmio = NULL;
		pci_release_region(to_pci_dev(adapter->dev.parent), 2);
	}
	if (adapter->native->p2_mmio) {
		iounmap(adapter->native->p2_mmio);
		adapter->native->p2_mmio = NULL;
		pci_release_region(to_pci_dev(adapter->dev.parent), 0);
	}
}
static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
{
	int vsec;
	u32 afu_desc_off, afu_desc_size;
	u32 ps_off, ps_size;
	u16 vseclen;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen);
	if (vseclen < CXL_VSEC_MIN_SIZE) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC too short\n");
		return -EINVAL;
	}

	CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status);
	CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev);
	CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major);
	CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor);
	CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
	CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
	adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
	adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);

	CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
	CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
	CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, &afu_desc_size);
	CXL_READ_VSEC_PS_OFF(dev, vsec, &ps_off);
	CXL_READ_VSEC_PS_SIZE(dev, vsec, &ps_size);

	/* Convert everything to bytes, because there is NO WAY I'd look at the
	 * code a month later and forget what units these are in ;-) */
	adapter->native->ps_off = ps_off * 64 * 1024;
	adapter->ps_size = ps_size * 64 * 1024;
	adapter->native->afu_desc_off = afu_desc_off * 64 * 1024;
	adapter->native->afu_desc_size = afu_desc_size * 64 * 1024;

	/* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */
	adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;

	return 0;
}
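/*
 * Worked example of the IRQ budget above: with 2 AFUs (slices) and, say,
 * 2037 hwirqs reported by the PHB (an illustrative number only), we get
 * user_irqs = 2037 - 1 - 2*2 = 2032 after reserving one IRQ for the PSL
 * error interrupt plus a slice-error/DSI pair per AFU.
 */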
/*
 * Workaround a PCIe Host Bridge defect on some cards, that can cause
 * malformed Transaction Layer Packet (TLP) errors to be erroneously
 * reported. Mask this error in the Uncorrectable Error Mask Register.
 *
 * The upper nibble of the PSL revision is used to distinguish between
 * different cards. The affected ones have it set to 0.
 */
static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev)
{
	int aer;
	u32 data;

	if (adapter->psl_rev & 0xf000)
		return;
	if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)))
		return;
	pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data);
	if (data & PCI_ERR_UNC_MALF_TLP)
		if (data & PCI_ERR_UNC_INTN)
			return;
	data |= PCI_ERR_UNC_MALF_TLP;
	data |= PCI_ERR_UNC_INTN;
	pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data);
}
static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
{
	if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
		return -EBUSY;

	if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) {
		dev_err(&dev->dev, "ABORTING: CXL requires unsupported features\n");
		return -EINVAL;
	}

	if (!adapter->slices) {
		/* Once we support dynamic reprogramming we can use the card if
		 * it supports loadable AFUs */
		dev_err(&dev->dev, "ABORTING: Device has no AFUs\n");
		return -EINVAL;
	}

	if (!adapter->native->afu_desc_off || !adapter->native->afu_desc_size) {
		dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n");
		return -EINVAL;
	}

	if (adapter->ps_size > p2_size(dev) - adapter->native->ps_off) {
		dev_err(&dev->dev, "ABORTING: Problem state size larger than "
				   "available in BAR2: 0x%llx > 0x%llx\n",
			adapter->ps_size, p2_size(dev) - adapter->native->ps_off);
		return -EINVAL;
	}

	return 0;
}
ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
{
	return pci_read_vpd(to_pci_dev(adapter->dev.parent), 0, len, buf);
}

static void cxl_release_adapter(struct device *dev)
{
	struct cxl *adapter = to_cxl_adapter(dev);

	pr_devel("cxl_release_adapter\n");

	cxl_remove_adapter_nr(adapter);

	kfree(adapter->native);
	kfree(adapter);
}
#define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))

static int sanitise_adapter_regs(struct cxl *adapter)
{
	/* Clear PSL tberror bit by writing 1 to it */
	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror);
	return cxl_tlb_slb_invalidate(adapter);
}
/* This should contain *only* operations that can safely be done in
 * both creation and recovery.
 */
static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
{
	int rc;

	adapter->dev.parent = &dev->dev;
	adapter->dev.release = cxl_release_adapter;
	pci_set_drvdata(dev, adapter);

	rc = pci_enable_device(dev);
	if (rc) {
		dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
		return rc;
	}

	if ((rc = cxl_read_vsec(adapter, dev)))
		return rc;

	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
		return rc;

	cxl_fixup_malformed_tlp(adapter, dev);

	if ((rc = setup_cxl_bars(dev)))
		return rc;

	if ((rc = switch_card_to_cxl(dev)))
		return rc;

	if ((rc = cxl_update_image_control(adapter)))
		return rc;

	if ((rc = cxl_map_adapter_regs(adapter, dev)))
		return rc;

	if ((rc = sanitise_adapter_regs(adapter)))
		goto err;

	if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev)))
		goto err;

	if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode)))
		goto err;

	/* If recovery happened, the last step is to turn on snooping.
	 * In the non-recovery case this has no effect */
	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
		goto err;

	/* Ignore error, adapter init is not dependent on timebase sync */
	cxl_setup_psl_timebase(adapter, dev);

	if ((rc = cxl_native_register_psl_err_irq(adapter)))
		goto err;

	return 0;

err:
	cxl_unmap_adapter_regs(adapter);
	return rc;
}
static void cxl_deconfigure_adapter(struct cxl *adapter)
{
	struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);

	cxl_native_release_psl_err_irq(adapter);
	cxl_unmap_adapter_regs(adapter);

	pci_disable_device(pdev);
}
static const struct cxl_service_layer_ops psl_ops = {
	.adapter_regs_init = init_implementation_adapter_psl_regs,
	.afu_regs_init = init_implementation_afu_psl_regs,
	.register_serr_irq = cxl_native_register_serr_irq,
	.release_serr_irq = cxl_native_release_serr_irq,
	.debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_psl_regs,
	.debugfs_add_afu_sl_regs = cxl_debugfs_add_afu_psl_regs,
	.psl_irq_dump_registers = cxl_native_psl_irq_dump_regs,
	.err_irq_dump_registers = cxl_native_err_irq_dump_regs,
	.debugfs_stop_trace = cxl_stop_trace,
	.write_timebase_ctrl = write_timebase_ctrl_psl,
	.timebase_read = timebase_read_psl,
	.capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
	.needs_reset_before_disable = true,
};

static const struct cxl_service_layer_ops xsl_ops = {
	.adapter_regs_init = init_implementation_adapter_xsl_regs,
	.debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_xsl_regs,
	.write_timebase_ctrl = write_timebase_ctrl_xsl,
	.timebase_read = timebase_read_xsl,
	.capi_mode = OPAL_PHB_CAPI_MODE_DMA,
	.needs_reset_before_disable = true,
};

static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
{
	if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) {
		dev_info(&adapter->dev, "Device uses an XSL\n");
		adapter->native->sl_ops = &xsl_ops;
	} else {
		dev_info(&adapter->dev, "Device uses a PSL\n");
		adapter->native->sl_ops = &psl_ops;
	}
}
static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
{
	struct cxl *adapter;
	int rc;

	adapter = cxl_alloc_adapter();
	if (!adapter)
		return ERR_PTR(-ENOMEM);

	adapter->native = kzalloc(sizeof(struct cxl_native), GFP_KERNEL);
	if (!adapter->native) {
		rc = -ENOMEM;
		goto err_release;
	}

	set_sl_ops(adapter, dev);

	/* Set defaults for parameters which need to persist over
	 * configure/reconfigure
	 */
	adapter->perst_loads_image = true;
	adapter->perst_same_image = false;

	rc = cxl_configure_adapter(adapter, dev);
	if (rc) {
		pci_disable_device(dev);
		cxl_release_adapter(&adapter->dev);
		return ERR_PTR(rc);
	}

	/* Don't care if this one fails: */
	cxl_debugfs_adapter_add(adapter);

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	return adapter;

err_put1:
	/* This should mirror cxl_remove_adapter, except without the
	 * sysfs parts
	 */
	cxl_debugfs_adapter_remove(adapter);
	cxl_deconfigure_adapter(adapter);
	device_unregister(&adapter->dev);
	return ERR_PTR(rc);

err_release:
	cxl_release_adapter(&adapter->dev);
	return ERR_PTR(rc);
}
static void cxl_pci_remove_adapter(struct cxl *adapter)
{
	pr_devel("cxl_remove_adapter\n");

	cxl_sysfs_adapter_remove(adapter);
	cxl_debugfs_adapter_remove(adapter);

	cxl_deconfigure_adapter(adapter);

	device_unregister(&adapter->dev);
}
#define CXL_MAX_PCIEX_PARENT 2

static int cxl_slot_is_switched(struct pci_dev *dev)
{
	struct device_node *np;
	int depth = 0;
	const __be32 *prop;

	if (!(np = pci_device_to_OF_node(dev))) {
		pr_err("cxl: np = NULL\n");
		return 3;
	}
	of_node_get(np);
	while (np) {
		np = of_get_next_parent(np);
		prop = of_get_property(np, "device_type", NULL);
		if (!prop || strcmp((char *)prop, "pciex"))
			break;
		depth++;
	}
	of_node_put(np);
	return (depth > CXL_MAX_PCIEX_PARENT);
}
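/*
 * In other words: walking up the device tree, more than
 * CXL_MAX_PCIEX_PARENT consecutive "pciex" parents means the card
 * presumably sits behind a PCIe switch rather than directly under the
 * PHB, and cxl_probe() below rejects the slot.
 */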
static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct cxl *adapter;
	int slice;
	int rc;

	if (cxl_pci_is_vphb_device(dev)) {
		dev_dbg(&dev->dev, "cxl_init_adapter: Ignoring cxl vphb device\n");
		return -ENODEV;
	}

	if (cxl_slot_is_switched(dev)) {
		dev_info(&dev->dev, "Ignoring card on incompatible PCI slot\n");
		return -ENODEV;
	}

	if (cxl_verbose)
		dump_cxl_config_space(dev);

	adapter = cxl_pci_init_adapter(dev);
	if (IS_ERR(adapter)) {
		dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
		return PTR_ERR(adapter);
	}

	for (slice = 0; slice < adapter->slices; slice++) {
		if ((rc = pci_init_afu(adapter, slice, dev))) {
			dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
			continue;
		}

		rc = cxl_afu_select_best_mode(adapter->afu[slice]);
		if (rc)
			dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
	}

	return 0;
}
static void cxl_remove(struct pci_dev *dev)
{
	struct cxl *adapter = pci_get_drvdata(dev);
	struct cxl_afu *afu;
	int i;

	/*
	 * Lock to prevent someone grabbing a ref through the adapter list as
	 * we are removing it
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];
		cxl_pci_remove_afu(afu);
	}
	cxl_pci_remove_adapter(adapter);
}
static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
						pci_channel_state_t state)
{
	struct pci_dev *afu_dev;
	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
	pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;

	/* There should only be one entry, but go through the list
	 * anyway
	 */
	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
		if (!afu_dev->driver)
			continue;

		afu_dev->error_state = state;

		if (afu_dev->driver->err_handler)
			afu_result = afu_dev->driver->err_handler->error_detected(afu_dev,
										  state);
		/* Disconnect trumps all, NONE trumps NEED_RESET */
		if (afu_result == PCI_ERS_RESULT_DISCONNECT)
			result = PCI_ERS_RESULT_DISCONNECT;
		else if ((afu_result == PCI_ERS_RESULT_NONE) &&
			 (result == PCI_ERS_RESULT_NEED_RESET))
			result = PCI_ERS_RESULT_NONE;
	}
	return result;
}
static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
	int i;

	/* At this point, we could still have an interrupt pending.
	 * Let's try to get them out of the way before they do
	 * anything we don't like.
	 */
	schedule();

	/* If we're permanently dead, give up. */
	if (state == pci_channel_io_perm_failure) {
		/* Tell the AFU drivers; but we don't care what they
		 * say, we're going away.
		 */
		for (i = 0; i < adapter->slices; i++) {
			afu = adapter->afu[i];
			cxl_vphb_error_detected(afu, state);
		}
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Are we reflashing?
	 *
	 * If we reflash, we could come back as something entirely
	 * different, including a non-CAPI card. As such, by default
	 * we don't participate in the process. We'll be unbound and
	 * the slot re-probed. (TODO: check EEH doesn't blindly rebind
	 * us!)
	 *
	 * However, this isn't the entire story: for reliability
	 * reasons, we usually want to reflash the FPGA on PERST in
	 * order to get back to a more reliable known-good state.
	 *
	 * This causes us a bit of a problem: if we reflash we can't
	 * trust that we'll come back the same - we could have a new
	 * image and been PERSTed in order to load that
	 * image. However, most of the time we actually *will* come
	 * back the same - for example a regular EEH event.
	 *
	 * Therefore, we allow the user to assert that the image is
	 * indeed the same and that we should continue on into EEH
	 * recovery.
	 */
	if (adapter->perst_loads_image && !adapter->perst_same_image) {
		/* TODO take the PHB out of CXL mode */
		dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n");
		return PCI_ERS_RESULT_NONE;
	}

	/*
	 * At this point, we want to try to recover. We'll always
	 * need a complete slot reset: we don't trust any other reset.
	 *
	 * Now, we go through each AFU:
	 *  - We send the driver, if bound, an error_detected callback.
	 *    We expect it to clean up, but it can also tell us to give
	 *    up and permanently detach the card. To simplify things, if
	 *    any bound AFU driver doesn't support EEH, we give up on EEH.
	 *
	 *  - We detach all contexts associated with the AFU. This
	 *    does not free them, but puts them into a CLOSED state
	 *    which causes the associated files to return useful
	 *    errors to userland. It also unmaps, but does not free,
	 *    any IRQs.
	 *
	 *  - We clean up our side: releasing and unmapping resources we hold
	 *    so we can wire them up again when the hardware comes back up.
	 *
	 * Driver authors should note:
	 *
	 *  - Any contexts you create in your kernel driver (except
	 *    those associated with anonymous file descriptors) are
	 *    your responsibility to free and recreate. Likewise with
	 *    any attached resources.
	 *
	 *  - We will take responsibility for re-initialising the
	 *    device context (the one set up for you in
	 *    cxl_pci_enable_device_hook and accessed through
	 *    cxl_get_context). If you've attached IRQs or other
	 *    resources to it, they remain yours to free.
	 *
	 * You can call the same functions to release resources as you
	 * normally would: we make sure that these functions continue
	 * to work when the hardware is down.
	 *
	 * Two examples:
	 *
	 * 1) If you normally free all your resources at the end of
	 *    each request, or if you use anonymous FDs, your
	 *    error_detected callback can simply set a flag to tell
	 *    your driver not to start any new calls. You can then
	 *    clear the flag in the resume callback.
	 *
	 * 2) If you normally allocate your resources on startup:
	 *    * Set a flag in error_detected as above.
	 *    * Let CXL detach your contexts.
	 *    * In slot_reset, free the old resources and allocate new ones.
	 *    * In resume, clear the flag to allow things to start.
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		result = cxl_vphb_error_detected(afu, state);

		/* Only continue if everyone agrees on NEED_RESET */
		if (result != PCI_ERS_RESULT_NEED_RESET)
			return result;

		cxl_context_detach_all(afu);
		cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
		pci_deconfigure_afu(afu);
	}
	cxl_deconfigure_adapter(adapter);

	return result;
}
static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	struct pci_dev *afu_dev;
	pci_ers_result_t afu_result = PCI_ERS_RESULT_RECOVERED;
	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
	int i;

	if (cxl_configure_adapter(adapter, pdev))
		goto err;

	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		if (pci_configure_afu(afu, adapter, pdev))
			goto err;

		if (cxl_afu_select_best_mode(afu))
			goto err;

		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
			/* Reset the device context.
			 * TODO: make this less disruptive
			 */
			ctx = cxl_get_context(afu_dev);

			if (ctx && cxl_release_context(ctx))
				goto err;

			ctx = cxl_dev_context_init(afu_dev);
			if (IS_ERR(ctx))
				goto err;

			afu_dev->dev.archdata.cxl_ctx = ctx;

			if (cxl_ops->afu_check_and_enable(afu))
				goto err;

			afu_dev->error_state = pci_channel_io_normal;

			/* If there's a driver attached, allow it to
			 * chime in on recovery. Drivers should check
			 * if everything has come back OK, but
			 * shouldn't start new work until we call
			 * their resume function.
			 */
			if (!afu_dev->driver)
				continue;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->slot_reset)
				afu_result = afu_dev->driver->err_handler->slot_reset(afu_dev);

			if (afu_result == PCI_ERS_RESULT_DISCONNECT)
				result = PCI_ERS_RESULT_DISCONNECT;
		}
	}
	return result;

err:
	/* All the bits that happen in both error_detected and cxl_remove
	 * should be idempotent, so we don't need to worry about leaving a mix
	 * of unconfigured and reconfigured resources.
	 */
	dev_err(&pdev->dev, "EEH recovery failed. Asking to be disconnected.\n");
	return PCI_ERS_RESULT_DISCONNECT;
}
static void cxl_pci_resume(struct pci_dev *pdev)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	struct pci_dev *afu_dev;
	int i;

	/* Everything is back now. Drivers should restart work now.
	 * This is not the place to be checking if everything came back up
	 * properly, because there's no return value: do that in slot_reset.
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
			if (afu_dev->driver && afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->resume)
				afu_dev->driver->err_handler->resume(afu_dev);
		}
	}
}
static const struct pci_error_handlers cxl_err_handler = {
	.error_detected = cxl_pci_error_detected,
	.slot_reset = cxl_pci_slot_reset,
	.resume = cxl_pci_resume,
};

struct pci_driver cxl_pci_driver = {
	.name = "cxl-pci",
	.id_table = cxl_pci_tbl,
	.probe = cxl_probe,
	.remove = cxl_remove,
	.shutdown = cxl_remove,
	.err_handler = &cxl_err_handler,
};