/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "hfi.h"
#include "trace.h"
#include "mad.h"
#include "pio.h"
#include "sdma.h"
#include "eprom.h"
#include "efivar.h"

#define NUM_IB_PORTS 1

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");

uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");

/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */

struct flag_table {
        u64 flag;       /* the flag */
        char *str;      /* description string */
        u16 extra;      /* extra information */
        u16 unused0;
        u32 unused1;
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}

/* Send Error Consequences */
#define SEC_WRITE_DROPPED       0x1
#define SEC_PACKET_DROPPED      0x2
#define SEC_SC_HALTED           0x4     /* per-context only */
#define SEC_SPC_FREEZE          0x8     /* per-HFI only */

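/*
 * For illustration only (not part of the driver logic): FLAG_ENTRY simply
 * builds a struct flag_table initializer in {flag, str, extra} order, and
 * the SEC_* bits above are what typically ends up in the "extra" field of
 * the send error tables below.  For example, the PioCsrParity entry in
 * pio_err_status_flags,
 *
 *      FLAG_ENTRY("PioCsrParity", SEC_SPC_FREEZE,
 *                 SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK)
 *
 * expands to
 *
 *      { SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK,
 *        "PioCsrParity", SEC_SPC_FREEZE }
 */
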
#define MIN_KERNEL_KCTXTS         2
#define FIRST_KERNEL_KCTXT        1
#define NUM_MAP_REGS             32

/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT     39

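/*
 * Illustration only: with the shift above, the HFI id portion of a node
 * GUID can be recovered with something like
 *
 *      hfi_idx = (guid >> GUID_HFI_INDEX_SHIFT) & 0x1;
 *
 * The single-bit width used here is an assumption for the example; only
 * the bit offset is defined above.
 */
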
/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)

/* RSM fields */

/* packet type */
#define IB_PACKET_TYPE         2ull
#define QW_SHIFT               6ull
/* QPN[7..1] */
#define QPN_WIDTH              7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW             0ull
#define LRH_BTH_BIT_OFFSET     48ull
#define LRH_BTH_OFFSET(off)    ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET   LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK           3ull
#define LRH_BTH_VALUE          2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW              0ull
#define LRH_SC_BIT_OFFSET      56ull
#define LRH_SC_OFFSET(off)     ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET    LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK            128ull
#define LRH_SC_VALUE           0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET  ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET      ((1ull << QW_SHIFT) | (1ull))

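/*
 * Illustration only: the RSM match/select offsets above encode a quadword
 * number in the bits at and above QW_SHIFT and a bit offset within that
 * quadword below it, i.e. offset = (qw << QW_SHIFT) | bit.  Working the
 * values above through:
 *
 *      LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48
 *      LRH_SC_MATCH_OFFSET  = (0 << 6) | 56 = 56
 *      QPN_SELECT_OFFSET    = (1 << 6) |  1 = 65
 */
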
/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
        num, \
        sc0, sc0val, \
        sc1, sc1val, \
        sc2, sc2val, \
        sc3, sc3val, \
        sc4, sc4val, \
        sc5, sc5val, \
        sc6, sc6val, \
        sc7, sc7val) \
( \
        ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
        ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
        ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
        ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
        ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
        ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
        ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
        ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
)

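/*
 * Illustration only: SC2VL_VAL packs eight (SC, VL) pairs into one 64-bit
 * register value by shifting each VL value into its SC field.  A purely
 * hypothetical invocation for table 0, mapping SC0..SC7 to VL0..VL7,
 *
 *      reg = SC2VL_VAL(0,
 *                      0, 0, 1, 1, 2, 2, 3, 3,
 *                      4, 4, 5, 5, 6, 6, 7, 7);
 *
 * ORs each value into place via the SEND_SC2VLT0_SC<n>_SHIFT macros.
 * DC_SC_VL_VAL below follows the same pattern for the 16-entry DCC table.
 */
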
#define DC_SC_VL_VAL( \
        range, \
        e0, e0val, \
        e1, e1val, \
        e2, e2val, \
        e3, e3val, \
        e4, e4val, \
        e5, e5val, \
        e6, e6val, \
        e7, e7val, \
        e8, e8val, \
        e9, e9val, \
        e10, e10val, \
        e11, e11val, \
        e12, e12val, \
        e13, e13val, \
        e14, e14val, \
        e15, e15val) \
( \
        ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
        ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
        ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
        ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
        ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
        ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
        ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
        ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
        ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
        ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
        ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
        ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
        ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
        ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
        ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
        ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
)

/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
                        | CCE_STATUS_RXE_FROZE_SMASK \
                        | CCE_STATUS_TXE_FROZE_SMASK \
                        | CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
                        | CCE_STATUS_TXE_PAUSED_SMASK \
                        | CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

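/*
 * Sketch only (assumed usage, not the driver's actual routine): the masks
 * above are meant to be polled against CceStatus, e.g. when freezing the
 * SPC one would wait until every sub-block reports frozen:
 *
 *      while ((read_csr(dd, CCE_STATUS) & ALL_FROZE) != ALL_FROZE)
 *              udelay(2);
 *
 * with a suitable timeout; the driver's own wait/handling logic is
 * expected to live elsewhere in this file.
 */
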
/*
 * CCE Error flags.
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/  FLAG_ENTRY0("CceCsrParityErr",
                CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/  FLAG_ENTRY0("CceCsrReadBadAddrErr",
                CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/  FLAG_ENTRY0("CceCsrWriteBadAddrErr",
                CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/  FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
                CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/  FLAG_ENTRY0("CceTrgtAccessErr",
                CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/  FLAG_ENTRY0("CceRspdDataParityErr",
                CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/  FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
                CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/  FLAG_ENTRY0("CceCsrCfgBusParityErr",
                CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/  FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
                CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/  FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
            CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/  FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
            CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/  FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
            CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/  FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
                CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/  FLAG_ENTRY0("PcicRetryMemCorErr",
                CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/  FLAG_ENTRY0("PcicRetrySotMemCorErr",
                CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/  FLAG_ENTRY0("PcicPostHdQCorErr",
                CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/  FLAG_ENTRY0("PcicPostDatQCorErr",
                CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/  FLAG_ENTRY0("PcicCplHdQCorErr",
                CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/  FLAG_ENTRY0("PcicCplDatQCorErr",
                CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/  FLAG_ENTRY0("PcicNPostHQParityErr",
                CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/  FLAG_ENTRY0("PcicNPostDatQParityErr",
                CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/  FLAG_ENTRY0("PcicRetryMemUncErr",
                CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/  FLAG_ENTRY0("PcicRetrySotMemUncErr",
                CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/  FLAG_ENTRY0("PcicPostHdQUncErr",
                CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/  FLAG_ENTRY0("PcicPostDatQUncErr",
                CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/  FLAG_ENTRY0("PcicCplHdQUncErr",
                CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/  FLAG_ENTRY0("PcicCplDatQUncErr",
                CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/  FLAG_ENTRY0("PcicTransmitFrontParityErr",
                CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/  FLAG_ENTRY0("PcicTransmitBackParityErr",
                CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/  FLAG_ENTRY0("PcicReceiveParityErr",
                CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/  FLAG_ENTRY0("CceTrgtCplTimeoutErr",
                CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/  FLAG_ENTRY0("LATriggered",
                CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/  FLAG_ENTRY0("CceSegReadBadAddrErr",
                CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/  FLAG_ENTRY0("CceSegWriteBadAddrErr",
                CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/  FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
                CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/  FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
                CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/  FLAG_ENTRY0("CceMsixTableCorErr",
                CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/  FLAG_ENTRY0("CceMsixTableUncErr",
                CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/  FLAG_ENTRY0("CceIntMapCorErr",
                CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/  FLAG_ENTRY0("CceIntMapUncErr",
                CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/  FLAG_ENTRY0("CceMsixCsrParityErr",
                CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
/*41-63 reserved*/
};

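/*
 * Sketch only (the driver has its own reporting helpers): a table such as
 * cce_err_status_flags is meant to be scanned bit-by-bit against a raw
 * error status value to build a human-readable report, roughly:
 *
 *      static void example_decode(u64 status, struct flag_table *tbl, int n)
 *      {
 *              int i;
 *
 *              for (i = 0; i < n; i++)
 *                      if (status & tbl[i].flag)
 *                              pr_info("%s\n", tbl[i].str);
 *      }
 *
 * The remaining flag tables in this file are consumed the same way by
 * their respective interrupt handlers.
 */
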
/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/  FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/  FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/  FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/  FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/  FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/  FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/  FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/  FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/  FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/  FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/  FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/  FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/  FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};

/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/  FLAG_ENTRY("PioWriteBadCtxt",
        SEC_WRITE_DROPPED,
        SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/  FLAG_ENTRY("PioWriteAddrParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/  FLAG_ENTRY("PioCsrParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/  FLAG_ENTRY("PioSbMemFifo0",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/  FLAG_ENTRY("PioSbMemFifo1",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/  FLAG_ENTRY("PioPccFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/  FLAG_ENTRY("PioPecFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/  FLAG_ENTRY("PioSbrdctlCrrelParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/  FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/  FLAG_ENTRY("PioPktEvictFifoParityErr",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/  FLAG_ENTRY("PioSmPktResetParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/  FLAG_ENTRY("PioVlLenMemBank0Unc",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/  FLAG_ENTRY("PioVlLenMemBank1Unc",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/  FLAG_ENTRY("PioVlLenMemBank0Cor",
        0,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/  FLAG_ENTRY("PioVlLenMemBank1Cor",
        0,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/  FLAG_ENTRY("PioCreditRetFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/  FLAG_ENTRY("PioPpmcPblFifo",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/  FLAG_ENTRY("PioInitSmIn",
        0,
        SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/  FLAG_ENTRY("PioPktEvictSmOrArbSm",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/  FLAG_ENTRY("PioHostAddrMemUnc",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/  FLAG_ENTRY("PioHostAddrMemCor",
        0,
        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/  FLAG_ENTRY("PioWriteDataParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/  FLAG_ENTRY("PioStateMachine",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/  FLAG_ENTRY("PioWriteQwValidParity",
        SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/  FLAG_ENTRY("PioBlockQwCountParity",
        SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/  FLAG_ENTRY("PioVlfVlLenParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/  FLAG_ENTRY("PioVlfSopParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/  FLAG_ENTRY("PioVlFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/  FLAG_ENTRY("PioPpmcBqcMemParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/  FLAG_ENTRY("PioPpmcSopLen",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/  FLAG_ENTRY("PioCurrentFreeCntParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/  FLAG_ENTRY("PioLastReturnedCntParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/  FLAG_ENTRY("PioPccSopHeadParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/  FLAG_ENTRY("PioPecSopHeadParityErr",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
/*36-63 reserved*/
};

/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
        (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)

/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/  FLAG_ENTRY0("SDmaRpyTagErr",
                SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/  FLAG_ENTRY0("SDmaCsrParityErr",
                SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/  FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
                SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/  FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
                SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
/*04-63 reserved*/
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR  \
                (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
                | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
                | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/  FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/  FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/  FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
                SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/  FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/  FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/  FLAG_ENTRY0("TxPioLaunchIntfParityErr",
                SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/  FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
                SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/  FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
                SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/  FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/  FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/  FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/  FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/  FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
                SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/  FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
                SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/  FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
                SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/  FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
                SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/  FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
                SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/  FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
                SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/  FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
                SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/  FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
                SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/  FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
                SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/  FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
                SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/  FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
                SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/  FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
                SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/  FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
                SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/  FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
                SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/  FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
                SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/  FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
                SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/  FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
                SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/  FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
                SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/  FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
                SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/  FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
                SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/  FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
                SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/  FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
                SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/  FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
                SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/  FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
                SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/  FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
                SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/  FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/  FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/  FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/  FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/  FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/  FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/  FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/  FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/  FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/  FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/  FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/  FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/  FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/  FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/  FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/  FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/  FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/  FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/  FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/  FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/  FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/  FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
                SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/  FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
                SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};

/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/  FLAG_ENTRY0("Reserved", 0ull),
/* 1*/  FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/  FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/  FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/  FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/  FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/  FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/  FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/  FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/  FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/  FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/  FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/  FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/  FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/  FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/  FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/  FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/  FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/  FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/  FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};

/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
        (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
        | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
        | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
        | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
        | SEES(TX_LAUNCH_CSR_PARITY) \
        | SEES(TX_SBRD_CTL_CSR_PARITY) \
        | SEES(TX_CONFIG_PARITY) \
        | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
        | SEES(TX_CREDIT_RETURN_PARITY))

/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/  FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/  FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/  FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};

/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/  FLAG_ENTRY("InconsistentSop",
                SEC_PACKET_DROPPED | SEC_SC_HALTED,
                SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/  FLAG_ENTRY("DisallowedPacket",
                SEC_PACKET_DROPPED | SEC_SC_HALTED,
                SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/  FLAG_ENTRY("WriteCrossesBoundary",
                SEC_WRITE_DROPPED | SEC_SC_HALTED,
                SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/  FLAG_ENTRY("WriteOverflow",
                SEC_WRITE_DROPPED | SEC_SC_HALTED,
                SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/  FLAG_ENTRY("WriteOutOfBounds",
                SEC_WRITE_DROPPED | SEC_SC_HALTED,
                SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
/* 5-63 reserved*/
};

/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/  FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/  FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/  FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/  FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/  FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/  FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/  FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/  FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/  FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/  FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/  FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/  FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/  FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/  FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/  FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/  FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/  FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
                RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/  FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/  FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/  FLAG_ENTRY0("RxRbufBlockListReadUncErr",
                RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/  FLAG_ENTRY0("RxRbufBlockListReadCorErr",
                RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/  FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
                RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/  FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
                RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/  FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
                RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/  FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
                RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/  FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/  FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/  FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
                RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/  FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/  FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/  FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/  FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/  FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/  FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/  FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/  FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
                RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/  FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
                RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/  FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/  FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/  FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/  FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
                RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/  FLAG_ENTRY0("RxLookupDesPart2ParityErr",
                RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/  FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/  FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/  FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/  FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/  FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/  FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/  FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/  FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/  FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/  FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/  FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/  FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/  FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/  FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/  FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/  FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/  FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/  FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/  FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/  FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/  FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/  FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};

/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR  \
        (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
        (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
        RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
        RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)

/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
        FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
        FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
        FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
        FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
        FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
        FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
        FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
        FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
        FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
        FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
        FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
        FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
        FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
        FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
        FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
        FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
        FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
        FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
        FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
        FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
        FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
        FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
        FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
        FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
        FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
        FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
        FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
        FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
        FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
        FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
        FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
        FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
        FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
        FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
        FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
        FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
        FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
        FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
        FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
        FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
        FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
        FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
        FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
        FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
        FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
        FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};

/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/  FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/  FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/  FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/  FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
                LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/  FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/  FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/  FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/  FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/  FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/  FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/  FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/  FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/  FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/  FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
                LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/  FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/  FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/  FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/  FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/  FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/  FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
                LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/  FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/  FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/  FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/  FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/  FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/  FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/  FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
                LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/  FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/  FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
                LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/  FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
                LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
        FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
        FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
        FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
        FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
        FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
        FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
        FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
        FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
        FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
                D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
        FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};

/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
        FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
        FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
        FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
        FLAG_ENTRY0("Serdes internal loopback failure",
                                        FAILED_SERDES_INTERNAL_LOOPBACK),
        FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
        FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
        FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
        FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
        FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
        FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
        FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
        FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT)
};

/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
        FLAG_ENTRY0("Host request done", 0x0001),
        FLAG_ENTRY0("BC SMA message", 0x0002),
        FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
        FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
        FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
        FLAG_ENTRY0("External device config request", 0x0020),
        FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
        FLAG_ENTRY0("LinkUp achieved", 0x0080),
        FLAG_ENTRY0("Link going down", 0x0100),
};


static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
                               u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
                                  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
                                      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
                                     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
                                  u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
                            u8 *tx_polarity_inversion,
                            u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
                                unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
                           unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
                           unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
                                          u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
                           u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
                                  int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void handle_temp_err(struct hfi1_devdata *);
static void dc_shutdown(struct hfi1_devdata *);
static void dc_start(struct hfi1_devdata *);

/*
 * Error interrupt table entry.  This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
        u32 status;             /* status CSR offset */
        u32 clear;              /* clear CSR offset */
        u32 mask;               /* mask CSR offset */
        void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
        const char *desc;
};

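/*
 * Sketch only (assumed shape, not the driver's exact code): a generic
 * "clear down" for an err_reg_info entry reads the status CSR, writes the
 * same value back to the clear CSR, and hands the bits to the handler:
 *
 *      static void example_clear_down(struct hfi1_devdata *dd,
 *                                     const struct err_reg_info *eri,
 *                                     u32 source)
 *      {
 *              u64 reg = read_csr(dd, eri->status);
 *
 *              write_csr(dd, eri->clear, reg);
 *              if (eri->handler)
 *                      eri->handler(dd, source, reg);
 *      }
 */
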
1045 #define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1046 #define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1047 #define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1048
1049 /*
1050  * Helpers for building HFI and DC error interrupt table entries.  Different
1051  * helpers are needed because of inconsistent register names.
1052  */
1053 #define EE(reg, handler, desc) \
1054         { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1055                 handler, desc }
1056 #define DC_EE1(reg, handler, desc) \
1057         { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1058 #define DC_EE2(reg, handler, desc) \
1059         { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
1060
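/*
 * Illustration only: EE() fills an err_reg_info from the common CSR naming
 * pattern, so the misc_errs entry below
 *
 *      EE(CCE_ERR, handle_cce_err, "CceErr")
 *
 * expands to
 *
 *      { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 *        handle_cce_err, "CceErr" }
 *
 * while DC_EE1/DC_EE2 cover the DC blocks whose CSRs use _FLG/_CLR/_EN
 * style names instead.
 */
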
/*
 * Table of the "misc" grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/  EE(CCE_ERR,             handle_cce_err,    "CceErr"),
/* 1*/  EE(RCV_ERR,             handle_rxe_err,    "RxeErr"),
/* 2*/  EE(MISC_ERR,    handle_misc_err,   "MiscErr"),
/* 3*/  { 0, 0, 0, NULL }, /* reserved */
/* 4*/  EE(SEND_PIO_ERR,    handle_pio_err,    "PioErr"),
/* 5*/  EE(SEND_DMA_ERR,    handle_sdma_err,   "SDmaErr"),
/* 6*/  EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/  EE(SEND_ERR,    handle_txe_err,    "TxeErr")
        /* the rest are reserved */
};

/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4

/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
        EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");

static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/  { 0, 0, 0, NULL }, /* PbcInt */
/* 1*/  { 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/  EE(ASIC_QSFP1,  handle_qsfp_int,        "QSFP1"),
/* 3*/  EE(ASIC_QSFP2,  handle_qsfp_int,        "QSFP2"),
/* 4*/  { 0, 0, 0, NULL }, /* TCritInt */
        /* rest are reserved */
};

1099 /*
1100  * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1101  * register cannot be derived from the MTU value because 10K is not
1102  * a power of 2. Therefore, we need a constant. Everything else can
1103  * be calculated.
1104  */
1105 #define DCC_CFG_PORT_MTU_CAP_10240 7
1106
1107 /*
1108  * Table of the DC grouping of error interrupts.  Each entry refers to
1109  * another register containing more information.
1110  */
1111 static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1112 /* 0*/  DC_EE1(DCC_ERR,         handle_dcc_err,        "DCC Err"),
1113 /* 1*/  DC_EE2(DC_LCB_ERR,      handle_lcb_err,        "LCB Err"),
1114 /* 2*/  DC_EE2(DC_DC8051_ERR,   handle_8051_interrupt, "DC8051 Interrupt"),
1115 /* 3*/  /* dc_lbm_int - special, see is_dc_int() */
1116         /* the rest are reserved */
1117 };
1118
1119 struct cntr_entry {
1120         /*
1121          * counter name
1122          */
1123         char *name;
1124
1125         /*
1126          * csr to read for this counter (if applicable)
1127          */
1128         u64 csr;
1129
1130         /*
1131          * offset into dd or ppd to store the counter's value
1132          */
1133         int offset;
1134
1135         /*
1136          * flags
1137          */
1138         u8 flags;
1139
1140         /*
1141          * accessor for the stat element; the context is either dd or ppd
1142          */
1143         u64 (*rw_cntr)(const struct cntr_entry *,
1144                                void *context,
1145                                int vl,
1146                                int mode,
1147                                u64 data);
1148 };
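/*
 * Minimal usage sketch (illustrative, not taken from the counter code
 * below): reading a device-scoped, non-per-VL entry through its accessor.
 * "entry" is assumed to point into a counter table built with the
 * CNTR_ELEM() helpers that follow.
 *
 *	u64 val = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
 */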
1149
1150 #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1151 #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1152
1153 #define CNTR_ELEM(name, csr, offset, flags, accessor) \
1154 { \
1155         name, \
1156         csr, \
1157         offset, \
1158         flags, \
1159         accessor \
1160 }
1161
1162 /* 32bit RXE */
1163 #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1164 CNTR_ELEM(#name, \
1165           (counter * 8 + RCV_COUNTER_ARRAY32), \
1166           0, flags | CNTR_32BIT, \
1167           port_access_u32_csr)
1168
1169 #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1170 CNTR_ELEM(#name, \
1171           (counter * 8 + RCV_COUNTER_ARRAY32), \
1172           0, flags | CNTR_32BIT, \
1173           dev_access_u32_csr)
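/*
 * Worked example (the counter name "FooCnt" and index 3 are made up):
 *
 *   RXE32_DEV_CNTR_ELEM(FooCnt, 3, CNTR_NORMAL)
 *     -> { "FooCnt", (3 * 8 + RCV_COUNTER_ARRAY32), 0,
 *          CNTR_NORMAL | CNTR_32BIT, dev_access_u32_csr }
 *
 * i.e. each counter sits at an 8-byte stride from the base of the 32-bit
 * receive counter array and is read through the 32-bit device accessor.
 */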
1174
1175 /* 64bit RXE */
1176 #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1177 CNTR_ELEM(#name, \
1178           (counter * 8 + RCV_COUNTER_ARRAY64), \
1179           0, flags, \
1180           port_access_u64_csr)
1181
1182 #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1183 CNTR_ELEM(#name, \
1184           (counter * 8 + RCV_COUNTER_ARRAY64), \
1185           0, flags, \
1186           dev_access_u64_csr)
1187
1188 #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1189 #define OVR_ELM(ctx) \
1190 CNTR_ELEM("RcvHdrOvr" #ctx, \
1191           (RCV_HDR_OVFL_CNT + ctx*0x100), \
1192           0, CNTR_NORMAL, port_access_u64_csr)
1193
1194 /* 32bit TXE */
1195 #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1196 CNTR_ELEM(#name, \
1197           (counter * 8 + SEND_COUNTER_ARRAY32), \
1198           0, flags | CNTR_32BIT, \
1199           port_access_u32_csr)
1200
1201 /* 64bit TXE */
1202 #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1203 CNTR_ELEM(#name, \
1204           (counter * 8 + SEND_COUNTER_ARRAY64), \
1205           0, flags, \
1206           port_access_u64_csr)
1207
1208 #define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1209 CNTR_ELEM(#name, \
1210           (counter * 8 + SEND_COUNTER_ARRAY64), \
1211           0, \
1212           flags, \
1213           dev_access_u64_csr)
1214
1215 /* CCE */
1216 #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1217 CNTR_ELEM(#name, \
1218           (counter * 8 + CCE_COUNTER_ARRAY32), \
1219           0, flags | CNTR_32BIT, \
1220           dev_access_u32_csr)
1221
1222 #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1223 CNTR_ELEM(#name, \
1224           (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1225           0, flags | CNTR_32BIT, \
1226           dev_access_u32_csr)
1227
1228 /* DC */
1229 #define DC_PERF_CNTR(name, counter, flags) \
1230 CNTR_ELEM(#name, \
1231           counter, \
1232           0, \
1233           flags, \
1234           dev_access_u64_csr)
1235
1236 #define DC_PERF_CNTR_LCB(name, counter, flags) \
1237 CNTR_ELEM(#name, \
1238           counter, \
1239           0, \
1240           flags, \
1241           dc_access_lcb_cntr)
1242
1243 /* ibp counters */
1244 #define SW_IBP_CNTR(name, cntr) \
1245 CNTR_ELEM(#name, \
1246           0, \
1247           0, \
1248           CNTR_SYNTH, \
1249           access_ibp_##cntr)
1250
1251 u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1252 {
1253         u64 val;
1254
1255         if (dd->flags & HFI1_PRESENT) {
1256                 val = readq((void __iomem *)dd->kregbase + offset);
1257                 return val;
1258         }
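        /* not present: -1 converts to a u64 of all ones */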
1259         return -1;
1260 }
1261
1262 void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1263 {
1264         if (dd->flags & HFI1_PRESENT)
1265                 writeq(value, (void __iomem *)dd->kregbase + offset);
1266 }
1267
1268 void __iomem *get_csr_addr(
1269         struct hfi1_devdata *dd,
1270         u32 offset)
1271 {
1272         return (void __iomem *)dd->kregbase + offset;
1273 }
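/*
 * Usage sketch (illustrative; CCE_REVISION is assumed to be one of the CSR
 * offsets from the chip register definitions): read_csr() returns the
 * register value when the device is present, otherwise all ones.
 *
 *	u64 rev = read_csr(dd, CCE_REVISION);
 *
 *	if (rev == ~0ull)
 *		dd_dev_err(dd, "device not present\n");
 */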
1274
1275 static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1276                                  int mode, u64 value)
1277 {
1278         u64 ret;
1279
1281         if (mode == CNTR_MODE_R) {
1282                 ret = read_csr(dd, csr);
1283         } else if (mode == CNTR_MODE_W) {
1284                 write_csr(dd, csr, value);
1285                 ret = value;
1286         } else {
1287                 dd_dev_err(dd, "Invalid cntr register access mode");
1288                 return 0;
1289         }
1290
1291         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1292         return ret;
1293 }
1294
1295 /* Dev Access */
1296 static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1297                             void *context, int vl, int mode, u64 data)
1298 {
1299         struct hfi1_devdata *dd = context;
1300
1301         if (vl != CNTR_INVALID_VL)
1302                 return 0;
1303         return read_write_csr(dd, entry->csr, mode, data);
1304 }
1305
1306 static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1307                             int vl, int mode, u64 data)
1308 {
1309         struct hfi1_devdata *dd = context;
1310
1311         u64 val = 0;
1312         u64 csr = entry->csr;
1313
1314         if (entry->flags & CNTR_VL) {
1315                 if (vl == CNTR_INVALID_VL)
1316                         return 0;
1317                 csr += 8 * vl;
1318         } else {
1319                 if (vl != CNTR_INVALID_VL)
1320                         return 0;
1321         }
1322
1323         val = read_write_csr(dd, csr, mode, data);
1324         return val;
1325 }
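/*
 * Note on the per-VL case above (editorial; the layout is inferred from the
 * stride used here): the per-VL counters are consecutive 8-byte CSRs, so
 * "csr += 8 * vl" selects the instance for the requested VL, e.g. vl == 3
 * reads entry->csr + 24.
 */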
1326
1327 static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1328                             int vl, int mode, u64 data)
1329 {
1330         struct hfi1_devdata *dd = context;
1331         u32 csr = entry->csr;
1332         int ret = 0;
1333
1334         if (vl != CNTR_INVALID_VL)
1335                 return 0;
1336         if (mode == CNTR_MODE_R)
1337                 ret = read_lcb_csr(dd, csr, &data);
1338         else if (mode == CNTR_MODE_W)
1339                 ret = write_lcb_csr(dd, csr, data);
1340
1341         if (ret) {
1342                 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1343                 return 0;
1344         }
1345
1346         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1347         return data;
1348 }
1349
1350 /* Port Access */
1351 static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1352                              int vl, int mode, u64 data)
1353 {
1354         struct hfi1_pportdata *ppd = context;
1355
1356         if (vl != CNTR_INVALID_VL)
1357                 return 0;
1358         return read_write_csr(ppd->dd, entry->csr, mode, data);
1359 }
1360
1361 static u64 port_access_u64_csr(const struct cntr_entry *entry,
1362                              void *context, int vl, int mode, u64 data)
1363 {
1364         struct hfi1_pportdata *ppd = context;
1365         u64 val;
1366         u64 csr = entry->csr;
1367
1368         if (entry->flags & CNTR_VL) {
1369                 if (vl == CNTR_INVALID_VL)
1370                         return 0;
1371                 csr += 8 * vl;
1372         } else {
1373                 if (vl != CNTR_INVALID_VL)
1374                         return 0;
1375         }
1376         val = read_write_csr(ppd->dd, csr, mode, data);
1377         return val;
1378 }
1379
1380 /* Software defined */
1381 static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1382                                 u64 data)
1383 {
1384         u64 ret;
1385
1386         if (mode == CNTR_MODE_R) {
1387                 ret = *cntr;
1388         } else if (mode == CNTR_MODE_W) {
1389                 *cntr = data;
1390                 ret = data;
1391         } else {
1392                 dd_dev_err(dd, "Invalid cntr sw access mode");
1393                 return 0;
1394         }
1395
1396         hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1397
1398         return ret;
1399 }
1400
1401 static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1402                                int vl, int mode, u64 data)
1403 {
1404         struct hfi1_pportdata *ppd = context;
1405
1406         if (vl != CNTR_INVALID_VL)
1407                 return 0;
1408         return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1409 }
1410
1411 static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1412                                int vl, int mode, u64 data)
1413 {
1414         struct hfi1_pportdata *ppd = context;
1415
1416         if (vl != CNTR_INVALID_VL)
1417                 return 0;
1418         return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1419 }
1420
1421 static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1422                                        void *context, int vl, int mode,
1423                                        u64 data)
1424 {
1425         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1426
1427         if (vl != CNTR_INVALID_VL)
1428                 return 0;
1429         return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1430 }
1431
1432 static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1433                                     void *context, int vl, int mode, u64 data)
1434 {
1435         struct hfi1_pportdata *ppd = context;
1436
1437         if (vl != CNTR_INVALID_VL)
1438                 return 0;
1439
1440         return read_write_sw(ppd->dd, &ppd->port_xmit_discards, mode, data);
1441 }
1442
1443 static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1444                                      void *context, int vl, int mode, u64 data)
1445 {
1446         struct hfi1_pportdata *ppd = context;
1447
1448         if (vl != CNTR_INVALID_VL)
1449                 return 0;
1450
1451         return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1452                              mode, data);
1453 }
1454
1455 static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1456                                      void *context, int vl, int mode, u64 data)
1457 {
1458         struct hfi1_pportdata *ppd = context;
1459
1460         if (vl != CNTR_INVALID_VL)
1461                 return 0;
1462
1463         return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1464                              mode, data);
1465 }
1466
1467 u64 get_all_cpu_total(u64 __percpu *cntr)
1468 {
1469         int cpu;
1470         u64 counter = 0;
1471
1472         for_each_possible_cpu(cpu)
1473                 counter += *per_cpu_ptr(cntr, cpu);
1474         return counter;
1475 }
1476
1477 static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1478                           u64 __percpu *cntr,
1479                           int vl, int mode, u64 data)
1480 {
1482         u64 ret = 0;
1483
1484         if (vl != CNTR_INVALID_VL)
1485                 return 0;
1486
1487         if (mode == CNTR_MODE_R) {
1488                 ret = get_all_cpu_total(cntr) - *z_val;
1489         } else if (mode == CNTR_MODE_W) {
1490                 /* A write can only zero the counter */
1491                 if (data == 0)
1492                         *z_val = get_all_cpu_total(cntr);
1493                 else
1494                         dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1495         } else {
1496                 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1497                 return 0;
1498         }
1499
1500         return ret;
1501 }
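/*
 * Note (editorial): the per-CPU counters are never cleared in place.
 * "Zeroing" records the current total in *z_val, and subsequent reads
 * report total - *z_val.  For example, the interrupt counter is reset by:
 *
 *	read_write_cpu(dd, &dd->z_int_counter, dd->int_counter,
 *		       CNTR_INVALID_VL, CNTR_MODE_W, 0);
 */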
1502
1503 static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1504                               void *context, int vl, int mode, u64 data)
1505 {
1506         struct hfi1_devdata *dd = context;
1507
1508         return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1509                               mode, data);
1510 }
1511
1512 static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1513                               void *context, int vl, int mode, u64 data)
1514 {
1515         struct hfi1_devdata *dd = context;
1516
1517         return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1518                               mode, data);
1519 }
1520
1521 static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1522                               void *context, int vl, int mode, u64 data)
1523 {
1524         struct hfi1_devdata *dd = context;
1525
1526         return dd->verbs_dev.n_piowait;
1527 }
1528
1529 static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1530                               void *context, int vl, int mode, u64 data)
1531 {
1532         struct hfi1_devdata *dd = context;
1533
1534         return dd->verbs_dev.n_txwait;
1535 }
1536
1537 static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1538                                void *context, int vl, int mode, u64 data)
1539 {
1540         struct hfi1_devdata *dd = context;
1541
1542         return dd->verbs_dev.n_kmem_wait;
1543 }
1544
1545 static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1546                                void *context, int vl, int mode, u64 data)
1547 {
1548         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1549
1550         return dd->verbs_dev.n_send_schedule;
1551 }
1552
1553 /* Software counters for the error status bits within MISC_ERR_STATUS */
1554 static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1555                                              void *context, int vl, int mode,
1556                                              u64 data)
1557 {
1558         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1559
1560         return dd->misc_err_status_cnt[12];
1561 }
1562
1563 static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1564                                           void *context, int vl, int mode,
1565                                           u64 data)
1566 {
1567         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1568
1569         return dd->misc_err_status_cnt[11];
1570 }
1571
1572 static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1573                                                void *context, int vl, int mode,
1574                                                u64 data)
1575 {
1576         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1577
1578         return dd->misc_err_status_cnt[10];
1579 }
1580
1581 static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1582                                                  void *context, int vl,
1583                                                  int mode, u64 data)
1584 {
1585         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1586
1587         return dd->misc_err_status_cnt[9];
1588 }
1589
1590 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1591                                            void *context, int vl, int mode,
1592                                            u64 data)
1593 {
1594         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1595
1596         return dd->misc_err_status_cnt[8];
1597 }
1598
1599 static u64 access_misc_efuse_read_bad_addr_err_cnt(
1600                                 const struct cntr_entry *entry,
1601                                 void *context, int vl, int mode, u64 data)
1602 {
1603         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1604
1605         return dd->misc_err_status_cnt[7];
1606 }
1607
1608 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1609                                                 void *context, int vl,
1610                                                 int mode, u64 data)
1611 {
1612         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1613
1614         return dd->misc_err_status_cnt[6];
1615 }
1616
1617 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1618                                               void *context, int vl, int mode,
1619                                               u64 data)
1620 {
1621         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1622
1623         return dd->misc_err_status_cnt[5];
1624 }
1625
1626 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1627                                             void *context, int vl, int mode,
1628                                             u64 data)
1629 {
1630         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1631
1632         return dd->misc_err_status_cnt[4];
1633 }
1634
1635 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1636                                                  void *context, int vl,
1637                                                  int mode, u64 data)
1638 {
1639         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1640
1641         return dd->misc_err_status_cnt[3];
1642 }
1643
1644 static u64 access_misc_csr_write_bad_addr_err_cnt(
1645                                 const struct cntr_entry *entry,
1646                                 void *context, int vl, int mode, u64 data)
1647 {
1648         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1649
1650         return dd->misc_err_status_cnt[2];
1651 }
1652
1653 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1654                                                  void *context, int vl,
1655                                                  int mode, u64 data)
1656 {
1657         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1658
1659         return dd->misc_err_status_cnt[1];
1660 }
1661
1662 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1663                                           void *context, int vl, int mode,
1664                                           u64 data)
1665 {
1666         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1667
1668         return dd->misc_err_status_cnt[0];
1669 }
1670
1671 /*
1672  * Software counter for the aggregate of
1673  * individual CceErrStatus counters
1674  */
1675 static u64 access_sw_cce_err_status_aggregated_cnt(
1676                                 const struct cntr_entry *entry,
1677                                 void *context, int vl, int mode, u64 data)
1678 {
1679         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1680
1681         return dd->sw_cce_err_status_aggregate;
1682 }
1683
1684 /*
1685  * Software counters corresponding to each of the
1686  * error status bits within CceErrStatus
1687  */
1688 static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1689                                               void *context, int vl, int mode,
1690                                               u64 data)
1691 {
1692         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1693
1694         return dd->cce_err_status_cnt[40];
1695 }
1696
1697 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1698                                           void *context, int vl, int mode,
1699                                           u64 data)
1700 {
1701         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1702
1703         return dd->cce_err_status_cnt[39];
1704 }
1705
1706 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1707                                           void *context, int vl, int mode,
1708                                           u64 data)
1709 {
1710         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1711
1712         return dd->cce_err_status_cnt[38];
1713 }
1714
1715 static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1716                                              void *context, int vl, int mode,
1717                                              u64 data)
1718 {
1719         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1720
1721         return dd->cce_err_status_cnt[37];
1722 }
1723
1724 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1725                                              void *context, int vl, int mode,
1726                                              u64 data)
1727 {
1728         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1729
1730         return dd->cce_err_status_cnt[36];
1731 }
1732
1733 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1734                                 const struct cntr_entry *entry,
1735                                 void *context, int vl, int mode, u64 data)
1736 {
1737         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1738
1739         return dd->cce_err_status_cnt[35];
1740 }
1741
1742 static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1743                                 const struct cntr_entry *entry,
1744                                 void *context, int vl, int mode, u64 data)
1745 {
1746         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1747
1748         return dd->cce_err_status_cnt[34];
1749 }
1750
1751 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1752                                                  void *context, int vl,
1753                                                  int mode, u64 data)
1754 {
1755         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1756
1757         return dd->cce_err_status_cnt[33];
1758 }
1759
1760 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1761                                                 void *context, int vl, int mode,
1762                                                 u64 data)
1763 {
1764         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1765
1766         return dd->cce_err_status_cnt[32];
1767 }
1768
1769 static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1770                                    void *context, int vl, int mode, u64 data)
1771 {
1772         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1773
1774         return dd->cce_err_status_cnt[31];
1775 }
1776
1777 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1778                                                void *context, int vl, int mode,
1779                                                u64 data)
1780 {
1781         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1782
1783         return dd->cce_err_status_cnt[30];
1784 }
1785
1786 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1787                                               void *context, int vl, int mode,
1788                                               u64 data)
1789 {
1790         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1791
1792         return dd->cce_err_status_cnt[29];
1793 }
1794
1795 static u64 access_pcic_transmit_back_parity_err_cnt(
1796                                 const struct cntr_entry *entry,
1797                                 void *context, int vl, int mode, u64 data)
1798 {
1799         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1800
1801         return dd->cce_err_status_cnt[28];
1802 }
1803
1804 static u64 access_pcic_transmit_front_parity_err_cnt(
1805                                 const struct cntr_entry *entry,
1806                                 void *context, int vl, int mode, u64 data)
1807 {
1808         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1809
1810         return dd->cce_err_status_cnt[27];
1811 }
1812
1813 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1814                                              void *context, int vl, int mode,
1815                                              u64 data)
1816 {
1817         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1818
1819         return dd->cce_err_status_cnt[26];
1820 }
1821
1822 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1823                                             void *context, int vl, int mode,
1824                                             u64 data)
1825 {
1826         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1827
1828         return dd->cce_err_status_cnt[25];
1829 }
1830
1831 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1832                                               void *context, int vl, int mode,
1833                                               u64 data)
1834 {
1835         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1836
1837         return dd->cce_err_status_cnt[24];
1838 }
1839
1840 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1841                                              void *context, int vl, int mode,
1842                                              u64 data)
1843 {
1844         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1845
1846         return dd->cce_err_status_cnt[23];
1847 }
1848
1849 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1850                                                  void *context, int vl,
1851                                                  int mode, u64 data)
1852 {
1853         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1854
1855         return dd->cce_err_status_cnt[22];
1856 }
1857
1858 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1859                                          void *context, int vl, int mode,
1860                                          u64 data)
1861 {
1862         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1863
1864         return dd->cce_err_status_cnt[21];
1865 }
1866
1867 static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1868                                 const struct cntr_entry *entry,
1869                                 void *context, int vl, int mode, u64 data)
1870 {
1871         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1872
1873         return dd->cce_err_status_cnt[20];
1874 }
1875
1876 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1877                                                  void *context, int vl,
1878                                                  int mode, u64 data)
1879 {
1880         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1881
1882         return dd->cce_err_status_cnt[19];
1883 }
1884
1885 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1886                                              void *context, int vl, int mode,
1887                                              u64 data)
1888 {
1889         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1890
1891         return dd->cce_err_status_cnt[18];
1892 }
1893
1894 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1895                                             void *context, int vl, int mode,
1896                                             u64 data)
1897 {
1898         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1899
1900         return dd->cce_err_status_cnt[17];
1901 }
1902
1903 static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1904                                               void *context, int vl, int mode,
1905                                               u64 data)
1906 {
1907         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1908
1909         return dd->cce_err_status_cnt[16];
1910 }
1911
1912 static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1913                                              void *context, int vl, int mode,
1914                                              u64 data)
1915 {
1916         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1917
1918         return dd->cce_err_status_cnt[15];
1919 }
1920
1921 static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1922                                                  void *context, int vl,
1923                                                  int mode, u64 data)
1924 {
1925         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1926
1927         return dd->cce_err_status_cnt[14];
1928 }
1929
1930 static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
1931                                              void *context, int vl, int mode,
1932                                              u64 data)
1933 {
1934         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1935
1936         return dd->cce_err_status_cnt[13];
1937 }
1938
1939 static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
1940                                 const struct cntr_entry *entry,
1941                                 void *context, int vl, int mode, u64 data)
1942 {
1943         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1944
1945         return dd->cce_err_status_cnt[12];
1946 }
1947
1948 static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
1949                                 const struct cntr_entry *entry,
1950                                 void *context, int vl, int mode, u64 data)
1951 {
1952         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1953
1954         return dd->cce_err_status_cnt[11];
1955 }
1956
1957 static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
1958                                 const struct cntr_entry *entry,
1959                                 void *context, int vl, int mode, u64 data)
1960 {
1961         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1962
1963         return dd->cce_err_status_cnt[10];
1964 }
1965
1966 static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
1967                                 const struct cntr_entry *entry,
1968                                 void *context, int vl, int mode, u64 data)
1969 {
1970         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1971
1972         return dd->cce_err_status_cnt[9];
1973 }
1974
1975 static u64 access_cce_cli2_async_fifo_parity_err_cnt(
1976                                 const struct cntr_entry *entry,
1977                                 void *context, int vl, int mode, u64 data)
1978 {
1979         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1980
1981         return dd->cce_err_status_cnt[8];
1982 }
1983
1984 static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
1985                                                  void *context, int vl,
1986                                                  int mode, u64 data)
1987 {
1988         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1989
1990         return dd->cce_err_status_cnt[7];
1991 }
1992
1993 static u64 access_cce_cli0_async_fifo_parity_err_cnt(
1994                                 const struct cntr_entry *entry,
1995                                 void *context, int vl, int mode, u64 data)
1996 {
1997         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1998
1999         return dd->cce_err_status_cnt[6];
2000 }
2001
2002 static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2003                                                void *context, int vl, int mode,
2004                                                u64 data)
2005 {
2006         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2007
2008         return dd->cce_err_status_cnt[5];
2009 }
2010
2011 static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2012                                           void *context, int vl, int mode,
2013                                           u64 data)
2014 {
2015         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2016
2017         return dd->cce_err_status_cnt[4];
2018 }
2019
2020 static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2021                                 const struct cntr_entry *entry,
2022                                 void *context, int vl, int mode, u64 data)
2023 {
2024         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2025
2026         return dd->cce_err_status_cnt[3];
2027 }
2028
2029 static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2030                                                  void *context, int vl,
2031                                                  int mode, u64 data)
2032 {
2033         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2034
2035         return dd->cce_err_status_cnt[2];
2036 }
2037
2038 static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2039                                                 void *context, int vl,
2040                                                 int mode, u64 data)
2041 {
2042         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2043
2044         return dd->cce_err_status_cnt[1];
2045 }
2046
2047 static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2048                                          void *context, int vl, int mode,
2049                                          u64 data)
2050 {
2051         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2052
2053         return dd->cce_err_status_cnt[0];
2054 }
2055
2056 /*
2057  * Software counters corresponding to each of the
2058  * error status bits within RcvErrStatus
2059  */
2060 static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2061                                         void *context, int vl, int mode,
2062                                         u64 data)
2063 {
2064         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2065
2066         return dd->rcv_err_status_cnt[63];
2067 }
2068
2069 static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2070                                                 void *context, int vl,
2071                                                 int mode, u64 data)
2072 {
2073         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2074
2075         return dd->rcv_err_status_cnt[62];
2076 }
2077
2078 static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2079                                                void *context, int vl, int mode,
2080                                                u64 data)
2081 {
2082         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2083
2084         return dd->rcv_err_status_cnt[61];
2085 }
2086
2087 static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2088                                          void *context, int vl, int mode,
2089                                          u64 data)
2090 {
2091         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2092
2093         return dd->rcv_err_status_cnt[60];
2094 }
2095
2096 static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2097                                                  void *context, int vl,
2098                                                  int mode, u64 data)
2099 {
2100         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2101
2102         return dd->rcv_err_status_cnt[59];
2103 }
2104
2105 static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2106                                                  void *context, int vl,
2107                                                  int mode, u64 data)
2108 {
2109         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2110
2111         return dd->rcv_err_status_cnt[58];
2112 }
2113
2114 static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2115                                             void *context, int vl, int mode,
2116                                             u64 data)
2117 {
2118         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2119
2120         return dd->rcv_err_status_cnt[57];
2121 }
2122
2123 static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2124                                            void *context, int vl, int mode,
2125                                            u64 data)
2126 {
2127         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2128
2129         return dd->rcv_err_status_cnt[56];
2130 }
2131
2132 static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2133                                            void *context, int vl, int mode,
2134                                            u64 data)
2135 {
2136         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2137
2138         return dd->rcv_err_status_cnt[55];
2139 }
2140
2141 static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2142                                 const struct cntr_entry *entry,
2143                                 void *context, int vl, int mode, u64 data)
2144 {
2145         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2146
2147         return dd->rcv_err_status_cnt[54];
2148 }
2149
2150 static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2151                                 const struct cntr_entry *entry,
2152                                 void *context, int vl, int mode, u64 data)
2153 {
2154         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2155
2156         return dd->rcv_err_status_cnt[53];
2157 }
2158
2159 static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2160                                                  void *context, int vl,
2161                                                  int mode, u64 data)
2162 {
2163         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2164
2165         return dd->rcv_err_status_cnt[52];
2166 }
2167
2168 static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2169                                                  void *context, int vl,
2170                                                  int mode, u64 data)
2171 {
2172         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2173
2174         return dd->rcv_err_status_cnt[51];
2175 }
2176
2177 static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2178                                                  void *context, int vl,
2179                                                  int mode, u64 data)
2180 {
2181         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2182
2183         return dd->rcv_err_status_cnt[50];
2184 }
2185
2186 static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2187                                                  void *context, int vl,
2188                                                  int mode, u64 data)
2189 {
2190         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2191
2192         return dd->rcv_err_status_cnt[49];
2193 }
2194
2195 static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2196                                                  void *context, int vl,
2197                                                  int mode, u64 data)
2198 {
2199         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2200
2201         return dd->rcv_err_status_cnt[48];
2202 }
2203
2204 static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2205                                                  void *context, int vl,
2206                                                  int mode, u64 data)
2207 {
2208         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2209
2210         return dd->rcv_err_status_cnt[47];
2211 }
2212
2213 static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2214                                          void *context, int vl, int mode,
2215                                          u64 data)
2216 {
2217         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2218
2219         return dd->rcv_err_status_cnt[46];
2220 }
2221
2222 static u64 access_rx_hq_intr_csr_parity_err_cnt(
2223                                 const struct cntr_entry *entry,
2224                                 void *context, int vl, int mode, u64 data)
2225 {
2226         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2227
2228         return dd->rcv_err_status_cnt[45];
2229 }
2230
2231 static u64 access_rx_lookup_csr_parity_err_cnt(
2232                                 const struct cntr_entry *entry,
2233                                 void *context, int vl, int mode, u64 data)
2234 {
2235         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2236
2237         return dd->rcv_err_status_cnt[44];
2238 }
2239
2240 static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2241                                 const struct cntr_entry *entry,
2242                                 void *context, int vl, int mode, u64 data)
2243 {
2244         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2245
2246         return dd->rcv_err_status_cnt[43];
2247 }
2248
2249 static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2250                                 const struct cntr_entry *entry,
2251                                 void *context, int vl, int mode, u64 data)
2252 {
2253         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2254
2255         return dd->rcv_err_status_cnt[42];
2256 }
2257
2258 static u64 access_rx_lookup_des_part2_parity_err_cnt(
2259                                 const struct cntr_entry *entry,
2260                                 void *context, int vl, int mode, u64 data)
2261 {
2262         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2263
2264         return dd->rcv_err_status_cnt[41];
2265 }
2266
2267 static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2268                                 const struct cntr_entry *entry,
2269                                 void *context, int vl, int mode, u64 data)
2270 {
2271         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2272
2273         return dd->rcv_err_status_cnt[40];
2274 }
2275
2276 static u64 access_rx_lookup_des_part1_unc_err_cnt(
2277                                 const struct cntr_entry *entry,
2278                                 void *context, int vl, int mode, u64 data)
2279 {
2280         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2281
2282         return dd->rcv_err_status_cnt[39];
2283 }
2284
2285 static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2286                                 const struct cntr_entry *entry,
2287                                 void *context, int vl, int mode, u64 data)
2288 {
2289         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2290
2291         return dd->rcv_err_status_cnt[38];
2292 }
2293
2294 static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2295                                 const struct cntr_entry *entry,
2296                                 void *context, int vl, int mode, u64 data)
2297 {
2298         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2299
2300         return dd->rcv_err_status_cnt[37];
2301 }
2302
2303 static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2304                                 const struct cntr_entry *entry,
2305                                 void *context, int vl, int mode, u64 data)
2306 {
2307         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2308
2309         return dd->rcv_err_status_cnt[36];
2310 }
2311
2312 static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2313                                 const struct cntr_entry *entry,
2314                                 void *context, int vl, int mode, u64 data)
2315 {
2316         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2317
2318         return dd->rcv_err_status_cnt[35];
2319 }
2320
2321 static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2322                                 const struct cntr_entry *entry,
2323                                 void *context, int vl, int mode, u64 data)
2324 {
2325         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2326
2327         return dd->rcv_err_status_cnt[34];
2328 }
2329
2330 static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2331                                 const struct cntr_entry *entry,
2332                                 void *context, int vl, int mode, u64 data)
2333 {
2334         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2335
2336         return dd->rcv_err_status_cnt[33];
2337 }
2338
2339 static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2340                                         void *context, int vl, int mode,
2341                                         u64 data)
2342 {
2343         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2344
2345         return dd->rcv_err_status_cnt[32];
2346 }
2347
2348 static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2349                                        void *context, int vl, int mode,
2350                                        u64 data)
2351 {
2352         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2353
2354         return dd->rcv_err_status_cnt[31];
2355 }
2356
2357 static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2358                                           void *context, int vl, int mode,
2359                                           u64 data)
2360 {
2361         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2362
2363         return dd->rcv_err_status_cnt[30];
2364 }
2365
2366 static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2367                                              void *context, int vl, int mode,
2368                                              u64 data)
2369 {
2370         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2371
2372         return dd->rcv_err_status_cnt[29];
2373 }
2374
2375 static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2376                                                  void *context, int vl,
2377                                                  int mode, u64 data)
2378 {
2379         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2380
2381         return dd->rcv_err_status_cnt[28];
2382 }
2383
2384 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2385                                 const struct cntr_entry *entry,
2386                                 void *context, int vl, int mode, u64 data)
2387 {
2388         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2389
2390         return dd->rcv_err_status_cnt[27];
2391 }
2392
2393 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2394                                 const struct cntr_entry *entry,
2395                                 void *context, int vl, int mode, u64 data)
2396 {
2397         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2398
2399         return dd->rcv_err_status_cnt[26];
2400 }
2401
2402 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2403                                 const struct cntr_entry *entry,
2404                                 void *context, int vl, int mode, u64 data)
2405 {
2406         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2407
2408         return dd->rcv_err_status_cnt[25];
2409 }
2410
2411 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2412                                 const struct cntr_entry *entry,
2413                                 void *context, int vl, int mode, u64 data)
2414 {
2415         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2416
2417         return dd->rcv_err_status_cnt[24];
2418 }
2419
2420 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2421                                 const struct cntr_entry *entry,
2422                                 void *context, int vl, int mode, u64 data)
2423 {
2424         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2425
2426         return dd->rcv_err_status_cnt[23];
2427 }
2428
2429 static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2430                                 const struct cntr_entry *entry,
2431                                 void *context, int vl, int mode, u64 data)
2432 {
2433         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2434
2435         return dd->rcv_err_status_cnt[22];
2436 }
2437
2438 static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2439                                 const struct cntr_entry *entry,
2440                                 void *context, int vl, int mode, u64 data)
2441 {
2442         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2443
2444         return dd->rcv_err_status_cnt[21];
2445 }
2446
2447 static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2448                                 const struct cntr_entry *entry,
2449                                 void *context, int vl, int mode, u64 data)
2450 {
2451         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2452
2453         return dd->rcv_err_status_cnt[20];
2454 }
2455
2456 static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2457                                 const struct cntr_entry *entry,
2458                                 void *context, int vl, int mode, u64 data)
2459 {
2460         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2461
2462         return dd->rcv_err_status_cnt[19];
2463 }
2464
2465 static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2466                                                  void *context, int vl,
2467                                                  int mode, u64 data)
2468 {
2469         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2470
2471         return dd->rcv_err_status_cnt[18];
2472 }
2473
2474 static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2475                                                  void *context, int vl,
2476                                                  int mode, u64 data)
2477 {
2478         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2479
2480         return dd->rcv_err_status_cnt[17];
2481 }
2482
2483 static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2484                                 const struct cntr_entry *entry,
2485                                 void *context, int vl, int mode, u64 data)
2486 {
2487         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2488
2489         return dd->rcv_err_status_cnt[16];
2490 }
2491
2492 static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2493                                 const struct cntr_entry *entry,
2494                                 void *context, int vl, int mode, u64 data)
2495 {
2496         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2497
2498         return dd->rcv_err_status_cnt[15];
2499 }
2500
2501 static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2502                                                 void *context, int vl,
2503                                                 int mode, u64 data)
2504 {
2505         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2506
2507         return dd->rcv_err_status_cnt[14];
2508 }
2509
2510 static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2511                                                 void *context, int vl,
2512                                                 int mode, u64 data)
2513 {
2514         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2515
2516         return dd->rcv_err_status_cnt[13];
2517 }
2518
2519 static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2520                                               void *context, int vl, int mode,
2521                                               u64 data)
2522 {
2523         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2524
2525         return dd->rcv_err_status_cnt[12];
2526 }
2527
2528 static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2529                                           void *context, int vl, int mode,
2530                                           u64 data)
2531 {
2532         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2533
2534         return dd->rcv_err_status_cnt[11];
2535 }
2536
2537 static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2538                                           void *context, int vl, int mode,
2539                                           u64 data)
2540 {
2541         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2542
2543         return dd->rcv_err_status_cnt[10];
2544 }
2545
2546 static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2547                                                void *context, int vl, int mode,
2548                                                u64 data)
2549 {
2550         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2551
2552         return dd->rcv_err_status_cnt[9];
2553 }
2554
2555 static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2556                                             void *context, int vl, int mode,
2557                                             u64 data)
2558 {
2559         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2560
2561         return dd->rcv_err_status_cnt[8];
2562 }
2563
2564 static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2565                                 const struct cntr_entry *entry,
2566                                 void *context, int vl, int mode, u64 data)
2567 {
2568         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2569
2570         return dd->rcv_err_status_cnt[7];
2571 }
2572
2573 static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2574                                 const struct cntr_entry *entry,
2575                                 void *context, int vl, int mode, u64 data)
2576 {
2577         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2578
2579         return dd->rcv_err_status_cnt[6];
2580 }
2581
2582 static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2583                                           void *context, int vl, int mode,
2584                                           u64 data)
2585 {
2586         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2587
2588         return dd->rcv_err_status_cnt[5];
2589 }
2590
2591 static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2592                                           void *context, int vl, int mode,
2593                                           u64 data)
2594 {
2595         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2596
2597         return dd->rcv_err_status_cnt[4];
2598 }
2599
2600 static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2601                                          void *context, int vl, int mode,
2602                                          u64 data)
2603 {
2604         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2605
2606         return dd->rcv_err_status_cnt[3];
2607 }
2608
2609 static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2610                                          void *context, int vl, int mode,
2611                                          u64 data)
2612 {
2613         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2614
2615         return dd->rcv_err_status_cnt[2];
2616 }
2617
2618 static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2619                                             void *context, int vl, int mode,
2620                                             u64 data)
2621 {
2622         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2623
2624         return dd->rcv_err_status_cnt[1];
2625 }
2626
2627 static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2628                                          void *context, int vl, int mode,
2629                                          u64 data)
2630 {
2631         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2632
2633         return dd->rcv_err_status_cnt[0];
2634 }
2635
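/*
 * Note on the pattern used by the access_*_err_cnt() helpers in this
 * file: each one simply returns a single element of a per-device
 * software counter array, and the array index matches the bit position
 * of the corresponding error in the hardware error-status CSR.  As a
 * rough, illustrative sketch only (the handler name and the direct
 * increment below are assumptions; the driver may update these counters
 * elsewhere and with different helpers), the increment side for the
 * SendPioErrStatus group that follows could look like:
 */
#if 0	/* sketch only, not built */
static void count_send_pio_err_bits(struct hfi1_devdata *dd, u64 reg)
{
	int i;

	/* SendPioErrStatus uses bits 0..35; bump the matching counter */
	for (i = 0; i < 36; i++)
		if (reg & (1ull << i))
			dd->send_pio_err_status_cnt[i]++;
}
#endif
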
2636 /*
2637  * Software counters corresponding to each of the
2638  * error status bits within SendPioErrStatus
2639  */
2640 static u64 access_pio_pec_sop_head_parity_err_cnt(
2641                                 const struct cntr_entry *entry,
2642                                 void *context, int vl, int mode, u64 data)
2643 {
2644         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2645
2646         return dd->send_pio_err_status_cnt[35];
2647 }
2648
2649 static u64 access_pio_pcc_sop_head_parity_err_cnt(
2650                                 const struct cntr_entry *entry,
2651                                 void *context, int vl, int mode, u64 data)
2652 {
2653         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2654
2655         return dd->send_pio_err_status_cnt[34];
2656 }
2657
2658 static u64 access_pio_last_returned_cnt_parity_err_cnt(
2659                                 const struct cntr_entry *entry,
2660                                 void *context, int vl, int mode, u64 data)
2661 {
2662         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2663
2664         return dd->send_pio_err_status_cnt[33];
2665 }
2666
2667 static u64 access_pio_current_free_cnt_parity_err_cnt(
2668                                 const struct cntr_entry *entry,
2669                                 void *context, int vl, int mode, u64 data)
2670 {
2671         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2672
2673         return dd->send_pio_err_status_cnt[32];
2674 }
2675
2676 static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2677                                           void *context, int vl, int mode,
2678                                           u64 data)
2679 {
2680         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2681
2682         return dd->send_pio_err_status_cnt[31];
2683 }
2684
2685 static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2686                                           void *context, int vl, int mode,
2687                                           u64 data)
2688 {
2689         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2690
2691         return dd->send_pio_err_status_cnt[30];
2692 }
2693
2694 static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2695                                            void *context, int vl, int mode,
2696                                            u64 data)
2697 {
2698         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2699
2700         return dd->send_pio_err_status_cnt[29];
2701 }
2702
2703 static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2704                                 const struct cntr_entry *entry,
2705                                 void *context, int vl, int mode, u64 data)
2706 {
2707         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2708
2709         return dd->send_pio_err_status_cnt[28];
2710 }
2711
2712 static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2713                                              void *context, int vl, int mode,
2714                                              u64 data)
2715 {
2716         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2717
2718         return dd->send_pio_err_status_cnt[27];
2719 }
2720
2721 static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2722                                              void *context, int vl, int mode,
2723                                              u64 data)
2724 {
2725         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2726
2727         return dd->send_pio_err_status_cnt[26];
2728 }
2729
2730 static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2731                                                 void *context, int vl,
2732                                                 int mode, u64 data)
2733 {
2734         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2735
2736         return dd->send_pio_err_status_cnt[25];
2737 }
2738
2739 static u64 access_pio_block_qw_count_parity_err_cnt(
2740                                 const struct cntr_entry *entry,
2741                                 void *context, int vl, int mode, u64 data)
2742 {
2743         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2744
2745         return dd->send_pio_err_status_cnt[24];
2746 }
2747
2748 static u64 access_pio_write_qw_valid_parity_err_cnt(
2749                                 const struct cntr_entry *entry,
2750                                 void *context, int vl, int mode, u64 data)
2751 {
2752         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2753
2754         return dd->send_pio_err_status_cnt[23];
2755 }
2756
2757 static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2758                                             void *context, int vl, int mode,
2759                                             u64 data)
2760 {
2761         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2762
2763         return dd->send_pio_err_status_cnt[22];
2764 }
2765
2766 static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2767                                                 void *context, int vl,
2768                                                 int mode, u64 data)
2769 {
2770         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2771
2772         return dd->send_pio_err_status_cnt[21];
2773 }
2774
2775 static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2776                                                 void *context, int vl,
2777                                                 int mode, u64 data)
2778 {
2779         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2780
2781         return dd->send_pio_err_status_cnt[20];
2782 }
2783
2784 static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2785                                                 void *context, int vl,
2786                                                 int mode, u64 data)
2787 {
2788         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2789
2790         return dd->send_pio_err_status_cnt[19];
2791 }
2792
2793 static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2794                                 const struct cntr_entry *entry,
2795                                 void *context, int vl, int mode, u64 data)
2796 {
2797         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2798
2799         return dd->send_pio_err_status_cnt[18];
2800 }
2801
2802 static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2803                                          void *context, int vl, int mode,
2804                                          u64 data)
2805 {
2806         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2807
2808         return dd->send_pio_err_status_cnt[17];
2809 }
2810
2811 static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2812                                             void *context, int vl, int mode,
2813                                             u64 data)
2814 {
2815         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2816
2817         return dd->send_pio_err_status_cnt[16];
2818 }
2819
2820 static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2821                                 const struct cntr_entry *entry,
2822                                 void *context, int vl, int mode, u64 data)
2823 {
2824         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2825
2826         return dd->send_pio_err_status_cnt[15];
2827 }
2828
2829 static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2830                                 const struct cntr_entry *entry,
2831                                 void *context, int vl, int mode, u64 data)
2832 {
2833         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2834
2835         return dd->send_pio_err_status_cnt[14];
2836 }
2837
2838 static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2839                                 const struct cntr_entry *entry,
2840                                 void *context, int vl, int mode, u64 data)
2841 {
2842         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2843
2844         return dd->send_pio_err_status_cnt[13];
2845 }
2846
2847 static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2848                                 const struct cntr_entry *entry,
2849                                 void *context, int vl, int mode, u64 data)
2850 {
2851         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2852
2853         return dd->send_pio_err_status_cnt[12];
2854 }
2855
2856 static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2857                                 const struct cntr_entry *entry,
2858                                 void *context, int vl, int mode, u64 data)
2859 {
2860         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2861
2862         return dd->send_pio_err_status_cnt[11];
2863 }
2864
2865 static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2866                                 const struct cntr_entry *entry,
2867                                 void *context, int vl, int mode, u64 data)
2868 {
2869         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2870
2871         return dd->send_pio_err_status_cnt[10];
2872 }
2873
2874 static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2875                                 const struct cntr_entry *entry,
2876                                 void *context, int vl, int mode, u64 data)
2877 {
2878         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2879
2880         return dd->send_pio_err_status_cnt[9];
2881 }
2882
2883 static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2884                                 const struct cntr_entry *entry,
2885                                 void *context, int vl, int mode, u64 data)
2886 {
2887         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2888
2889         return dd->send_pio_err_status_cnt[8];
2890 }
2891
2892 static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2893                                 const struct cntr_entry *entry,
2894                                 void *context, int vl, int mode, u64 data)
2895 {
2896         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2897
2898         return dd->send_pio_err_status_cnt[7];
2899 }
2900
2901 static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2902                                               void *context, int vl, int mode,
2903                                               u64 data)
2904 {
2905         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2906
2907         return dd->send_pio_err_status_cnt[6];
2908 }
2909
2910 static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2911                                               void *context, int vl, int mode,
2912                                               u64 data)
2913 {
2914         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2915
2916         return dd->send_pio_err_status_cnt[5];
2917 }
2918
2919 static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2920                                            void *context, int vl, int mode,
2921                                            u64 data)
2922 {
2923         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2924
2925         return dd->send_pio_err_status_cnt[4];
2926 }
2927
2928 static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
2929                                            void *context, int vl, int mode,
2930                                            u64 data)
2931 {
2932         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2933
2934         return dd->send_pio_err_status_cnt[3];
2935 }
2936
2937 static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
2938                                          void *context, int vl, int mode,
2939                                          u64 data)
2940 {
2941         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2942
2943         return dd->send_pio_err_status_cnt[2];
2944 }
2945
2946 static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
2947                                                 void *context, int vl,
2948                                                 int mode, u64 data)
2949 {
2950         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2951
2952         return dd->send_pio_err_status_cnt[1];
2953 }
2954
2955 static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
2956                                              void *context, int vl, int mode,
2957                                              u64 data)
2958 {
2959         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2960
2961         return dd->send_pio_err_status_cnt[0];
2962 }
2963
2964 /*
2965  * Software counters corresponding to each of the
2966  * error status bits within SendDmaErrStatus
2967  */
2968 static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
2969                                 const struct cntr_entry *entry,
2970                                 void *context, int vl, int mode, u64 data)
2971 {
2972         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2973
2974         return dd->send_dma_err_status_cnt[3];
2975 }
2976
2977 static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
2978                                 const struct cntr_entry *entry,
2979                                 void *context, int vl, int mode, u64 data)
2980 {
2981         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2982
2983         return dd->send_dma_err_status_cnt[2];
2984 }
2985
2986 static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
2987                                           void *context, int vl, int mode,
2988                                           u64 data)
2989 {
2990         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2991
2992         return dd->send_dma_err_status_cnt[1];
2993 }
2994
2995 static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
2996                                        void *context, int vl, int mode,
2997                                        u64 data)
2998 {
2999         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3000
3001         return dd->send_dma_err_status_cnt[0];
3002 }
3003
3004 /*
3005  * Software counters corresponding to each of the
3006  * error status bits within SendEgressErrStatus
3007  */
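/*
 * Reserved bits in SendEgressErrStatus (see the access_egress_reserved_*
 * helpers below) still get their own counters so that the array index
 * stays identical to the hardware bit position.
 */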
3008 static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3009                                 const struct cntr_entry *entry,
3010                                 void *context, int vl, int mode, u64 data)
3011 {
3012         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3013
3014         return dd->send_egress_err_status_cnt[63];
3015 }
3016
3017 static u64 access_tx_read_sdma_memory_csr_err_cnt(
3018                                 const struct cntr_entry *entry,
3019                                 void *context, int vl, int mode, u64 data)
3020 {
3021         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3022
3023         return dd->send_egress_err_status_cnt[62];
3024 }
3025
3026 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3027                                              void *context, int vl, int mode,
3028                                              u64 data)
3029 {
3030         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3031
3032         return dd->send_egress_err_status_cnt[61];
3033 }
3034
3035 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3036                                                  void *context, int vl,
3037                                                  int mode, u64 data)
3038 {
3039         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3040
3041         return dd->send_egress_err_status_cnt[60];
3042 }
3043
3044 static u64 access_tx_read_sdma_memory_cor_err_cnt(
3045                                 const struct cntr_entry *entry,
3046                                 void *context, int vl, int mode, u64 data)
3047 {
3048         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3049
3050         return dd->send_egress_err_status_cnt[59];
3051 }
3052
3053 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3054                                         void *context, int vl, int mode,
3055                                         u64 data)
3056 {
3057         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3058
3059         return dd->send_egress_err_status_cnt[58];
3060 }
3061
3062 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3063                                             void *context, int vl, int mode,
3064                                             u64 data)
3065 {
3066         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3067
3068         return dd->send_egress_err_status_cnt[57];
3069 }
3070
3071 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3072                                               void *context, int vl, int mode,
3073                                               u64 data)
3074 {
3075         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3076
3077         return dd->send_egress_err_status_cnt[56];
3078 }
3079
3080 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3081                                               void *context, int vl, int mode,
3082                                               u64 data)
3083 {
3084         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3085
3086         return dd->send_egress_err_status_cnt[55];
3087 }
3088
3089 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3090                                               void *context, int vl, int mode,
3091                                               u64 data)
3092 {
3093         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3094
3095         return dd->send_egress_err_status_cnt[54];
3096 }
3097
3098 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3099                                               void *context, int vl, int mode,
3100                                               u64 data)
3101 {
3102         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3103
3104         return dd->send_egress_err_status_cnt[53];
3105 }
3106
3107 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3108                                               void *context, int vl, int mode,
3109                                               u64 data)
3110 {
3111         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3112
3113         return dd->send_egress_err_status_cnt[52];
3114 }
3115
3116 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3117                                               void *context, int vl, int mode,
3118                                               u64 data)
3119 {
3120         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3121
3122         return dd->send_egress_err_status_cnt[51];
3123 }
3124
3125 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3126                                               void *context, int vl, int mode,
3127                                               u64 data)
3128 {
3129         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3130
3131         return dd->send_egress_err_status_cnt[50];
3132 }
3133
3134 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3135                                               void *context, int vl, int mode,
3136                                               u64 data)
3137 {
3138         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3139
3140         return dd->send_egress_err_status_cnt[49];
3141 }
3142
3143 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3144                                               void *context, int vl, int mode,
3145                                               u64 data)
3146 {
3147         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3148
3149         return dd->send_egress_err_status_cnt[48];
3150 }
3151
3152 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3153                                               void *context, int vl, int mode,
3154                                               u64 data)
3155 {
3156         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3157
3158         return dd->send_egress_err_status_cnt[47];
3159 }
3160
3161 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3162                                             void *context, int vl, int mode,
3163                                             u64 data)
3164 {
3165         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3166
3167         return dd->send_egress_err_status_cnt[46];
3168 }
3169
3170 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3171                                              void *context, int vl, int mode,
3172                                              u64 data)
3173 {
3174         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3175
3176         return dd->send_egress_err_status_cnt[45];
3177 }
3178
3179 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3180                                                  void *context, int vl,
3181                                                  int mode, u64 data)
3182 {
3183         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3184
3185         return dd->send_egress_err_status_cnt[44];
3186 }
3187
3188 static u64 access_tx_read_sdma_memory_unc_err_cnt(
3189                                 const struct cntr_entry *entry,
3190                                 void *context, int vl, int mode, u64 data)
3191 {
3192         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3193
3194         return dd->send_egress_err_status_cnt[43];
3195 }
3196
3197 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3198                                         void *context, int vl, int mode,
3199                                         u64 data)
3200 {
3201         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3202
3203         return dd->send_egress_err_status_cnt[42];
3204 }
3205
3206 static u64 access_tx_credit_return_partiy_err_cnt(
3207                                 const struct cntr_entry *entry,
3208                                 void *context, int vl, int mode, u64 data)
3209 {
3210         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3211
3212         return dd->send_egress_err_status_cnt[41];
3213 }
3214
3215 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3216                                 const struct cntr_entry *entry,
3217                                 void *context, int vl, int mode, u64 data)
3218 {
3219         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3220
3221         return dd->send_egress_err_status_cnt[40];
3222 }
3223
3224 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3225                                 const struct cntr_entry *entry,
3226                                 void *context, int vl, int mode, u64 data)
3227 {
3228         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3229
3230         return dd->send_egress_err_status_cnt[39];
3231 }
3232
3233 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3234                                 const struct cntr_entry *entry,
3235                                 void *context, int vl, int mode, u64 data)
3236 {
3237         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3238
3239         return dd->send_egress_err_status_cnt[38];
3240 }
3241
3242 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3243                                 const struct cntr_entry *entry,
3244                                 void *context, int vl, int mode, u64 data)
3245 {
3246         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3247
3248         return dd->send_egress_err_status_cnt[37];
3249 }
3250
3251 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3252                                 const struct cntr_entry *entry,
3253                                 void *context, int vl, int mode, u64 data)
3254 {
3255         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3256
3257         return dd->send_egress_err_status_cnt[36];
3258 }
3259
3260 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3261                                 const struct cntr_entry *entry,
3262                                 void *context, int vl, int mode, u64 data)
3263 {
3264         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3265
3266         return dd->send_egress_err_status_cnt[35];
3267 }
3268
3269 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3270                                 const struct cntr_entry *entry,
3271                                 void *context, int vl, int mode, u64 data)
3272 {
3273         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3274
3275         return dd->send_egress_err_status_cnt[34];
3276 }
3277
3278 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3279                                 const struct cntr_entry *entry,
3280                                 void *context, int vl, int mode, u64 data)
3281 {
3282         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3283
3284         return dd->send_egress_err_status_cnt[33];
3285 }
3286
3287 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3288                                 const struct cntr_entry *entry,
3289                                 void *context, int vl, int mode, u64 data)
3290 {
3291         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3292
3293         return dd->send_egress_err_status_cnt[32];
3294 }
3295
3296 static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3297                                 const struct cntr_entry *entry,
3298                                 void *context, int vl, int mode, u64 data)
3299 {
3300         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3301
3302         return dd->send_egress_err_status_cnt[31];
3303 }
3304
3305 static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3306                                 const struct cntr_entry *entry,
3307                                 void *context, int vl, int mode, u64 data)
3308 {
3309         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3310
3311         return dd->send_egress_err_status_cnt[30];
3312 }
3313
3314 static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3315                                 const struct cntr_entry *entry,
3316                                 void *context, int vl, int mode, u64 data)
3317 {
3318         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3319
3320         return dd->send_egress_err_status_cnt[29];
3321 }
3322
3323 static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3324                                 const struct cntr_entry *entry,
3325                                 void *context, int vl, int mode, u64 data)
3326 {
3327         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3328
3329         return dd->send_egress_err_status_cnt[28];
3330 }
3331
3332 static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3333                                 const struct cntr_entry *entry,
3334                                 void *context, int vl, int mode, u64 data)
3335 {
3336         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3337
3338         return dd->send_egress_err_status_cnt[27];
3339 }
3340
3341 static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3342                                 const struct cntr_entry *entry,
3343                                 void *context, int vl, int mode, u64 data)
3344 {
3345         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3346
3347         return dd->send_egress_err_status_cnt[26];
3348 }
3349
3350 static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3351                                 const struct cntr_entry *entry,
3352                                 void *context, int vl, int mode, u64 data)
3353 {
3354         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3355
3356         return dd->send_egress_err_status_cnt[25];
3357 }
3358
3359 static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3360                                 const struct cntr_entry *entry,
3361                                 void *context, int vl, int mode, u64 data)
3362 {
3363         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3364
3365         return dd->send_egress_err_status_cnt[24];
3366 }
3367
3368 static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3369                                 const struct cntr_entry *entry,
3370                                 void *context, int vl, int mode, u64 data)
3371 {
3372         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3373
3374         return dd->send_egress_err_status_cnt[23];
3375 }
3376
3377 static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3378                                 const struct cntr_entry *entry,
3379                                 void *context, int vl, int mode, u64 data)
3380 {
3381         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3382
3383         return dd->send_egress_err_status_cnt[22];
3384 }
3385
3386 static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3387                                 const struct cntr_entry *entry,
3388                                 void *context, int vl, int mode, u64 data)
3389 {
3390         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3391
3392         return dd->send_egress_err_status_cnt[21];
3393 }
3394
3395 static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3396                                 const struct cntr_entry *entry,
3397                                 void *context, int vl, int mode, u64 data)
3398 {
3399         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3400
3401         return dd->send_egress_err_status_cnt[20];
3402 }
3403
3404 static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3405                                 const struct cntr_entry *entry,
3406                                 void *context, int vl, int mode, u64 data)
3407 {
3408         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3409
3410         return dd->send_egress_err_status_cnt[19];
3411 }
3412
3413 static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3414                                 const struct cntr_entry *entry,
3415                                 void *context, int vl, int mode, u64 data)
3416 {
3417         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3418
3419         return dd->send_egress_err_status_cnt[18];
3420 }
3421
3422 static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3423                                 const struct cntr_entry *entry,
3424                                 void *context, int vl, int mode, u64 data)
3425 {
3426         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3427
3428         return dd->send_egress_err_status_cnt[17];
3429 }
3430
3431 static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3432                                 const struct cntr_entry *entry,
3433                                 void *context, int vl, int mode, u64 data)
3434 {
3435         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3436
3437         return dd->send_egress_err_status_cnt[16];
3438 }
3439
3440 static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3441                                            void *context, int vl, int mode,
3442                                            u64 data)
3443 {
3444         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3445
3446         return dd->send_egress_err_status_cnt[15];
3447 }
3448
3449 static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3450                                                  void *context, int vl,
3451                                                  int mode, u64 data)
3452 {
3453         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3454
3455         return dd->send_egress_err_status_cnt[14];
3456 }
3457
3458 static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3459                                                void *context, int vl, int mode,
3460                                                u64 data)
3461 {
3462         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3463
3464         return dd->send_egress_err_status_cnt[13];
3465 }
3466
3467 static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3468                                         void *context, int vl, int mode,
3469                                         u64 data)
3470 {
3471         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3472
3473         return dd->send_egress_err_status_cnt[12];
3474 }
3475
3476 static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3477                                 const struct cntr_entry *entry,
3478                                 void *context, int vl, int mode, u64 data)
3479 {
3480         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3481
3482         return dd->send_egress_err_status_cnt[11];
3483 }
3484
3485 static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3486                                              void *context, int vl, int mode,
3487                                              u64 data)
3488 {
3489         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3490
3491         return dd->send_egress_err_status_cnt[10];
3492 }
3493
3494 static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3495                                             void *context, int vl, int mode,
3496                                             u64 data)
3497 {
3498         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3499
3500         return dd->send_egress_err_status_cnt[9];
3501 }
3502
3503 static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3504                                 const struct cntr_entry *entry,
3505                                 void *context, int vl, int mode, u64 data)
3506 {
3507         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3508
3509         return dd->send_egress_err_status_cnt[8];
3510 }
3511
3512 static u64 access_tx_pio_launch_intf_parity_err_cnt(
3513                                 const struct cntr_entry *entry,
3514                                 void *context, int vl, int mode, u64 data)
3515 {
3516         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3517
3518         return dd->send_egress_err_status_cnt[7];
3519 }
3520
3521 static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3522                                             void *context, int vl, int mode,
3523                                             u64 data)
3524 {
3525         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3526
3527         return dd->send_egress_err_status_cnt[6];
3528 }
3529
3530 static u64 access_tx_incorrect_link_state_err_cnt(
3531                                 const struct cntr_entry *entry,
3532                                 void *context, int vl, int mode, u64 data)
3533 {
3534         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3535
3536         return dd->send_egress_err_status_cnt[5];
3537 }
3538
3539 static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3540                                       void *context, int vl, int mode,
3541                                       u64 data)
3542 {
3543         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3544
3545         return dd->send_egress_err_status_cnt[4];
3546 }
3547
3548 static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3549                                 const struct cntr_entry *entry,
3550                                 void *context, int vl, int mode, u64 data)
3551 {
3552         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3553
3554         return dd->send_egress_err_status_cnt[3];
3555 }
3556
3557 static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3558                                             void *context, int vl, int mode,
3559                                             u64 data)
3560 {
3561         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3562
3563         return dd->send_egress_err_status_cnt[2];
3564 }
3565
3566 static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3567                                 const struct cntr_entry *entry,
3568                                 void *context, int vl, int mode, u64 data)
3569 {
3570         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3571
3572         return dd->send_egress_err_status_cnt[1];
3573 }
3574
3575 static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3576                                 const struct cntr_entry *entry,
3577                                 void *context, int vl, int mode, u64 data)
3578 {
3579         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3580
3581         return dd->send_egress_err_status_cnt[0];
3582 }
3583
3584 /*
3585  * Software counters corresponding to each of the
3586  * error status bits within SendErrStatus
3587  */
3588 static u64 access_send_csr_write_bad_addr_err_cnt(
3589                                 const struct cntr_entry *entry,
3590                                 void *context, int vl, int mode, u64 data)
3591 {
3592         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3593
3594         return dd->send_err_status_cnt[2];
3595 }
3596
3597 static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3598                                                  void *context, int vl,
3599                                                  int mode, u64 data)
3600 {
3601         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3602
3603         return dd->send_err_status_cnt[1];
3604 }
3605
3606 static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3607                                       void *context, int vl, int mode,
3608                                       u64 data)
3609 {
3610         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3611
3612         return dd->send_err_status_cnt[0];
3613 }
3614
3615 /*
3616  * Software counters corresponding to each of the
3617  * error status bits within SendCtxtErrStatus
3618  */
3619 static u64 access_pio_write_out_of_bounds_err_cnt(
3620                                 const struct cntr_entry *entry,
3621                                 void *context, int vl, int mode, u64 data)
3622 {
3623         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3624
3625         return dd->sw_ctxt_err_status_cnt[4];
3626 }
3627
3628 static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3629                                              void *context, int vl, int mode,
3630                                              u64 data)
3631 {
3632         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3633
3634         return dd->sw_ctxt_err_status_cnt[3];
3635 }
3636
3637 static u64 access_pio_write_crosses_boundary_err_cnt(
3638                                 const struct cntr_entry *entry,
3639                                 void *context, int vl, int mode, u64 data)
3640 {
3641         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3642
3643         return dd->sw_ctxt_err_status_cnt[2];
3644 }
3645
3646 static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3647                                                 void *context, int vl,
3648                                                 int mode, u64 data)
3649 {
3650         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3651
3652         return dd->sw_ctxt_err_status_cnt[1];
3653 }
3654
3655 static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3656                                                void *context, int vl, int mode,
3657                                                u64 data)
3658 {
3659         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3660
3661         return dd->sw_ctxt_err_status_cnt[0];
3662 }
3663
3664 /*
3665  * Software counters corresponding to each of the
3666  * error status bits within SendDmaEngErrStatus
3667  */
3668 static u64 access_sdma_header_request_fifo_cor_err_cnt(
3669                                 const struct cntr_entry *entry,
3670                                 void *context, int vl, int mode, u64 data)
3671 {
3672         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3673
3674         return dd->sw_send_dma_eng_err_status_cnt[23];
3675 }
3676
3677 static u64 access_sdma_header_storage_cor_err_cnt(
3678                                 const struct cntr_entry *entry,
3679                                 void *context, int vl, int mode, u64 data)
3680 {
3681         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3682
3683         return dd->sw_send_dma_eng_err_status_cnt[22];
3684 }
3685
3686 static u64 access_sdma_packet_tracking_cor_err_cnt(
3687                                 const struct cntr_entry *entry,
3688                                 void *context, int vl, int mode, u64 data)
3689 {
3690         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3691
3692         return dd->sw_send_dma_eng_err_status_cnt[21];
3693 }
3694
3695 static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3696                                             void *context, int vl, int mode,
3697                                             u64 data)
3698 {
3699         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3700
3701         return dd->sw_send_dma_eng_err_status_cnt[20];
3702 }
3703
3704 static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3705                                               void *context, int vl, int mode,
3706                                               u64 data)
3707 {
3708         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3709
3710         return dd->sw_send_dma_eng_err_status_cnt[19];
3711 }
3712
3713 static u64 access_sdma_header_request_fifo_unc_err_cnt(
3714                                 const struct cntr_entry *entry,
3715                                 void *context, int vl, int mode, u64 data)
3716 {
3717         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3718
3719         return dd->sw_send_dma_eng_err_status_cnt[18];
3720 }
3721
3722 static u64 access_sdma_header_storage_unc_err_cnt(
3723                                 const struct cntr_entry *entry,
3724                                 void *context, int vl, int mode, u64 data)
3725 {
3726         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3727
3728         return dd->sw_send_dma_eng_err_status_cnt[17];
3729 }
3730
3731 static u64 access_sdma_packet_tracking_unc_err_cnt(
3732                                 const struct cntr_entry *entry,
3733                                 void *context, int vl, int mode, u64 data)
3734 {
3735         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3736
3737         return dd->sw_send_dma_eng_err_status_cnt[16];
3738 }
3739
3740 static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3741                                             void *context, int vl, int mode,
3742                                             u64 data)
3743 {
3744         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3745
3746         return dd->sw_send_dma_eng_err_status_cnt[15];
3747 }
3748
3749 static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3750                                               void *context, int vl, int mode,
3751                                               u64 data)
3752 {
3753         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3754
3755         return dd->sw_send_dma_eng_err_status_cnt[14];
3756 }
3757
3758 static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3759                                        void *context, int vl, int mode,
3760                                        u64 data)
3761 {
3762         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3763
3764         return dd->sw_send_dma_eng_err_status_cnt[13];
3765 }
3766
3767 static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3768                                              void *context, int vl, int mode,
3769                                              u64 data)
3770 {
3771         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3772
3773         return dd->sw_send_dma_eng_err_status_cnt[12];
3774 }
3775
3776 static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3777                                               void *context, int vl, int mode,
3778                                               u64 data)
3779 {
3780         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3781
3782         return dd->sw_send_dma_eng_err_status_cnt[11];
3783 }
3784
3785 static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3786                                              void *context, int vl, int mode,
3787                                              u64 data)
3788 {
3789         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3790
3791         return dd->sw_send_dma_eng_err_status_cnt[10];
3792 }
3793
3794 static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3795                                           void *context, int vl, int mode,
3796                                           u64 data)
3797 {
3798         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3799
3800         return dd->sw_send_dma_eng_err_status_cnt[9];
3801 }
3802
3803 static u64 access_sdma_packet_desc_overflow_err_cnt(
3804                                 const struct cntr_entry *entry,
3805                                 void *context, int vl, int mode, u64 data)
3806 {
3807         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3808
3809         return dd->sw_send_dma_eng_err_status_cnt[8];
3810 }
3811
3812 static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3813                                                void *context, int vl,
3814                                                int mode, u64 data)
3815 {
3816         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3817
3818         return dd->sw_send_dma_eng_err_status_cnt[7];
3819 }
3820
3821 static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3822                                     void *context, int vl, int mode, u64 data)
3823 {
3824         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3825
3826         return dd->sw_send_dma_eng_err_status_cnt[6];
3827 }
3828
3829 static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3830                                         void *context, int vl, int mode,
3831                                         u64 data)
3832 {
3833         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3834
3835         return dd->sw_send_dma_eng_err_status_cnt[5];
3836 }
3837
3838 static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3839                                           void *context, int vl, int mode,
3840                                           u64 data)
3841 {
3842         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3843
3844         return dd->sw_send_dma_eng_err_status_cnt[4];
3845 }
3846
3847 static u64 access_sdma_tail_out_of_bounds_err_cnt(
3848                                 const struct cntr_entry *entry,
3849                                 void *context, int vl, int mode, u64 data)
3850 {
3851         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3852
3853         return dd->sw_send_dma_eng_err_status_cnt[3];
3854 }
3855
3856 static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3857                                         void *context, int vl, int mode,
3858                                         u64 data)
3859 {
3860         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3861
3862         return dd->sw_send_dma_eng_err_status_cnt[2];
3863 }
3864
3865 static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3866                                             void *context, int vl, int mode,
3867                                             u64 data)
3868 {
3869         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3870
3871         return dd->sw_send_dma_eng_err_status_cnt[1];
3872 }
3873
3874 static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3875                                         void *context, int vl, int mode,
3876                                         u64 data)
3877 {
3878         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3879
3880         return dd->sw_send_dma_eng_err_status_cnt[0];
3881 }
3882
3883 #define def_access_sw_cpu(cntr) \
3884 static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,               \
3885                               void *context, int vl, int mode, u64 data)      \
3886 {                                                                             \
3887         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
3888         return read_write_cpu(ppd->dd, &ppd->ibport_data.z_ ##cntr,           \
3889                               ppd->ibport_data.cntr, vl,                      \
3890                               mode, data);                                    \
3891 }
3892
3893 def_access_sw_cpu(rc_acks);
3894 def_access_sw_cpu(rc_qacks);
3895 def_access_sw_cpu(rc_delayed_comp);
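/*
 * For reference, def_access_sw_cpu(rc_acks) above expands to roughly the
 * following; the other instantiations differ only in the counter name:
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd, &ppd->ibport_data.z_rc_acks,
 *				      ppd->ibport_data.rc_acks, vl,
 *				      mode, data);
 *	}
 */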
3896
3897 #define def_access_ibp_counter(cntr) \
3898 static u64 access_ibp_##cntr(const struct cntr_entry *entry,                  \
3899                                 void *context, int vl, int mode, u64 data)    \
3900 {                                                                             \
3901         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
3902                                                                               \
3903         if (vl != CNTR_INVALID_VL)                                            \
3904                 return 0;                                                     \
3905                                                                               \
3906         return read_write_sw(ppd->dd, &ppd->ibport_data.n_ ##cntr,            \
3907                              mode, data);                                     \
3908 }
3909
3910 def_access_ibp_counter(loop_pkts);
3911 def_access_ibp_counter(rc_resends);
3912 def_access_ibp_counter(rnr_naks);
3913 def_access_ibp_counter(other_naks);
3914 def_access_ibp_counter(rc_timeouts);
3915 def_access_ibp_counter(pkt_drops);
3916 def_access_ibp_counter(dmawait);
3917 def_access_ibp_counter(rc_seqnak);
3918 def_access_ibp_counter(rc_dupreq);
3919 def_access_ibp_counter(rdma_seq);
3920 def_access_ibp_counter(unaligned);
3921 def_access_ibp_counter(seq_naks);
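/*
 * For reference, def_access_ibp_counter(loop_pkts) above expands to
 * roughly the following; these counters are per-port totals, so a
 * request for a specific VL (vl != CNTR_INVALID_VL) just returns 0:
 *
 *	static u64 access_ibp_loop_pkts(const struct cntr_entry *entry,
 *					void *context, int vl, int mode,
 *					u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *		if (vl != CNTR_INVALID_VL)
 *			return 0;
 *
 *		return read_write_sw(ppd->dd, &ppd->ibport_data.n_loop_pkts,
 *				     mode, data);
 *	}
 */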
3922
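/*
 * Illustrative note on the table below: dev_cntrs[] is indexed by the C_*
 * device counter enum, and each element is built by one of the
 * *_DEV_CNTR_ELEM/DC_PERF_CNTR* helper macros.  Assuming each element
 * carries a callback with the same signature as the access_* helpers in
 * this file (the member name rw_cntr and the CNTR_MODE_R read mode used
 * here are assumptions for illustration), sampling one counter might look
 * like:
 *
 *	u64 val = dev_cntrs[C_RCV_OVF].rw_cntr(&dev_cntrs[C_RCV_OVF], dd,
 *					       CNTR_INVALID_VL, CNTR_MODE_R,
 *					       0);
 */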
3923 static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3924 [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3925 [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
3926                         CNTR_NORMAL),
3927 [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3928                         CNTR_NORMAL),
3929 [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
3930                         RCV_TID_FLOW_GEN_MISMATCH_CNT,
3931                         CNTR_NORMAL),
3932 [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
3933                         CNTR_NORMAL),
3934 [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
3935                         RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
3936 [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
3937                         CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
3938 [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
3939                         CNTR_NORMAL),
3940 [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
3941                         CNTR_NORMAL),
3942 [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
3943                         CNTR_NORMAL),
3944 [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
3945                         CNTR_NORMAL),
3946 [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
3947                         CNTR_NORMAL),
3948 [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
3949                         CNTR_NORMAL),
3950 [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
3951                         CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
3952 [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
3953                         CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
3954 [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
3955                               CNTR_SYNTH),
3956 [C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
3957 [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
3958                                  CNTR_SYNTH),
3959 [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
3960                                   CNTR_SYNTH),
3961 [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
3962                                   CNTR_SYNTH),
3963 [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
3964                                    DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
3965 [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
3966                                   DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
3967                                   CNTR_SYNTH),
3968 [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
3969                                 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
3970 [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
3971                                CNTR_SYNTH),
3972 [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
3973                               CNTR_SYNTH),
3974 [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
3975                                CNTR_SYNTH),
3976 [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
3977                                  CNTR_SYNTH),
3978 [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
3979                                 CNTR_SYNTH),
3980 [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
3981                                 CNTR_SYNTH),
3982 [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
3983                                CNTR_SYNTH),
3984 [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
3985                                  CNTR_SYNTH | CNTR_VL),
3986 [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
3987                                 CNTR_SYNTH | CNTR_VL),
3988 [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
3989 [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
3990                                  CNTR_SYNTH | CNTR_VL),
3991 [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
3992 [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
3993                                  CNTR_SYNTH | CNTR_VL),
3994 [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
3995                               CNTR_SYNTH),
3996 [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
3997                                  CNTR_SYNTH | CNTR_VL),
3998 [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
3999                                 CNTR_SYNTH),
4000 [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4001                                    CNTR_SYNTH | CNTR_VL),
4002 [C_DC_TOTAL_CRC] =
4003         DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4004                          CNTR_SYNTH),
4005 [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4006                                   CNTR_SYNTH),
4007 [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4008                                   CNTR_SYNTH),
4009 [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4010                                   CNTR_SYNTH),
4011 [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4012                                   CNTR_SYNTH),
4013 [C_DC_CRC_MULT_LN] =
4014         DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4015                          CNTR_SYNTH),
4016 [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4017                                     CNTR_SYNTH),
4018 [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4019                                     CNTR_SYNTH),
4020 [C_DC_SEQ_CRC_CNT] =
4021         DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4022                          CNTR_SYNTH),
4023 [C_DC_ESC0_ONLY_CNT] =
4024         DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4025                          CNTR_SYNTH),
4026 [C_DC_ESC0_PLUS1_CNT] =
4027         DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4028                          CNTR_SYNTH),
4029 [C_DC_ESC0_PLUS2_CNT] =
4030         DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4031                          CNTR_SYNTH),
4032 [C_DC_REINIT_FROM_PEER_CNT] =
4033         DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4034                          CNTR_SYNTH),
4035 [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4036                                   CNTR_SYNTH),
4037 [C_DC_MISC_FLG_CNT] =
4038         DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4039                          CNTR_SYNTH),
4040 [C_DC_PRF_GOOD_LTP_CNT] =
4041         DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4042 [C_DC_PRF_ACCEPTED_LTP_CNT] =
4043         DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4044                          CNTR_SYNTH),
4045 [C_DC_PRF_RX_FLIT_CNT] =
4046         DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4047 [C_DC_PRF_TX_FLIT_CNT] =
4048         DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4049 [C_DC_PRF_CLK_CNTR] =
4050         DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4051 [C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4052         DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4053 [C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4054         DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4055                          CNTR_SYNTH),
4056 [C_DC_PG_STS_TX_SBE_CNT] =
4057         DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4058 [C_DC_PG_STS_TX_MBE_CNT] =
4059         DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4060                          CNTR_SYNTH),
4061 [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4062                             access_sw_cpu_intr),
4063 [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4064                             access_sw_cpu_rcv_limit),
4065 [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4066                             access_sw_vtx_wait),
4067 [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4068                             access_sw_pio_wait),
4069 [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4070                             access_sw_kmem_wait),
4071 [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4072                             access_sw_send_schedule),
4073 /* MISC_ERR_STATUS */
4074 [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4075                                 CNTR_NORMAL,
4076                                 access_misc_pll_lock_fail_err_cnt),
4077 [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4078                                 CNTR_NORMAL,
4079                                 access_misc_mbist_fail_err_cnt),
4080 [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4081                                 CNTR_NORMAL,
4082                                 access_misc_invalid_eep_cmd_err_cnt),
4083 [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4084                                 CNTR_NORMAL,
4085                                 access_misc_efuse_done_parity_err_cnt),
4086 [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4087                                 CNTR_NORMAL,
4088                                 access_misc_efuse_write_err_cnt),
4089 [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4090                                 0, CNTR_NORMAL,
4091                                 access_misc_efuse_read_bad_addr_err_cnt),
4092 [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4093                                 CNTR_NORMAL,
4094                                 access_misc_efuse_csr_parity_err_cnt),
4095 [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4096                                 CNTR_NORMAL,
4097                                 access_misc_fw_auth_failed_err_cnt),
4098 [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4099                                 CNTR_NORMAL,
4100                                 access_misc_key_mismatch_err_cnt),
4101 [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4102                                 CNTR_NORMAL,
4103                                 access_misc_sbus_write_failed_err_cnt),
4104 [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4105                                 CNTR_NORMAL,
4106                                 access_misc_csr_write_bad_addr_err_cnt),
4107 [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4108                                 CNTR_NORMAL,
4109                                 access_misc_csr_read_bad_addr_err_cnt),
4110 [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4111                                 CNTR_NORMAL,
4112                                 access_misc_csr_parity_err_cnt),
4113 /* CceErrStatus */
4114 [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4115                                 CNTR_NORMAL,
4116                                 access_sw_cce_err_status_aggregated_cnt),
4117 [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4118                                 CNTR_NORMAL,
4119                                 access_cce_msix_csr_parity_err_cnt),
4120 [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4121                                 CNTR_NORMAL,
4122                                 access_cce_int_map_unc_err_cnt),
4123 [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4124                                 CNTR_NORMAL,
4125                                 access_cce_int_map_cor_err_cnt),
4126 [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4127                                 CNTR_NORMAL,
4128                                 access_cce_msix_table_unc_err_cnt),
4129 [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4130                                 CNTR_NORMAL,
4131                                 access_cce_msix_table_cor_err_cnt),
4132 [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4133                                 0, CNTR_NORMAL,
4134                                 access_cce_rxdma_conv_fifo_parity_err_cnt),
4135 [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4136                                 0, CNTR_NORMAL,
4137                                 access_cce_rcpl_async_fifo_parity_err_cnt),
4138 [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4139                                 CNTR_NORMAL,
4140                                 access_cce_seg_write_bad_addr_err_cnt),
4141 [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4142                                 CNTR_NORMAL,
4143                                 access_cce_seg_read_bad_addr_err_cnt),
4144 [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4145                                 CNTR_NORMAL,
4146                                 access_la_triggered_cnt),
4147 [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4148                                 CNTR_NORMAL,
4149                                 access_cce_trgt_cpl_timeout_err_cnt),
4150 [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4151                                 CNTR_NORMAL,
4152                                 access_pcic_receive_parity_err_cnt),
4153 [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4154                                 CNTR_NORMAL,
4155                                 access_pcic_transmit_back_parity_err_cnt),
4156 [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4157                                 0, CNTR_NORMAL,
4158                                 access_pcic_transmit_front_parity_err_cnt),
4159 [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4160                                 CNTR_NORMAL,
4161                                 access_pcic_cpl_dat_q_unc_err_cnt),
4162 [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4163                                 CNTR_NORMAL,
4164                                 access_pcic_cpl_hd_q_unc_err_cnt),
4165 [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4166                                 CNTR_NORMAL,
4167                                 access_pcic_post_dat_q_unc_err_cnt),
4168 [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4169                                 CNTR_NORMAL,
4170                                 access_pcic_post_hd_q_unc_err_cnt),
4171 [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4172                                 CNTR_NORMAL,
4173                                 access_pcic_retry_sot_mem_unc_err_cnt),
4174 [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4175                                 CNTR_NORMAL,
4176                                 access_pcic_retry_mem_unc_err),
4177 [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4178                                 CNTR_NORMAL,
4179                                 access_pcic_n_post_dat_q_parity_err_cnt),
4180 [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4181                                 CNTR_NORMAL,
4182                                 access_pcic_n_post_h_q_parity_err_cnt),
4183 [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4184                                 CNTR_NORMAL,
4185                                 access_pcic_cpl_dat_q_cor_err_cnt),
4186 [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4187                                 CNTR_NORMAL,
4188                                 access_pcic_cpl_hd_q_cor_err_cnt),
4189 [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4190                                 CNTR_NORMAL,
4191                                 access_pcic_post_dat_q_cor_err_cnt),
4192 [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4193                                 CNTR_NORMAL,
4194                                 access_pcic_post_hd_q_cor_err_cnt),
4195 [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4196                                 CNTR_NORMAL,
4197                                 access_pcic_retry_sot_mem_cor_err_cnt),
4198 [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4199                                 CNTR_NORMAL,
4200                                 access_pcic_retry_mem_cor_err_cnt),
4201 [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4202                                 "CceCli1AsyncFifoDbgParityError", 0, 0,
4203                                 CNTR_NORMAL,
4204                                 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4205 [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4206                                 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4207                                 CNTR_NORMAL,
4208                                 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4209                                 ),
4210 [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4211                         "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4212                         CNTR_NORMAL,
4213                         access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4214 [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4215                         "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4216                         CNTR_NORMAL,
4217                         access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4218 [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4219                         0, CNTR_NORMAL,
4220                         access_cce_cli2_async_fifo_parity_err_cnt),
4221 [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4222                         CNTR_NORMAL,
4223                         access_cce_csr_cfg_bus_parity_err_cnt),
4224 [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4225                         0, CNTR_NORMAL,
4226                         access_cce_cli0_async_fifo_parity_err_cnt),
4227 [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4228                         CNTR_NORMAL,
4229                         access_cce_rspd_data_parity_err_cnt),
4230 [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4231                         CNTR_NORMAL,
4232                         access_cce_trgt_access_err_cnt),
4233 [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4234                         0, CNTR_NORMAL,
4235                         access_cce_trgt_async_fifo_parity_err_cnt),
4236 [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4237                         CNTR_NORMAL,
4238                         access_cce_csr_write_bad_addr_err_cnt),
4239 [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4240                         CNTR_NORMAL,
4241                         access_cce_csr_read_bad_addr_err_cnt),
4242 [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4243                         CNTR_NORMAL,
4244                         access_ccs_csr_parity_err_cnt),
4245
4246 /* RcvErrStatus */
4247 [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4248                         CNTR_NORMAL,
4249                         access_rx_csr_parity_err_cnt),
4250 [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4251                         CNTR_NORMAL,
4252                         access_rx_csr_write_bad_addr_err_cnt),
4253 [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4254                         CNTR_NORMAL,
4255                         access_rx_csr_read_bad_addr_err_cnt),
4256 [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4257                         CNTR_NORMAL,
4258                         access_rx_dma_csr_unc_err_cnt),
4259 [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4260                         CNTR_NORMAL,
4261                         access_rx_dma_dq_fsm_encoding_err_cnt),
4262 [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4263                         CNTR_NORMAL,
4264                         access_rx_dma_eq_fsm_encoding_err_cnt),
4265 [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4266                         CNTR_NORMAL,
4267                         access_rx_dma_csr_parity_err_cnt),
4268 [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4269                         CNTR_NORMAL,
4270                         access_rx_rbuf_data_cor_err_cnt),
4271 [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4272                         CNTR_NORMAL,
4273                         access_rx_rbuf_data_unc_err_cnt),
4274 [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4275                         CNTR_NORMAL,
4276                         access_rx_dma_data_fifo_rd_cor_err_cnt),
4277 [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4278                         CNTR_NORMAL,
4279                         access_rx_dma_data_fifo_rd_unc_err_cnt),
4280 [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4281                         CNTR_NORMAL,
4282                         access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4283 [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4284                         CNTR_NORMAL,
4285                         access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4286 [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4287                         CNTR_NORMAL,
4288                         access_rx_rbuf_desc_part2_cor_err_cnt),
4289 [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4290                         CNTR_NORMAL,
4291                         access_rx_rbuf_desc_part2_unc_err_cnt),
4292 [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4293                         CNTR_NORMAL,
4294                         access_rx_rbuf_desc_part1_cor_err_cnt),
4295 [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4296                         CNTR_NORMAL,
4297                         access_rx_rbuf_desc_part1_unc_err_cnt),
4298 [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4299                         CNTR_NORMAL,
4300                         access_rx_hq_intr_fsm_err_cnt),
4301 [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4302                         CNTR_NORMAL,
4303                         access_rx_hq_intr_csr_parity_err_cnt),
4304 [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4305                         CNTR_NORMAL,
4306                         access_rx_lookup_csr_parity_err_cnt),
4307 [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4308                         CNTR_NORMAL,
4309                         access_rx_lookup_rcv_array_cor_err_cnt),
4310 [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4311                         CNTR_NORMAL,
4312                         access_rx_lookup_rcv_array_unc_err_cnt),
4313 [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4314                         0, CNTR_NORMAL,
4315                         access_rx_lookup_des_part2_parity_err_cnt),
4316 [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4317                         0, CNTR_NORMAL,
4318                         access_rx_lookup_des_part1_unc_cor_err_cnt),
4319 [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4320                         CNTR_NORMAL,
4321                         access_rx_lookup_des_part1_unc_err_cnt),
4322 [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4323                         CNTR_NORMAL,
4324                         access_rx_rbuf_next_free_buf_cor_err_cnt),
4325 [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4326                         CNTR_NORMAL,
4327                         access_rx_rbuf_next_free_buf_unc_err_cnt),
4328 [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4329                         "RxRbufFlInitWrAddrParityErr", 0, 0,
4330                         CNTR_NORMAL,
4331                         access_rbuf_fl_init_wr_addr_parity_err_cnt),
4332 [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4333                         0, CNTR_NORMAL,
4334                         access_rx_rbuf_fl_initdone_parity_err_cnt),
4335 [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4336                         0, CNTR_NORMAL,
4337                         access_rx_rbuf_fl_write_addr_parity_err_cnt),
4338 [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4339                         CNTR_NORMAL,
4340                         access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4341 [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4342                         CNTR_NORMAL,
4343                         access_rx_rbuf_empty_err_cnt),
4344 [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4345                         CNTR_NORMAL,
4346                         access_rx_rbuf_full_err_cnt),
4347 [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4348                         CNTR_NORMAL,
4349                         access_rbuf_bad_lookup_err_cnt),
4350 [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4351                         CNTR_NORMAL,
4352                         access_rbuf_ctx_id_parity_err_cnt),
4353 [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4354                         CNTR_NORMAL,
4355                         access_rbuf_csr_qeopdw_parity_err_cnt),
4356 [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4357                         "RxRbufCsrQNumOfPktParityErr", 0, 0,
4358                         CNTR_NORMAL,
4359                         access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4360 [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4361                         "RxRbufCsrQTlPtrParityErr", 0, 0,
4362                         CNTR_NORMAL,
4363                         access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4364 [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4365                         0, CNTR_NORMAL,
4366                         access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4367 [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4368                         0, CNTR_NORMAL,
4369                         access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4370 [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4371                         0, 0, CNTR_NORMAL,
4372                         access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4373 [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4374                         0, CNTR_NORMAL,
4375                         access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4376 [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4377                         "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4378                         CNTR_NORMAL,
4379                         access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4380 [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4381                         0, CNTR_NORMAL,
4382                         access_rx_rbuf_block_list_read_cor_err_cnt),
4383 [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4384                         0, CNTR_NORMAL,
4385                         access_rx_rbuf_block_list_read_unc_err_cnt),
4386 [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4387                         CNTR_NORMAL,
4388                         access_rx_rbuf_lookup_des_cor_err_cnt),
4389 [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4390                         CNTR_NORMAL,
4391                         access_rx_rbuf_lookup_des_unc_err_cnt),
4392 [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4393                         "RxRbufLookupDesRegUncCorErr", 0, 0,
4394                         CNTR_NORMAL,
4395                         access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4396 [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4397                         CNTR_NORMAL,
4398                         access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4399 [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4400                         CNTR_NORMAL,
4401                         access_rx_rbuf_free_list_cor_err_cnt),
4402 [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4403                         CNTR_NORMAL,
4404                         access_rx_rbuf_free_list_unc_err_cnt),
4405 [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4406                         CNTR_NORMAL,
4407                         access_rx_rcv_fsm_encoding_err_cnt),
4408 [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4409                         CNTR_NORMAL,
4410                         access_rx_dma_flag_cor_err_cnt),
4411 [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4412                         CNTR_NORMAL,
4413                         access_rx_dma_flag_unc_err_cnt),
4414 [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4415                         CNTR_NORMAL,
4416                         access_rx_dc_sop_eop_parity_err_cnt),
4417 [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4418                         CNTR_NORMAL,
4419                         access_rx_rcv_csr_parity_err_cnt),
4420 [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4421                         CNTR_NORMAL,
4422                         access_rx_rcv_qp_map_table_cor_err_cnt),
4423 [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4424                         CNTR_NORMAL,
4425                         access_rx_rcv_qp_map_table_unc_err_cnt),
4426 [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4427                         CNTR_NORMAL,
4428                         access_rx_rcv_data_cor_err_cnt),
4429 [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4430                         CNTR_NORMAL,
4431                         access_rx_rcv_data_unc_err_cnt),
4432 [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4433                         CNTR_NORMAL,
4434                         access_rx_rcv_hdr_cor_err_cnt),
4435 [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4436                         CNTR_NORMAL,
4437                         access_rx_rcv_hdr_unc_err_cnt),
4438 [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4439                         CNTR_NORMAL,
4440                         access_rx_dc_intf_parity_err_cnt),
4441 [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4442                         CNTR_NORMAL,
4443                         access_rx_dma_csr_cor_err_cnt),
4444 /* SendPioErrStatus */
4445 [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4446                         CNTR_NORMAL,
4447                         access_pio_pec_sop_head_parity_err_cnt),
4448 [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4449                         CNTR_NORMAL,
4450                         access_pio_pcc_sop_head_parity_err_cnt),
4451 [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4452                         0, 0, CNTR_NORMAL,
4453                         access_pio_last_returned_cnt_parity_err_cnt),
4454 [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4455                         0, CNTR_NORMAL,
4456                         access_pio_current_free_cnt_parity_err_cnt),
4457 [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4458                         CNTR_NORMAL,
4459                         access_pio_reserved_31_err_cnt),
4460 [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4461                         CNTR_NORMAL,
4462                         access_pio_reserved_30_err_cnt),
4463 [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4464                         CNTR_NORMAL,
4465                         access_pio_ppmc_sop_len_err_cnt),
4466 [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4467                         CNTR_NORMAL,
4468                         access_pio_ppmc_bqc_mem_parity_err_cnt),
4469 [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4470                         CNTR_NORMAL,
4471                         access_pio_vl_fifo_parity_err_cnt),
4472 [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4473                         CNTR_NORMAL,
4474                         access_pio_vlf_sop_parity_err_cnt),
4475 [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4476                         CNTR_NORMAL,
4477                         access_pio_vlf_v1_len_parity_err_cnt),
4478 [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4479                         CNTR_NORMAL,
4480                         access_pio_block_qw_count_parity_err_cnt),
4481 [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4482                         CNTR_NORMAL,
4483                         access_pio_write_qw_valid_parity_err_cnt),
4484 [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4485                         CNTR_NORMAL,
4486                         access_pio_state_machine_err_cnt),
4487 [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4488                         CNTR_NORMAL,
4489                         access_pio_write_data_parity_err_cnt),
4490 [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4491                         CNTR_NORMAL,
4492                         access_pio_host_addr_mem_cor_err_cnt),
4493 [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4494                         CNTR_NORMAL,
4495                         access_pio_host_addr_mem_unc_err_cnt),
4496 [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4497                         CNTR_NORMAL,
4498                         access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4499 [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4500                         CNTR_NORMAL,
4501                         access_pio_init_sm_in_err_cnt),
4502 [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4503                         CNTR_NORMAL,
4504                         access_pio_ppmc_pbl_fifo_err_cnt),
4505 [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4506                         0, CNTR_NORMAL,
4507                         access_pio_credit_ret_fifo_parity_err_cnt),
4508 [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4509                         CNTR_NORMAL,
4510                         access_pio_v1_len_mem_bank1_cor_err_cnt),
4511 [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4512                         CNTR_NORMAL,
4513                         access_pio_v1_len_mem_bank0_cor_err_cnt),
4514 [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4515                         CNTR_NORMAL,
4516                         access_pio_v1_len_mem_bank1_unc_err_cnt),
4517 [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4518                         CNTR_NORMAL,
4519                         access_pio_v1_len_mem_bank0_unc_err_cnt),
4520 [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4521                         CNTR_NORMAL,
4522                         access_pio_sm_pkt_reset_parity_err_cnt),
4523 [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4524                         CNTR_NORMAL,
4525                         access_pio_pkt_evict_fifo_parity_err_cnt),
4526 [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4527                         "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4528                         CNTR_NORMAL,
4529                         access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4530 [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4531                         CNTR_NORMAL,
4532                         access_pio_sbrdctl_crrel_parity_err_cnt),
4533 [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4534                         CNTR_NORMAL,
4535                         access_pio_pec_fifo_parity_err_cnt),
4536 [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4537                         CNTR_NORMAL,
4538                         access_pio_pcc_fifo_parity_err_cnt),
4539 [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4540                         CNTR_NORMAL,
4541                         access_pio_sb_mem_fifo1_err_cnt),
4542 [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4543                         CNTR_NORMAL,
4544                         access_pio_sb_mem_fifo0_err_cnt),
4545 [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4546                         CNTR_NORMAL,
4547                         access_pio_csr_parity_err_cnt),
4548 [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4549                         CNTR_NORMAL,
4550                         access_pio_write_addr_parity_err_cnt),
4551 [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4552                         CNTR_NORMAL,
4553                         access_pio_write_bad_ctxt_err_cnt),
4554 /* SendDmaErrStatus */
4555 [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4556                         0, CNTR_NORMAL,
4557                         access_sdma_pcie_req_tracking_cor_err_cnt),
4558 [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4559                         0, CNTR_NORMAL,
4560                         access_sdma_pcie_req_tracking_unc_err_cnt),
4561 [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4562                         CNTR_NORMAL,
4563                         access_sdma_csr_parity_err_cnt),
4564 [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4565                         CNTR_NORMAL,
4566                         access_sdma_rpy_tag_err_cnt),
4567 /* SendEgressErrStatus */
4568 [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4569                         CNTR_NORMAL,
4570                         access_tx_read_pio_memory_csr_unc_err_cnt),
4571 [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4572                         0, CNTR_NORMAL,
4573                         access_tx_read_sdma_memory_csr_err_cnt),
4574 [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4575                         CNTR_NORMAL,
4576                         access_tx_egress_fifo_cor_err_cnt),
4577 [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4578                         CNTR_NORMAL,
4579                         access_tx_read_pio_memory_cor_err_cnt),
4580 [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4581                         CNTR_NORMAL,
4582                         access_tx_read_sdma_memory_cor_err_cnt),
4583 [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4584                         CNTR_NORMAL,
4585                         access_tx_sb_hdr_cor_err_cnt),
4586 [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4587                         CNTR_NORMAL,
4588                         access_tx_credit_overrun_err_cnt),
4589 [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4590                         CNTR_NORMAL,
4591                         access_tx_launch_fifo8_cor_err_cnt),
4592 [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4593                         CNTR_NORMAL,
4594                         access_tx_launch_fifo7_cor_err_cnt),
4595 [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4596                         CNTR_NORMAL,
4597                         access_tx_launch_fifo6_cor_err_cnt),
4598 [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4599                         CNTR_NORMAL,
4600                         access_tx_launch_fifo5_cor_err_cnt),
4601 [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4602                         CNTR_NORMAL,
4603                         access_tx_launch_fifo4_cor_err_cnt),
4604 [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4605                         CNTR_NORMAL,
4606                         access_tx_launch_fifo3_cor_err_cnt),
4607 [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4608                         CNTR_NORMAL,
4609                         access_tx_launch_fifo2_cor_err_cnt),
4610 [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4611                         CNTR_NORMAL,
4612                         access_tx_launch_fifo1_cor_err_cnt),
4613 [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4614                         CNTR_NORMAL,
4615                         access_tx_launch_fifo0_cor_err_cnt),
4616 [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4617                         CNTR_NORMAL,
4618                         access_tx_credit_return_vl_err_cnt),
4619 [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4620                         CNTR_NORMAL,
4621                         access_tx_hcrc_insertion_err_cnt),
4622 [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4623                         CNTR_NORMAL,
4624                         access_tx_egress_fifo_unc_err_cnt),
4625 [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4626                         CNTR_NORMAL,
4627                         access_tx_read_pio_memory_unc_err_cnt),
4628 [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4629                         CNTR_NORMAL,
4630                         access_tx_read_sdma_memory_unc_err_cnt),
4631 [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4632                         CNTR_NORMAL,
4633                         access_tx_sb_hdr_unc_err_cnt),
4634 [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4635                         CNTR_NORMAL,
4636                         access_tx_credit_return_partiy_err_cnt),
4637 [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4638                         0, 0, CNTR_NORMAL,
4639                         access_tx_launch_fifo8_unc_or_parity_err_cnt),
4640 [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4641                         0, 0, CNTR_NORMAL,
4642                         access_tx_launch_fifo7_unc_or_parity_err_cnt),
4643 [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4644                         0, 0, CNTR_NORMAL,
4645                         access_tx_launch_fifo6_unc_or_parity_err_cnt),
4646 [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4647                         0, 0, CNTR_NORMAL,
4648                         access_tx_launch_fifo5_unc_or_parity_err_cnt),
4649 [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4650                         0, 0, CNTR_NORMAL,
4651                         access_tx_launch_fifo4_unc_or_parity_err_cnt),
4652 [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4653                         0, 0, CNTR_NORMAL,
4654                         access_tx_launch_fifo3_unc_or_parity_err_cnt),
4655 [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4656                         0, 0, CNTR_NORMAL,
4657                         access_tx_launch_fifo2_unc_or_parity_err_cnt),
4658 [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4659                         0, 0, CNTR_NORMAL,
4660                         access_tx_launch_fifo1_unc_or_parity_err_cnt),
4661 [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4662                         0, 0, CNTR_NORMAL,
4663                         access_tx_launch_fifo0_unc_or_parity_err_cnt),
4664 [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4665                         0, 0, CNTR_NORMAL,
4666                         access_tx_sdma15_disallowed_packet_err_cnt),
4667 [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4668                         0, 0, CNTR_NORMAL,
4669                         access_tx_sdma14_disallowed_packet_err_cnt),
4670 [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4671                         0, 0, CNTR_NORMAL,
4672                         access_tx_sdma13_disallowed_packet_err_cnt),
4673 [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4674                         0, 0, CNTR_NORMAL,
4675                         access_tx_sdma12_disallowed_packet_err_cnt),
4676 [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4677                         0, 0, CNTR_NORMAL,
4678                         access_tx_sdma11_disallowed_packet_err_cnt),
4679 [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4680                         0, 0, CNTR_NORMAL,
4681                         access_tx_sdma10_disallowed_packet_err_cnt),
4682 [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4683                         0, 0, CNTR_NORMAL,
4684                         access_tx_sdma9_disallowed_packet_err_cnt),
4685 [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4686                         0, 0, CNTR_NORMAL,
4687                         access_tx_sdma8_disallowed_packet_err_cnt),
4688 [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4689                         0, 0, CNTR_NORMAL,
4690                         access_tx_sdma7_disallowed_packet_err_cnt),
4691 [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4692                         0, 0, CNTR_NORMAL,
4693                         access_tx_sdma6_disallowed_packet_err_cnt),
4694 [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4695                         0, 0, CNTR_NORMAL,
4696                         access_tx_sdma5_disallowed_packet_err_cnt),
4697 [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4698                         0, 0, CNTR_NORMAL,
4699                         access_tx_sdma4_disallowed_packet_err_cnt),
4700 [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4701                         0, 0, CNTR_NORMAL,
4702                         access_tx_sdma3_disallowed_packet_err_cnt),
4703 [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4704                         0, 0, CNTR_NORMAL,
4705                         access_tx_sdma2_disallowed_packet_err_cnt),
4706 [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4707                         0, 0, CNTR_NORMAL,
4708                         access_tx_sdma1_disallowed_packet_err_cnt),
4709 [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4710                         0, 0, CNTR_NORMAL,
4711                         access_tx_sdma0_disallowed_packet_err_cnt),
4712 [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4713                         CNTR_NORMAL,
4714                         access_tx_config_parity_err_cnt),
4715 [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4716                         CNTR_NORMAL,
4717                         access_tx_sbrd_ctl_csr_parity_err_cnt),
4718 [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4719                         CNTR_NORMAL,
4720                         access_tx_launch_csr_parity_err_cnt),
4721 [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4722                         CNTR_NORMAL,
4723                         access_tx_illegal_vl_err_cnt),
4724 [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4725                         "TxSbrdCtlStateMachineParityErr", 0, 0,
4726                         CNTR_NORMAL,
4727                         access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4728 [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4729                         CNTR_NORMAL,
4730                         access_egress_reserved_10_err_cnt),
4731 [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4732                         CNTR_NORMAL,
4733                         access_egress_reserved_9_err_cnt),
4734 [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4735                         0, 0, CNTR_NORMAL,
4736                         access_tx_sdma_launch_intf_parity_err_cnt),
4737 [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4738                         CNTR_NORMAL,
4739                         access_tx_pio_launch_intf_parity_err_cnt),
4740 [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4741                         CNTR_NORMAL,
4742                         access_egress_reserved_6_err_cnt),
4743 [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4744                         CNTR_NORMAL,
4745                         access_tx_incorrect_link_state_err_cnt),
4746 [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4747                         CNTR_NORMAL,
4748                         access_tx_linkdown_err_cnt),
4749 [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4750                         "EgressFifoUnderrunOrParityErr", 0, 0,
4751                         CNTR_NORMAL,
4752                         access_tx_egress_fifi_underrun_or_parity_err_cnt),
4753 [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4754                         CNTR_NORMAL,
4755                         access_egress_reserved_2_err_cnt),
4756 [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4757                         CNTR_NORMAL,
4758                         access_tx_pkt_integrity_mem_unc_err_cnt),
4759 [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4760                         CNTR_NORMAL,
4761                         access_tx_pkt_integrity_mem_cor_err_cnt),
4762 /* SendErrStatus */
4763 [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4764                         CNTR_NORMAL,
4765                         access_send_csr_write_bad_addr_err_cnt),
4766 [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4767                         CNTR_NORMAL,
4768                         access_send_csr_read_bad_addr_err_cnt),
4769 [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4770                         CNTR_NORMAL,
4771                         access_send_csr_parity_cnt),
4772 /* SendCtxtErrStatus */
4773 [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4774                         CNTR_NORMAL,
4775                         access_pio_write_out_of_bounds_err_cnt),
4776 [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4777                         CNTR_NORMAL,
4778                         access_pio_write_overflow_err_cnt),
4779 [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4780                         0, 0, CNTR_NORMAL,
4781                         access_pio_write_crosses_boundary_err_cnt),
4782 [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4783                         CNTR_NORMAL,
4784                         access_pio_disallowed_packet_err_cnt),
4785 [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4786                         CNTR_NORMAL,
4787                         access_pio_inconsistent_sop_err_cnt),
4788 /* SendDmaEngErrStatus */
4789 [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4790                         0, 0, CNTR_NORMAL,
4791                         access_sdma_header_request_fifo_cor_err_cnt),
4792 [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4793                         CNTR_NORMAL,
4794                         access_sdma_header_storage_cor_err_cnt),
4795 [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4796                         CNTR_NORMAL,
4797                         access_sdma_packet_tracking_cor_err_cnt),
4798 [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4799                         CNTR_NORMAL,
4800                         access_sdma_assembly_cor_err_cnt),
4801 [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4802                         CNTR_NORMAL,
4803                         access_sdma_desc_table_cor_err_cnt),
4804 [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4805                         0, 0, CNTR_NORMAL,
4806                         access_sdma_header_request_fifo_unc_err_cnt),
4807 [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4808                         CNTR_NORMAL,
4809                         access_sdma_header_storage_unc_err_cnt),
4810 [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4811                         CNTR_NORMAL,
4812                         access_sdma_packet_tracking_unc_err_cnt),
4813 [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4814                         CNTR_NORMAL,
4815                         access_sdma_assembly_unc_err_cnt),
4816 [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4817                         CNTR_NORMAL,
4818                         access_sdma_desc_table_unc_err_cnt),
4819 [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4820                         CNTR_NORMAL,
4821                         access_sdma_timeout_err_cnt),
4822 [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4823                         CNTR_NORMAL,
4824                         access_sdma_header_length_err_cnt),
4825 [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4826                         CNTR_NORMAL,
4827                         access_sdma_header_address_err_cnt),
4828 [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4829                         CNTR_NORMAL,
4830                         access_sdma_header_select_err_cnt),
4831 [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4832                         CNTR_NORMAL,
4833                         access_sdma_reserved_9_err_cnt),
4834 [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4835                         CNTR_NORMAL,
4836                         access_sdma_packet_desc_overflow_err_cnt),
4837 [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4838                         CNTR_NORMAL,
4839                         access_sdma_length_mismatch_err_cnt),
4840 [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4841                         CNTR_NORMAL,
4842                         access_sdma_halt_err_cnt),
4843 [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4844                         CNTR_NORMAL,
4845                         access_sdma_mem_read_err_cnt),
4846 [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4847                         CNTR_NORMAL,
4848                         access_sdma_first_desc_err_cnt),
4849 [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4850                         CNTR_NORMAL,
4851                         access_sdma_tail_out_of_bounds_err_cnt),
4852 [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4853                         CNTR_NORMAL,
4854                         access_sdma_too_long_err_cnt),
4855 [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4856                         CNTR_NORMAL,
4857                         access_sdma_gen_mismatch_err_cnt),
4858 [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4859                         CNTR_NORMAL,
4860                         access_sdma_wrong_dw_err_cnt),
4861 };
4862
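/*
 * Per-port counter table, indexed by the C_* port counter enum.  The
 * TXE32/TXE64/RXE64 element macros cover the send- and receive-side
 * hardware counters, while the SW_* and IBP entries are software
 * counters backed by the access_sw_*() callbacks and the
 * def_access_ibp_counter() expansions above.
 */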
4863 static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4864 [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4865                         CNTR_NORMAL),
4866 [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4867                         CNTR_NORMAL),
4868 [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4869                         CNTR_NORMAL),
4870 [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4871                         CNTR_NORMAL),
4872 [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4873                         CNTR_NORMAL),
4874 [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4875                         CNTR_NORMAL),
4876 [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4877                         CNTR_NORMAL),
4878 [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4879 [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4880 [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4881 [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
4882                         CNTR_SYNTH | CNTR_VL),
4883 [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
4884                         CNTR_SYNTH | CNTR_VL),
4885 [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
4886                         CNTR_SYNTH | CNTR_VL),
4887 [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
4888 [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
4889 [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4890                         access_sw_link_dn_cnt),
4891 [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4892                         access_sw_link_up_cnt),
4893 [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
4894                                  access_sw_unknown_frame_cnt),
4895 [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4896                         access_sw_xmit_discards),
4897 [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
4898                         CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
4899                         access_sw_xmit_discards),
4900 [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
4901                         access_xmit_constraint_errs),
4902 [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
4903                         access_rcv_constraint_errs),
4904 [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
4905 [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
4906 [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
4907 [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
4908 [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
4909 [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
4910 [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
4911 [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
4912 [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
4913 [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
4914 [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
4915 [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
4916 [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
4917                                access_sw_cpu_rc_acks),
4918 [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
4919                                access_sw_cpu_rc_qacks),
4920 [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
4921                                access_sw_cpu_rc_delayed_comp),
4922 [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
4923 [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
4924 [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
4925 [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
4926 [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
4927 [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
4928 [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
4929 [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
4930 [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
4931 [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
4932 [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
4933 [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
4934 [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
4935 [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
4936 [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
4937 [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
4938 [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
4939 [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
4940 [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
4941 [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
4942 [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
4943 [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
4944 [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
4945 [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
4946 [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
4947 [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
4948 [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
4949 [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
4950 [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
4951 [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
4952 [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
4953 [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
4954 [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
4955 [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
4956 [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
4957 [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
4958 [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
4959 [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
4960 [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
4961 [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
4962 [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
4963 [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
4964 [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
4965 [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
4966 [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
4967 [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
4968 [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
4969 [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
4970 [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
4971 [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
4972 [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
4973 [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
4974 [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
4975 [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
4976 [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
4977 [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
4978 [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
4979 [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
4980 [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
4981 [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
4982 [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
4983 [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
4984 [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
4985 [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
4986 [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
4987 [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
4988 [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
4989 [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
4990 [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
4991 [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
4992 [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
4993 [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
4994 [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
4995 [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
4996 [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
4997 [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
4998 [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
4999 [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5000 [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5001 [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5002 };
5003
5004 /* ======================================================================== */
5005
5006 /* return true if this is chip revision A */
5007 int is_ax(struct hfi1_devdata *dd)
5008 {
5009         u8 chip_rev_minor =
5010                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5011                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
5012         return (chip_rev_minor & 0xf0) == 0;
5013 }
5014
5015 /* return true if this is chip revision B */
5016 int is_bx(struct hfi1_devdata *dd)
5017 {
5018         u8 chip_rev_minor =
5019                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5020                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
5021         return (chip_rev_minor & 0xF0) == 0x10;
5022 }
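/*
 * The two checks above both key off the upper nibble of the minor chip
 * revision: values 0x00-0x0f are A-step parts (is_ax() returns true, e.g.
 * for 0x01), while 0x10-0x1f are B-step parts (is_bx() returns true, e.g.
 * for 0x10).
 */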
5023
5024 /*
5025  * Append string s to buffer buf.  Arguments curp and lenp are the current
5026  * position and remaining length, respectively.
5027  *
5028  * return 0 on success, 1 on out of room
5029  */
5030 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5031 {
5032         char *p = *curp;
5033         int len = *lenp;
5034         int result = 0; /* success */
5035         char c;
5036
5037         /* add a comma if this is not the first string in the buffer */
5038         if (p != buf) {
5039                 if (len == 0) {
5040                         result = 1; /* out of room */
5041                         goto done;
5042                 }
5043                 *p++ = ',';
5044                 len--;
5045         }
5046
5047         /* copy the string */
5048         while ((c = *s++) != 0) {
5049                 if (len == 0) {
5050                         result = 1; /* out of room */
5051                         goto done;
5052                 }
5053                 *p++ = c;
5054                 len--;
5055         }
5056
5057 done:
5058         /* write return values */
5059         *curp = p;
5060         *lenp = len;
5061
5062         return result;
5063 }
5064
5065 /*
5066  * Using the given flag table, print a comma separated string into
5067  * the buffer.  End in '*' if the buffer is too short.
5068  */
5069 static char *flag_string(char *buf, int buf_len, u64 flags,
5070                                 struct flag_table *table, int table_size)
5071 {
5072         char extra[32];
5073         char *p = buf;
5074         int len = buf_len;
5075         int no_room = 0;
5076         int i;
5077
5078         /* make sure there are at least 2 bytes so we can form "*" */
5079         if (len < 2)
5080                 return "";
5081
5082         len--;  /* leave room for a nul */
5083         for (i = 0; i < table_size; i++) {
5084                 if (flags & table[i].flag) {
5085                         no_room = append_str(buf, &p, &len, table[i].str);
5086                         if (no_room)
5087                                 break;
5088                         flags &= ~table[i].flag;
5089                 }
5090         }
5091
5092         /* any undocumented bits left? */
5093         if (!no_room && flags) {
5094                 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5095                 no_room = append_str(buf, &p, &len, extra);
5096         }
5097
5098         /* add * if ran out of room */
5099         if (no_room) {
5100                 /* may need to back up to add space for a '*' */
5101                 if (len == 0)
5102                         --p;
5103                 *p++ = '*';
5104         }
5105
5106         /* add final nul - space already allocated above */
5107         *p = 0;
5108         return buf;
5109 }
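/*
 * Worked example for flag_string(), using a hypothetical two-entry table
 * (the initializer assumes the field order { flag, str }, matching how the
 * entries are read above):
 *
 *	struct flag_table tbl[] = { { 0x1, "ErrA" }, { 0x2, "ErrB" } };
 *	flag_string(buf, sizeof(buf), 0x7, tbl, 2);
 *
 * would produce "ErrA,ErrB,bits 0x4": the named bits first, then any
 * undocumented remainder, ending in '*' only if the buffer were too short
 * to hold it all.
 */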
5110
5111 /* first 8 CCE error interrupt source names */
5112 static const char * const cce_misc_names[] = {
5113         "CceErrInt",            /* 0 */
5114         "RxeErrInt",            /* 1 */
5115         "MiscErrInt",           /* 2 */
5116         "Reserved3",            /* 3 */
5117         "PioErrInt",            /* 4 */
5118         "SDmaErrInt",           /* 5 */
5119         "EgressErrInt",         /* 6 */
5120         "TxeErrInt"             /* 7 */
5121 };
5122
5123 /*
5124  * Return the miscellaneous error interrupt name.
5125  */
5126 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5127 {
5128         if (source < ARRAY_SIZE(cce_misc_names))
5129                 strncpy(buf, cce_misc_names[source], bsize);
5130         else
5131                 snprintf(buf,
5132                         bsize,
5133                         "Reserved%u",
5134                         source + IS_GENERAL_ERR_START);
5135
5136         return buf;
5137 }
5138
5139 /*
5140  * Return the SDMA engine error interrupt name.
5141  */
5142 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5143 {
5144         snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5145         return buf;
5146 }
5147
5148 /*
5149  * Return the send context error interrupt name.
5150  */
5151 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5152 {
5153         snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5154         return buf;
5155 }
5156
5157 static const char * const various_names[] = {
5158         "PbcInt",
5159         "GpioAssertInt",
5160         "Qsfp1Int",
5161         "Qsfp2Int",
5162         "TCritInt"
5163 };
5164
5165 /*
5166  * Return the various interrupt name.
5167  */
5168 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5169 {
5170         if (source < ARRAY_SIZE(various_names))
5171                 strncpy(buf, various_names[source], bsize);
5172         else
5173                 snprintf(buf, bsize, "Reserved%u", source+IS_VARIOUS_START);
5174         return buf;
5175 }
5176
5177 /*
5178  * Return the DC interrupt name.
5179  */
5180 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5181 {
5182         static const char * const dc_int_names[] = {
5183                 "common",
5184                 "lcb",
5185                 "8051",
5186                 "lbm"   /* local block merge */
5187         };
5188
5189         if (source < ARRAY_SIZE(dc_int_names))
5190                 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5191         else
5192                 snprintf(buf, bsize, "DCInt%u", source);
5193         return buf;
5194 }
5195
5196 static const char * const sdma_int_names[] = {
5197         "SDmaInt",
5198         "SdmaIdleInt",
5199         "SdmaProgressInt",
5200 };
5201
5202 /*
5203  * Return the SDMA engine interrupt name.
5204  */
5205 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5206 {
5207         /* what interrupt */
5208         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
5209         /* which engine */
5210         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5211
5212         if (likely(what < 3))
5213                 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5214         else
5215                 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5216         return buf;
5217 }
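/*
 * Decode illustration (assuming TXE_NUM_SDMA_ENGINES == 16, as the DMA
 * engine counter comments elsewhere in this file suggest): source 0 maps to
 * what == 0, which == 0 and yields "SDmaInt0"; source 17 maps to what == 1,
 * which == 1 and yields "SdmaIdleInt1"; any source with what >= 3 falls
 * through to the "Invalid SDMA interrupt" string.
 */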
5218
5219 /*
5220  * Return the receive available interrupt name.
5221  */
5222 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5223 {
5224         snprintf(buf, bsize, "RcvAvailInt%u", source);
5225         return buf;
5226 }
5227
5228 /*
5229  * Return the receive urgent interrupt name.
5230  */
5231 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5232 {
5233         snprintf(buf, bsize, "RcvUrgentInt%u", source);
5234         return buf;
5235 }
5236
5237 /*
5238  * Return the send credit interrupt name.
5239  */
5240 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5241 {
5242         snprintf(buf, bsize, "SendCreditInt%u", source);
5243         return buf;
5244 }
5245
5246 /*
5247  * Return the reserved interrupt name.
5248  */
5249 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5250 {
5251         snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5252         return buf;
5253 }
5254
5255 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5256 {
5257         return flag_string(buf, buf_len, flags,
5258                         cce_err_status_flags, ARRAY_SIZE(cce_err_status_flags));
5259 }
5260
5261 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5262 {
5263         return flag_string(buf, buf_len, flags,
5264                         rxe_err_status_flags, ARRAY_SIZE(rxe_err_status_flags));
5265 }
5266
5267 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5268 {
5269         return flag_string(buf, buf_len, flags, misc_err_status_flags,
5270                         ARRAY_SIZE(misc_err_status_flags));
5271 }
5272
5273 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5274 {
5275         return flag_string(buf, buf_len, flags,
5276                         pio_err_status_flags, ARRAY_SIZE(pio_err_status_flags));
5277 }
5278
5279 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5280 {
5281         return flag_string(buf, buf_len, flags,
5282                         sdma_err_status_flags,
5283                         ARRAY_SIZE(sdma_err_status_flags));
5284 }
5285
5286 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5287 {
5288         return flag_string(buf, buf_len, flags,
5289                 egress_err_status_flags, ARRAY_SIZE(egress_err_status_flags));
5290 }
5291
5292 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5293 {
5294         return flag_string(buf, buf_len, flags,
5295                 egress_err_info_flags, ARRAY_SIZE(egress_err_info_flags));
5296 }
5297
5298 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5299 {
5300         return flag_string(buf, buf_len, flags,
5301                         send_err_status_flags,
5302                         ARRAY_SIZE(send_err_status_flags));
5303 }
5304
5305 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5306 {
5307         char buf[96];
5308         int i = 0;
5309
5310         /*
5311          * For most of these errors, there is nothing that can be done except
5312          * report or record it.
5313          */
5314         dd_dev_info(dd, "CCE Error: %s\n",
5315                 cce_err_status_string(buf, sizeof(buf), reg));
5316
5317         if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5318             is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5319                 /* this error requires a manual drop into SPC freeze mode */
5320                 /* then a fix up */
5321                 start_freeze_handling(dd->pport, FREEZE_SELF);
5322         }
5323
5324         for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5325                 if (reg & (1ull << i)) {
5326                         incr_cntr64(&dd->cce_err_status_cnt[i]);
5327                         /* maintain a counter over all cce_err_status errors */
5328                         incr_cntr64(&dd->sw_cce_err_status_aggregate);
5329                 }
5330         }
5331 }
5332
5333 /*
5334  * Check counters for receive errors that do not have an interrupt
5335  * associated with them.
5336  */
5337 #define RCVERR_CHECK_TIME 10
5338 static void update_rcverr_timer(unsigned long opaque)
5339 {
5340         struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5341         struct hfi1_pportdata *ppd = dd->pport;
5342         u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5343
5344         if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5345                 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5346                 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5347                 set_link_down_reason(ppd,
5348                   OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5349                         OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5350                 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5351         }
5352         dd->rcv_ovfl_cnt = (u32) cur_ovfl_cnt;
5353
5354         mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5355 }
5356
5357 static int init_rcverr(struct hfi1_devdata *dd)
5358 {
5359         setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5360         /* Assume the hardware counter has been reset */
5361         dd->rcv_ovfl_cnt = 0;
5362         return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5363 }
5364
5365 static void free_rcverr(struct hfi1_devdata *dd)
5366 {
5367         if (dd->rcverr_timer.data)
5368                 del_timer_sync(&dd->rcverr_timer);
5369         dd->rcverr_timer.data = 0;
5370 }
5371
5372 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5373 {
5374         char buf[96];
5375         int i = 0;
5376
5377         dd_dev_info(dd, "Receive Error: %s\n",
5378                 rxe_err_status_string(buf, sizeof(buf), reg));
5379
5380         if (reg & ALL_RXE_FREEZE_ERR) {
5381                 int flags = 0;
5382
5383                 /*
5384                  * Freeze mode recovery is disabled for the errors
5385                  * in RXE_FREEZE_ABORT_MASK
5386                  */
5387                 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5388                         flags = FREEZE_ABORT;
5389
5390                 start_freeze_handling(dd->pport, flags);
5391         }
5392
5393         for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5394                 if (reg & (1ull << i))
5395                         incr_cntr64(&dd->rcv_err_status_cnt[i]);
5396         }
5397 }
5398
5399 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5400 {
5401         char buf[96];
5402         int i = 0;
5403
5404         dd_dev_info(dd, "Misc Error: %s",
5405                 misc_err_status_string(buf, sizeof(buf), reg));
5406         for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5407                 if (reg & (1ull << i))
5408                         incr_cntr64(&dd->misc_err_status_cnt[i]);
5409         }
5410 }
5411
5412 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5413 {
5414         char buf[96];
5415         int i = 0;
5416
5417         dd_dev_info(dd, "PIO Error: %s\n",
5418                 pio_err_status_string(buf, sizeof(buf), reg));
5419
5420         if (reg & ALL_PIO_FREEZE_ERR)
5421                 start_freeze_handling(dd->pport, 0);
5422
5423         for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5424                 if (reg & (1ull << i))
5425                         incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5426         }
5427 }
5428
5429 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5430 {
5431         char buf[96];
5432         int i = 0;
5433
5434         dd_dev_info(dd, "SDMA Error: %s\n",
5435                 sdma_err_status_string(buf, sizeof(buf), reg));
5436
5437         if (reg & ALL_SDMA_FREEZE_ERR)
5438                 start_freeze_handling(dd->pport, 0);
5439
5440         for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5441                 if (reg & (1ull << i))
5442                         incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5443         }
5444 }
5445
5446 static void count_port_inactive(struct hfi1_devdata *dd)
5447 {
5448         struct hfi1_pportdata *ppd = dd->pport;
5449
5450         if (ppd->port_xmit_discards < ~(u64)0)
5451                 ppd->port_xmit_discards++;
5452 }
5453
5454 /*
5455  * We have had a "disallowed packet" error during egress. Determine the
5456  * integrity check that failed, and update the relevant error counter, etc.
5457  *
5458  * Note that the SEND_EGRESS_ERR_INFO register has only a single
5459  * bit of state per integrity check, and so we can miss the reason for an
5460  * egress error if more than one packet fails the same integrity check
5461  * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5462  */
5463 static void handle_send_egress_err_info(struct hfi1_devdata *dd)
5464 {
5465         struct hfi1_pportdata *ppd = dd->pport;
5466         u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5467         u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5468         char buf[96];
5469
5470         /* clear down all observed info as quickly as possible after read */
5471         write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5472
5473         dd_dev_info(dd,
5474                 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5475                 info, egress_err_info_string(buf, sizeof(buf), info), src);
5476
5477         /* Eventually add other counters for each bit */
5478
5479         if (info & SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK) {
5480                 if (ppd->port_xmit_discards < ~(u64)0)
5481                         ppd->port_xmit_discards++;
5482         }
5483 }
5484
5485 /*
5486  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5487  * register. Does it represent a 'port inactive' error?
5488  */
5489 static inline int port_inactive_err(u64 posn)
5490 {
5491         return (posn >= SEES(TX_LINKDOWN) &&
5492                 posn <= SEES(TX_INCORRECT_LINK_STATE));
5493 }
5494
5495 /*
5496  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5497  * register. Does it represent a 'disallowed packet' error?
5498  */
5499 static inline int disallowed_pkt_err(u64 posn)
5500 {
5501         return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5502                 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5503 }
5504
5505 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5506 {
5507         u64 reg_copy = reg, handled = 0;
5508         char buf[96];
5509         int i = 0;
5510
5511         if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5512                 start_freeze_handling(dd->pport, 0);
5513         if (is_ax(dd) && (reg &
5514                     SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK)
5515                     && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5516                 start_freeze_handling(dd->pport, 0);
5517
5518         while (reg_copy) {
5519                 int posn = fls64(reg_copy);
5520                 /*
5521                  * fls64() returns a 1-based offset, but we generally
5522                  * want 0-based offsets.
5523                  */
5524                 int shift = posn - 1;
5525
5526                 if (port_inactive_err(shift)) {
5527                         count_port_inactive(dd);
5528                         handled |= (1ULL << shift);
5529                 } else if (disallowed_pkt_err(shift)) {
5530                         handle_send_egress_err_info(dd);
5531                         handled |= (1ULL << shift);
5532                 }
5533                 clear_bit(shift, (unsigned long *)&reg_copy);
5534         }
5535
5536         reg &= ~handled;
5537
5538         if (reg)
5539                 dd_dev_info(dd, "Egress Error: %s\n",
5540                         egress_err_status_string(buf, sizeof(buf), reg));
5541
5542         for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5543                 if (reg & (1ull << i))
5544                         incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5545         }
5546 }
5547
5548 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5549 {
5550         char buf[96];
5551         int i = 0;
5552
5553         dd_dev_info(dd, "Send Error: %s\n",
5554                 send_err_status_string(buf, sizeof(buf), reg));
5555
5556         for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5557                 if (reg & (1ull << i))
5558                         incr_cntr64(&dd->send_err_status_cnt[i]);
5559         }
5560 }
5561
5562 /*
5563  * The maximum number of times the error clear down will loop before
5564  * blocking a repeating error.  This value is arbitrary.
5565  */
5566 #define MAX_CLEAR_COUNT 20
5567
5568 /*
5569  * Clear and handle an error register.  All error interrupts are funneled
5570  * through here to have a central location to correctly handle single-
5571  * or multi-shot errors.
5572  *
5573  * For non per-context registers, call this routine with a context value
5574  * of 0 so the per-context offset is zero.
5575  *
5576  * If the handler loops too many times, assume that something is wrong
5577  * and can't be fixed, so mask the error bits.
5578  */
5579 static void interrupt_clear_down(struct hfi1_devdata *dd,
5580                                  u32 context,
5581                                  const struct err_reg_info *eri)
5582 {
5583         u64 reg;
5584         u32 count;
5585
5586         /* read in a loop until no more errors are seen */
5587         count = 0;
5588         while (1) {
5589                 reg = read_kctxt_csr(dd, context, eri->status);
5590                 if (reg == 0)
5591                         break;
5592                 write_kctxt_csr(dd, context, eri->clear, reg);
5593                 if (likely(eri->handler))
5594                         eri->handler(dd, context, reg);
5595                 count++;
5596                 if (count > MAX_CLEAR_COUNT) {
5597                         u64 mask;
5598
5599                         dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5600                                 eri->desc, reg);
5601                         /*
5602                          * Read-modify-write so any other masked bits
5603                          * remain masked.
5604                          */
5605                         mask = read_kctxt_csr(dd, context, eri->mask);
5606                         mask &= ~reg;
5607                         write_kctxt_csr(dd, context, eri->mask, mask);
5608                         break;
5609                 }
5610         }
5611 }
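/*
 * Each second-tier error source is described by an err_reg_info entry that
 * supplies its status, clear and mask CSR offsets, an optional handler and a
 * description string; the misc_errs[], sdma_eng_err and various_err[] tables
 * consulted by the is_*_int() routines below feed those offsets straight
 * into this loop.
 */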
5612
5613 /*
5614  * CCE block "misc" interrupt.  Source is < 16.
5615  */
5616 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5617 {
5618         const struct err_reg_info *eri = &misc_errs[source];
5619
5620         if (eri->handler) {
5621                 interrupt_clear_down(dd, 0, eri);
5622         } else {
5623                 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5624                         source);
5625         }
5626 }
5627
5628 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5629 {
5630         return flag_string(buf, buf_len, flags,
5631                         sc_err_status_flags, ARRAY_SIZE(sc_err_status_flags));
5632 }
5633
5634 /*
5635  * Send context error interrupt.  Source (hw_context) is < 160.
5636  *
5637  * All send context errors cause the send context to halt.  The normal
5638  * clear-down mechanism cannot be used because we cannot clear the
5639  * error bits until several other long-running items are done first.
5640  * This is OK because with the context halted, nothing else is going
5641  * to happen on it anyway.
5642  */
5643 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5644                                 unsigned int hw_context)
5645 {
5646         struct send_context_info *sci;
5647         struct send_context *sc;
5648         char flags[96];
5649         u64 status;
5650         u32 sw_index;
5651         int i = 0;
5652
5653         sw_index = dd->hw_to_sw[hw_context];
5654         if (sw_index >= dd->num_send_contexts) {
5655                 dd_dev_err(dd,
5656                         "out of range sw index %u for send context %u\n",
5657                         sw_index, hw_context);
5658                 return;
5659         }
5660         sci = &dd->send_contexts[sw_index];
5661         sc = sci->sc;
5662         if (!sc) {
5663                 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5664                         sw_index, hw_context);
5665                 return;
5666         }
5667
5668         /* tell the software that a halt has begun */
5669         sc_stop(sc, SCF_HALTED);
5670
5671         status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5672
5673         dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5674                 send_context_err_status_string(flags, sizeof(flags), status));
5675
5676         if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5677                 handle_send_egress_err_info(dd);
5678
5679         /*
5680          * Automatically restart halted kernel contexts out of interrupt
5681          * context.  User contexts must ask the driver to restart the context.
5682          */
5683         if (sc->type != SC_USER)
5684                 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5685
5686         /*
5687          * Update the counters for the corresponding status bits.
5688          * Note that these particular counters are aggregated over all
5689          * 160 contexts.
5690          */
5691         for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5692                 if (status & (1ull << i))
5693                         incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5694         }
5695 }
5696
5697 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5698                                 unsigned int source, u64 status)
5699 {
5700         struct sdma_engine *sde;
5701         int i = 0;
5702
5703         sde = &dd->per_sdma[source];
5704 #ifdef CONFIG_SDMA_VERBOSITY
5705         dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5706                    slashstrip(__FILE__), __LINE__, __func__);
5707         dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5708                    sde->this_idx, source, (unsigned long long)status);
5709 #endif
5710         sdma_engine_error(sde, status);
5711
5712         /*
5713          * Update the counters for the corresponding status bits.
5714          * Note that these particular counters are aggregated over
5715          * all 16 DMA engines.
5716         */
5717         for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5718                 if (status & (1ull << i))
5719                         incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5720         }
5721 }
5722
5723 /*
5724  * CCE block SDMA error interrupt.  Source is < 16.
5725  */
5726 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5727 {
5728 #ifdef CONFIG_SDMA_VERBOSITY
5729         struct sdma_engine *sde = &dd->per_sdma[source];
5730
5731         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5732                    slashstrip(__FILE__), __LINE__, __func__);
5733         dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5734                    source);
5735         sdma_dumpstate(sde);
5736 #endif
5737         interrupt_clear_down(dd, source, &sdma_eng_err);
5738 }
5739
5740 /*
5741  * CCE block "various" interrupt.  Source is < 8.
5742  */
5743 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5744 {
5745         const struct err_reg_info *eri = &various_err[source];
5746
5747         /*
5748          * TCritInt cannot go through interrupt_clear_down()
5749          * because it is not a second tier interrupt. The handler
5750          * should be called directly.
5751          */
5752         if (source == TCRIT_INT_SOURCE)
5753                 handle_temp_err(dd);
5754         else if (eri->handler)
5755                 interrupt_clear_down(dd, 0, eri);
5756         else
5757                 dd_dev_info(dd,
5758                         "%s: Unimplemented/reserved interrupt %d\n",
5759                         __func__, source);
5760 }
5761
5762 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5763 {
5764         /* source is always zero */
5765         struct hfi1_pportdata *ppd = dd->pport;
5766         unsigned long flags;
5767         u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5768
5769         if (reg & QSFP_HFI0_MODPRST_N) {
5770
5771                 dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n",
5772                                 __func__);
5773
5774                 if (!qsfp_mod_present(ppd)) {
5775                         ppd->driver_link_ready = 0;
5776                         /*
5777                          * Cable removed, reset all our information about the
5778                          * cache and cable capabilities
5779                          */
5780
5781                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5782                         /*
5783                          * We don't set cache_refresh_required here as we expect
5784                          * an interrupt when a cable is inserted
5785                          */
5786                         ppd->qsfp_info.cache_valid = 0;
5787                         ppd->qsfp_info.qsfp_interrupt_functional = 0;
5788                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5789                                                 flags);
5790                         write_csr(dd,
5791                                         dd->hfi1_id ?
5792                                                 ASIC_QSFP2_INVERT :
5793                                                 ASIC_QSFP1_INVERT,
5794                                 qsfp_int_mgmt);
5795                         if (ppd->host_link_state == HLS_DN_POLL) {
5796                                 /*
5797                                  * The link is still in POLL. This means
5798                                  * that the normal link down processing
5799                                  * will not happen. We have to do it here
5800                                  * before turning the DC off.
5801                                  */
5802                                 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5803                         }
5804                 } else {
5805                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5806                         ppd->qsfp_info.cache_valid = 0;
5807                         ppd->qsfp_info.cache_refresh_required = 1;
5808                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5809                                                 flags);
5810
5811                         qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
5812                         write_csr(dd,
5813                                         dd->hfi1_id ?
5814                                                 ASIC_QSFP2_INVERT :
5815                                                 ASIC_QSFP1_INVERT,
5816                                 qsfp_int_mgmt);
5817                 }
5818         }
5819
5820         if (reg & QSFP_HFI0_INT_N) {
5821
5822                 dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n",
5823                                 __func__);
5824                 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5825                 ppd->qsfp_info.check_interrupt_flags = 1;
5826                 ppd->qsfp_info.qsfp_interrupt_functional = 1;
5827                 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
5828         }
5829
5830         /* Schedule the QSFP work only if there is a cable attached. */
5831         if (qsfp_mod_present(ppd))
5832                 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
5833 }
5834
5835 static int request_host_lcb_access(struct hfi1_devdata *dd)
5836 {
5837         int ret;
5838
5839         ret = do_8051_command(dd, HCMD_MISC,
5840                 (u64)HCMD_MISC_REQUEST_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
5841                 NULL);
5842         if (ret != HCMD_SUCCESS) {
5843                 dd_dev_err(dd, "%s: command failed with error %d\n",
5844                         __func__, ret);
5845         }
5846         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
5847 }
5848
5849 static int request_8051_lcb_access(struct hfi1_devdata *dd)
5850 {
5851         int ret;
5852
5853         ret = do_8051_command(dd, HCMD_MISC,
5854                 (u64)HCMD_MISC_GRANT_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
5855                 NULL);
5856         if (ret != HCMD_SUCCESS) {
5857                 dd_dev_err(dd, "%s: command failed with error %d\n",
5858                         __func__, ret);
5859         }
5860         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
5861 }
5862
5863 /*
5864  * Set the LCB selector - allow host access.  The DCC selector always
5865  * points to the host.
5866  */
5867 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
5868 {
5869         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
5870                                 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK
5871                                 | DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
5872 }
5873
5874 /*
5875  * Clear the LCB selector - allow 8051 access.  The DCC selector always
5876  * points to the host.
5877  */
5878 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
5879 {
5880         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
5881                                 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
5882 }
5883
5884 /*
5885  * Acquire LCB access from the 8051.  If the host already has access,
5886  * just increment a counter.  Otherwise, inform the 8051 that the
5887  * host is taking access.
5888  *
5889  * Returns:
5890  *      0 on success
5891  *      -EBUSY if the 8051 has control and cannot be disturbed
5892  *      -errno if unable to acquire access from the 8051
5893  */
5894 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
5895 {
5896         struct hfi1_pportdata *ppd = dd->pport;
5897         int ret = 0;
5898
5899         /*
5900          * Use the host link state lock so the operation of this routine
5901          * { link state check, selector change, count increment } can occur
5902          * as a unit against a link state change.  Otherwise there is a
5903          * race between the state change and the count increment.
5904          */
5905         if (sleep_ok) {
5906                 mutex_lock(&ppd->hls_lock);
5907         } else {
5908                 while (!mutex_trylock(&ppd->hls_lock))
5909                         udelay(1);
5910         }
5911
5912         /* this access is valid only when the link is up */
5913         if ((ppd->host_link_state & HLS_UP) == 0) {
5914                 dd_dev_info(dd, "%s: link state %s not up\n",
5915                         __func__, link_state_name(ppd->host_link_state));
5916                 ret = -EBUSY;
5917                 goto done;
5918         }
5919
5920         if (dd->lcb_access_count == 0) {
5921                 ret = request_host_lcb_access(dd);
5922                 if (ret) {
5923                         dd_dev_err(dd,
5924                                 "%s: unable to acquire LCB access, err %d\n",
5925                                 __func__, ret);
5926                         goto done;
5927                 }
5928                 set_host_lcb_access(dd);
5929         }
5930         dd->lcb_access_count++;
5931 done:
5932         mutex_unlock(&ppd->hls_lock);
5933         return ret;
5934 }
5935
5936 /*
5937  * Release LCB access by decrementing the use count.  If the count is moving
5938  * from 1 to 0, inform 8051 that it has control back.
5939  *
5940  * Returns:
5941  *      0 on success
5942  *      -errno if unable to release access to the 8051
5943  */
5944 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
5945 {
5946         int ret = 0;
5947
5948         /*
5949          * Use the host link state lock because the acquire needed it.
5950          * Here, we only need to keep { selector change, count decrement }
5951          * as a unit.
5952          */
5953         if (sleep_ok) {
5954                 mutex_lock(&dd->pport->hls_lock);
5955         } else {
5956                 while (!mutex_trylock(&dd->pport->hls_lock))
5957                         udelay(1);
5958         }
5959
5960         if (dd->lcb_access_count == 0) {
5961                 dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
5962                         __func__);
5963                 goto done;
5964         }
5965
5966         if (dd->lcb_access_count == 1) {
5967                 set_8051_lcb_access(dd);
5968                 ret = request_8051_lcb_access(dd);
5969                 if (ret) {
5970                         dd_dev_err(dd,
5971                                 "%s: unable to release LCB access, err %d\n",
5972                                 __func__, ret);
5973                         /* restore host access if the grant didn't work */
5974                         set_host_lcb_access(dd);
5975                         goto done;
5976                 }
5977         }
5978         dd->lcb_access_count--;
5979 done:
5980         mutex_unlock(&dd->pport->hls_lock);
5981         return ret;
5982 }
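/*
 * Usage sketch (illustrative, not taken from a specific caller): a routine
 * that needs direct host access to an LCB CSR while the link is up would
 * pair the two calls above:
 *
 *	if (acquire_lcb_access(dd, 1) == 0) {
 *		reg = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
 *		release_lcb_access(dd, 1);
 *	}
 *
 * (Any LCB CSR could stand in for the one shown.)  The sleep_ok argument
 * selects mutex_lock() versus spinning on mutex_trylock(), so pass 0 when
 * the caller cannot sleep.
 */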
5983
5984 /*
5985  * Initialize LCB access variables and state.  Called during driver load,
5986  * after most of the initialization is finished.
5987  *
5988  * The DC default is LCB access on for the host.  The driver defaults to
5989  * leaving access to the 8051.  Assign access now - this constrains the call
5990  * to this routine to be after all LCB set-up is done.  In particular, after
5991  * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
5992  */
5993 static void init_lcb_access(struct hfi1_devdata *dd)
5994 {
5995         dd->lcb_access_count = 0;
5996 }
5997
5998 /*
5999  * Write a response back to an 8051 request.
6000  */
6001 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6002 {
6003         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6004                 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK
6005                 | (u64)return_code << DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT
6006                 | (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6007 }
6008
6009 /*
6010  * Handle requests from the 8051.
6011  */
6012 static void handle_8051_request(struct hfi1_devdata *dd)
6013 {
6014         u64 reg;
6015         u16 data;
6016         u8 type;
6017
6018         reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6019         if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6020                 return; /* no request */
6021
6022         /* zero out COMPLETED so the response is seen */
6023         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6024
6025         /* extract request details */
6026         type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6027                         & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6028         data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6029                         & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6030
6031         switch (type) {
6032         case HREQ_LOAD_CONFIG:
6033         case HREQ_SAVE_CONFIG:
6034         case HREQ_READ_CONFIG:
6035         case HREQ_SET_TX_EQ_ABS:
6036         case HREQ_SET_TX_EQ_REL:
6037         case HREQ_ENABLE:
6038                 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6039                         type);
6040                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6041                 break;
6042
6043         case HREQ_CONFIG_DONE:
6044                 hreq_response(dd, HREQ_SUCCESS, 0);
6045                 break;
6046
6047         case HREQ_INTERFACE_TEST:
6048                 hreq_response(dd, HREQ_SUCCESS, data);
6049                 break;
6050
6051         default:
6052                 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6053                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6054                 break;
6055         }
6056 }
6057
6058 static void write_global_credit(struct hfi1_devdata *dd,
6059                                 u8 vau, u16 total, u16 shared)
6060 {
6061         write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6062                 ((u64)total
6063                         << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
6064                 | ((u64)shared
6065                         << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
6066                 | ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6067 }
6068
6069 /*
6070  * Set up initial VL15 credits of the remote.  Assumes the rest of
6071  * the CM credit registers are zero from a previous global or credit reset.
6072  */
6073 void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6074 {
6075         /* leave shared count at zero for both global and VL15 */
6076         write_global_credit(dd, vau, vl15buf, 0);
6077
6078         /* We may need some credits for another VL when sending packets
6079          * with the snoop interface. Dividing it down the middle for VL15
6080          * and VL0 should suffice.
6081          */
6082         if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6083                 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6084                     << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6085                 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6086                     << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6087         } else {
6088                 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6089                         << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6090         }
6091 }
6092
6093 /*
6094  * Zero all credit details from the previous connection and
6095  * reset the CM manager's internal counters.
6096  */
6097 void reset_link_credits(struct hfi1_devdata *dd)
6098 {
6099         int i;
6100
6101         /* remove all previous VL credit limits */
6102         for (i = 0; i < TXE_NUM_DATA_VL; i++)
6103                 write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
6104         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6105         write_global_credit(dd, 0, 0, 0);
6106         /* reset the CM block */
6107         pio_send_control(dd, PSC_CM_RESET);
6108 }
6109
6110 /* convert a vCU to a CU */
6111 static u32 vcu_to_cu(u8 vcu)
6112 {
6113         return 1 << vcu;
6114 }
6115
6116 /* convert a CU to a vCU */
6117 static u8 cu_to_vcu(u32 cu)
6118 {
6119         return ilog2(cu);
6120 }
6121
6122 /* convert a vAU to an AU */
6123 static u32 vau_to_au(u8 vau)
6124 {
6125         return 8 * (1 << vau);
6126 }
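/*
 * Quick reference for the three converters above: vau_to_au(2) is an
 * allocation unit of 8 * (1 << 2) = 32 bytes, vcu_to_cu(2) is a credit unit
 * of 4, and cu_to_vcu(4) maps back to vCU 2.  The vAU encoding, for
 * instance, is what write_global_credit() above programs into
 * SEND_CM_GLOBAL_CREDIT.
 */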
6127
6128 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6129 {
6130         ppd->sm_trap_qp = 0x0;
6131         ppd->sa_qp = 0x1;
6132 }
6133
6134 /*
6135  * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
6136  */
6137 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6138 {
6139         u64 reg;
6140
6141         /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6142         write_csr(dd, DC_LCB_CFG_RUN, 0);
6143         /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6144         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6145                 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6146         /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6147         dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6148         reg = read_csr(dd, DCC_CFG_RESET);
6149         write_csr(dd, DCC_CFG_RESET,
6150                 reg
6151                 | (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT)
6152                 | (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6153         (void) read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6154         if (!abort) {
6155                 udelay(1);    /* must hold for the longer of 16cclks or 20ns */
6156                 write_csr(dd, DCC_CFG_RESET, reg);
6157                 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6158         }
6159 }
6160
6161 /*
6162  * This routine should be called after the link has been transitioned to
6163  * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6164  * reset).
6165  *
6166  * The expectation is that the caller of this routine would have taken
6167  * care of properly transitioning the link into the correct state.
6168  */
6169 static void dc_shutdown(struct hfi1_devdata *dd)
6170 {
6171         unsigned long flags;
6172
6173         spin_lock_irqsave(&dd->dc8051_lock, flags);
6174         if (dd->dc_shutdown) {
6175                 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6176                 return;
6177         }
6178         dd->dc_shutdown = 1;
6179         spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6180         /* Shutdown the LCB */
6181         lcb_shutdown(dd, 1);
6182         /* Going to OFFLINE would have caused the 8051 to put the
6183          * SerDes into reset already. We just need to shut down the
6184          * 8051 itself. */
6185         write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6186 }
6187
6188 /* Calling this after the DC has been brought out of reset should not
6189  * do any damage. */
6190 static void dc_start(struct hfi1_devdata *dd)
6191 {
6192         unsigned long flags;
6193         int ret;
6194
6195         spin_lock_irqsave(&dd->dc8051_lock, flags);
6196         if (!dd->dc_shutdown)
6197                 goto done;
6198         spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6199         /* Take the 8051 out of reset */
6200         write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6201         /* Wait until 8051 is ready */
6202         ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6203         if (ret) {
6204                 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6205                         __func__);
6206         }
6207         /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6208         write_csr(dd, DCC_CFG_RESET, 0x10);
6209         /* lcb_shutdown() with abort=1 does not restore these */
6210         write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6211         spin_lock_irqsave(&dd->dc8051_lock, flags);
6212         dd->dc_shutdown = 0;
6213 done:
6214         spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6215 }
6216
6217 /*
6218  * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6219  */
6220 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6221 {
6222         u64 rx_radr, tx_radr;
6223         u32 version;
6224
6225         if (dd->icode != ICODE_FPGA_EMULATION)
6226                 return;
6227
6228         /*
6229          * These LCB defaults on emulator _s are good, nothing to do here:
6230          *      LCB_CFG_TX_FIFOS_RADR
6231          *      LCB_CFG_RX_FIFOS_RADR
6232          *      LCB_CFG_LN_DCLK
6233          *      LCB_CFG_IGNORE_LOST_RCLK
6234          */
6235         if (is_emulator_s(dd))
6236                 return;
6237         /* else this is _p */
6238
6239         version = emulator_rev(dd);
6240         if (!is_ax(dd))
6241                 version = 0x2d; /* all B0 use 0x2d or higher settings */
6242
6243         if (version <= 0x12) {
6244                 /* release 0x12 and below */
6245
6246                 /*
6247                  * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6248                  * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6249                  * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6250                  */
6251                 rx_radr =
6252                       0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6253                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6254                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6255                 /*
6256                  * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6257                  * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6258                  */
6259                 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6260         } else if (version <= 0x18) {
6261                 /* release 0x13 up to 0x18 */
6262                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6263                 rx_radr =
6264                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6265                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6266                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6267                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6268         } else if (version == 0x19) {
6269                 /* release 0x19 */
6270                 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6271                 rx_radr =
6272                       0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6273                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6274                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6275                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6276         } else if (version == 0x1a) {
6277                 /* release 0x1a */
6278                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6279                 rx_radr =
6280                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6281                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6282                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6283                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6284                 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6285         } else {
6286                 /* release 0x1b and higher */
6287                 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6288                 rx_radr =
6289                       0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6290                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6291                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6292                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6293         }
6294
6295         write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6296         /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6297         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6298                 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6299         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6300 }
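
/*
 * Illustrative sketch (not part of the driver): for the emulator
 * release 0x19 case above, the three RX FIFO read-address fields pack
 * into the 0xa99 value quoted in the comment.  The 8/4/0 shift values
 * below are inferred from that comment, not taken from the register
 * definitions.
 */
#if 0	/* example only, never compiled */
static void example_rx_radr_packing(void)
{
	/* DO_NOT_JUMP = 0xa, OK_TO_JUMP = 0x9, RST_VAL = 0x9 -> 0xa99 */
	u64 rx_radr = (0xaull << 8) | (0x9ull << 4) | (0x9ull << 0);
}
#endif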
6301
6302 /*
6303  * Handle a SMA idle message
6304  *
6305  * This is a work-queue function outside of the interrupt.
6306  */
6307 void handle_sma_message(struct work_struct *work)
6308 {
6309         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6310                                                         sma_message_work);
6311         struct hfi1_devdata *dd = ppd->dd;
6312         u64 msg;
6313         int ret;
6314
6315         /* msg is bytes 1-4 of the 40-bit idle message - the command code
6316            is stripped off */
6317         ret = read_idle_sma(dd, &msg);
6318         if (ret)
6319                 return;
6320         dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6321         /*
6322          * React to the SMA message.  Byte[1] (0 for us) is the command.
6323          */
6324         switch (msg & 0xff) {
6325         case SMA_IDLE_ARM:
6326                 /*
6327                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6328                  * State Transitions
6329                  *
6330                  * Only expected in INIT or ARMED, discard otherwise.
6331                  */
6332                 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6333                         ppd->neighbor_normal = 1;
6334                 break;
6335         case SMA_IDLE_ACTIVE:
6336                 /*
6337                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6338                  * State Transitions
6339                  *
6340                  * Can activate the node.  Discard otherwise.
6341                  */
6342                 if (ppd->host_link_state == HLS_UP_ARMED
6343                                         && ppd->is_active_optimize_enabled) {
6344                         ppd->neighbor_normal = 1;
6345                         ret = set_link_state(ppd, HLS_UP_ACTIVE);
6346                         if (ret)
6347                                 dd_dev_err(
6348                                         dd,
6349                                         "%s: received Active SMA idle message, couldn't set link to Active\n",
6350                                         __func__);
6351                 }
6352                 break;
6353         default:
6354                 dd_dev_err(dd,
6355                         "%s: received unexpected SMA idle message 0x%llx\n",
6356                         __func__, msg);
6357                 break;
6358         }
6359 }
6360
6361 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6362 {
6363         u64 rcvctrl;
6364         unsigned long flags;
6365
6366         spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6367         rcvctrl = read_csr(dd, RCV_CTRL);
6368         rcvctrl |= add;
6369         rcvctrl &= ~clear;
6370         write_csr(dd, RCV_CTRL, rcvctrl);
6371         spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6372 }
6373
6374 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6375 {
6376         adjust_rcvctrl(dd, add, 0);
6377 }
6378
6379 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6380 {
6381         adjust_rcvctrl(dd, 0, clear);
6382 }
6383
6384 /*
6385  * Called from all interrupt handlers to start handling an SPC freeze.
6386  */
6387 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6388 {
6389         struct hfi1_devdata *dd = ppd->dd;
6390         struct send_context *sc;
6391         int i;
6392
6393         if (flags & FREEZE_SELF)
6394                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6395
6396         /* enter frozen mode */
6397         dd->flags |= HFI1_FROZEN;
6398
6399         /* notify all SDMA engines that they are going into a freeze */
6400         sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6401
6402         /* do halt pre-handling on all enabled send contexts */
6403         for (i = 0; i < dd->num_send_contexts; i++) {
6404                 sc = dd->send_contexts[i].sc;
6405                 if (sc && (sc->flags & SCF_ENABLED))
6406                         sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6407         }
6408
6409         /* Send contexts are frozen. Notify user space */
6410         hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6411
6412         if (flags & FREEZE_ABORT) {
6413                 dd_dev_err(dd,
6414                            "Aborted freeze recovery. Please REBOOT system\n");
6415                 return;
6416         }
6417         /* queue non-interrupt handler */
6418         queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6419 }
6420
6421 /*
6422  * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6423  * depending on the "freeze" parameter.
6424  *
6425  * No need to return an error if it times out, our only option
6426  * is to proceed anyway.
6427  */
6428 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6429 {
6430         unsigned long timeout;
6431         u64 reg;
6432
6433         timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6434         while (1) {
6435                 reg = read_csr(dd, CCE_STATUS);
6436                 if (freeze) {
6437                         /* waiting until all indicators are set */
6438                         if ((reg & ALL_FROZE) == ALL_FROZE)
6439                                 return; /* all done */
6440                 } else {
6441                         /* waiting until all indicators are clear */
6442                         if ((reg & ALL_FROZE) == 0)
6443                                 return; /* all done */
6444                 }
6445
6446                 if (time_after(jiffies, timeout)) {
6447                         dd_dev_err(dd,
6448                                 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6449                                 freeze ? "" : "un",
6450                                 reg & ALL_FROZE,
6451                                 freeze ? ALL_FROZE : 0ull);
6452                         return;
6453                 }
6454                 usleep_range(80, 120);
6455         }
6456 }
6457
6458 /*
6459  * Do all freeze handling for the RXE block.
6460  */
6461 static void rxe_freeze(struct hfi1_devdata *dd)
6462 {
6463         int i;
6464
6465         /* disable port */
6466         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6467
6468         /* disable all receive contexts */
6469         for (i = 0; i < dd->num_rcv_contexts; i++)
6470                 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6471 }
6472
6473 /*
6474  * Unfreeze handling for the RXE block - kernel contexts only.
6475  * This will also enable the port.  User contexts will do unfreeze
6476  * handling on a per-context basis as they call into the driver.
6477  *
6478  */
6479 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6480 {
6481         int i;
6482
6483         /* enable all kernel contexts */
6484         for (i = 0; i < dd->n_krcv_queues; i++)
6485                 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, i);
6486
6487         /* enable port */
6488         add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6489 }
6490
6491 /*
6492  * Non-interrupt SPC freeze handling.
6493  *
6494  * This is a work-queue function outside of the triggering interrupt.
6495  */
6496 void handle_freeze(struct work_struct *work)
6497 {
6498         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6499                                                                 freeze_work);
6500         struct hfi1_devdata *dd = ppd->dd;
6501
6502         /* wait for freeze indicators on all affected blocks */
6503         wait_for_freeze_status(dd, 1);
6504
6505         /* SPC is now frozen */
6506
6507         /* do send PIO freeze steps */
6508         pio_freeze(dd);
6509
6510         /* do send DMA freeze steps */
6511         sdma_freeze(dd);
6512
6513         /* do send egress freeze steps - nothing to do */
6514
6515         /* do receive freeze steps */
6516         rxe_freeze(dd);
6517
6518         /*
6519          * Unfreeze the hardware - clear the freeze, wait for each
6520          * block's frozen bit to clear, then clear the frozen flag.
6521          */
6522         write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6523         wait_for_freeze_status(dd, 0);
6524
6525         if (is_ax(dd)) {
6526                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6527                 wait_for_freeze_status(dd, 1);
6528                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6529                 wait_for_freeze_status(dd, 0);
6530         }
6531
6532         /* do send PIO unfreeze steps for kernel contexts */
6533         pio_kernel_unfreeze(dd);
6534
6535         /* do send DMA unfreeze steps */
6536         sdma_unfreeze(dd);
6537
6538         /* do send egress unfreeze steps - nothing to do */
6539
6540         /* do receive unfreeze steps for kernel contexts */
6541         rxe_kernel_unfreeze(dd);
6542
6543         /*
6544          * The unfreeze procedure touches global device registers when
6545          * it disables and re-enables RXE. Mark the device unfrozen
6546          * after all that is done so other parts of the driver waiting
6547          * for the device to unfreeze don't do things out of order.
6548          *
6549          * The above implies that the meaning of HFI1_FROZEN flag is
6550          * "Device has gone into freeze mode and freeze mode handling
6551          * is still in progress."
6552          *
6553          * The flag will be removed when freeze mode processing has
6554          * completed.
6555          */
6556         dd->flags &= ~HFI1_FROZEN;
6557         wake_up(&dd->event_queue);
6558
6559         /* no longer frozen */
6560 }
6561
6562 /*
6563  * Handle a link up interrupt from the 8051.
6564  *
6565  * This is a work-queue function outside of the interrupt.
6566  */
6567 void handle_link_up(struct work_struct *work)
6568 {
6569         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6570                                                                 link_up_work);
6571         set_link_state(ppd, HLS_UP_INIT);
6572
6573         /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6574         read_ltp_rtt(ppd->dd);
6575         /*
6576          * OPA specifies that certain counters are cleared on a transition
6577          * to link up, so do that.
6578          */
6579         clear_linkup_counters(ppd->dd);
6580         /*
6581          * And (re)set link up default values.
6582          */
6583         set_linkup_defaults(ppd);
6584
6585         /* enforce link speed enabled */
6586         if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6587                 /* oops - current speed is not enabled, bounce */
6588                 dd_dev_err(ppd->dd,
6589                         "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6590                         ppd->link_speed_active, ppd->link_speed_enabled);
6591                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6592                         OPA_LINKDOWN_REASON_SPEED_POLICY);
6593                 set_link_state(ppd, HLS_DN_OFFLINE);
6594                 start_link(ppd);
6595         }
6596 }
6597
6598 /* Several pieces of LNI information were cached for SMA in ppd.
6599  * Reset these on link down */
6600 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6601 {
6602         ppd->neighbor_guid = 0;
6603         ppd->neighbor_port_number = 0;
6604         ppd->neighbor_type = 0;
6605         ppd->neighbor_fm_security = 0;
6606 }
6607
6608 /*
6609  * Handle a link down interrupt from the 8051.
6610  *
6611  * This is a work-queue function outside of the interrupt.
6612  */
6613 void handle_link_down(struct work_struct *work)
6614 {
6615         u8 lcl_reason, neigh_reason = 0;
6616         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6617                                                                 link_down_work);
6618
6619         /* go offline first, then deal with reasons */
6620         set_link_state(ppd, HLS_DN_OFFLINE);
6621
6622         lcl_reason = 0;
6623         read_planned_down_reason_code(ppd->dd, &neigh_reason);
6624
6625         /*
6626          * If no reason, assume peer-initiated but missed
6627          * LinkGoingDown idle flits.
6628          */
6629         if (neigh_reason == 0)
6630                 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6631
6632         set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6633
6634         reset_neighbor_info(ppd);
6635
6636         /* disable the port */
6637         clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6638
6639         /* If there is no cable attached, turn the DC off. Otherwise,
6640          * start bringing up the link. */
6641         if (!qsfp_mod_present(ppd))
6642                 dc_shutdown(ppd->dd);
6643         else
6644                 start_link(ppd);
6645 }
6646
6647 void handle_link_bounce(struct work_struct *work)
6648 {
6649         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6650                                                         link_bounce_work);
6651
6652         /*
6653          * Only do something if the link is currently up.
6654          */
6655         if (ppd->host_link_state & HLS_UP) {
6656                 set_link_state(ppd, HLS_DN_OFFLINE);
6657                 start_link(ppd);
6658         } else {
6659                 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
6660                         __func__, link_state_name(ppd->host_link_state));
6661         }
6662 }
6663
6664 /*
6665  * Mask conversion: Capability exchange to Port LTP.  The capability
6666  * exchange has an implicit 16b CRC that is mandatory.
6667  */
6668 static int cap_to_port_ltp(int cap)
6669 {
6670         int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
6671
6672         if (cap & CAP_CRC_14B)
6673                 port_ltp |= PORT_LTP_CRC_MODE_14;
6674         if (cap & CAP_CRC_48B)
6675                 port_ltp |= PORT_LTP_CRC_MODE_48;
6676         if (cap & CAP_CRC_12B_16B_PER_LANE)
6677                 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
6678
6679         return port_ltp;
6680 }
6681
6682 /*
6683  * Convert an OPA Port LTP mask to capability mask
6684  */
6685 int port_ltp_to_cap(int port_ltp)
6686 {
6687         int cap_mask = 0;
6688
6689         if (port_ltp & PORT_LTP_CRC_MODE_14)
6690                 cap_mask |= CAP_CRC_14B;
6691         if (port_ltp & PORT_LTP_CRC_MODE_48)
6692                 cap_mask |= CAP_CRC_48B;
6693         if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
6694                 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
6695
6696         return cap_mask;
6697 }
6698
6699 /*
6700  * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
6701  */
6702 static int lcb_to_port_ltp(int lcb_crc)
6703 {
6704         int port_ltp = 0;
6705
6706         if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
6707                 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
6708         else if (lcb_crc == LCB_CRC_48B)
6709                 port_ltp = PORT_LTP_CRC_MODE_48;
6710         else if (lcb_crc == LCB_CRC_14B)
6711                 port_ltp = PORT_LTP_CRC_MODE_14;
6712         else
6713                 port_ltp = PORT_LTP_CRC_MODE_16;
6714
6715         return port_ltp;
6716 }
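
/*
 * Illustrative sketch (not part of the driver): how the three CRC
 * mode conversions above combine, as used later in handle_verify_cap().
 * The capability value is a made-up example.
 */
#if 0	/* example only, never compiled */
static void example_crc_mode_conversion(void)
{
	int cap = CAP_CRC_14B | CAP_CRC_48B;
	/* 16b CRC is mandatory, so it is always included */
	int port_ltp = cap_to_port_ltp(cap);
		/* == PORT_LTP_CRC_MODE_16 | _14 | _48 */
	int cap_back = port_ltp_to_cap(port_ltp);	/* == cap */
	int active = lcb_to_port_ltp(LCB_CRC_14B);
		/* == PORT_LTP_CRC_MODE_14 */
}
#endif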
6717
6718 /*
6719  * Our neighbor has indicated that we are allowed to act as a fabric
6720  * manager, so place the full management partition key in the second
6721  * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
6722  * that we should already have the limited management partition key in
6723  * array element 1, and also that the port is not yet up when
6724  * add_full_mgmt_pkey() is invoked.
6725  */
6726 static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
6727 {
6728         struct hfi1_devdata *dd = ppd->dd;
6729
6730         /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
6731         if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
6732                 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
6733                             __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
6734         ppd->pkeys[2] = FULL_MGMT_P_KEY;
6735         (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
6736 }
6737
6738 /*
6739  * Convert the given link width to the OPA link width bitmask.
6740  */
6741 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
6742 {
6743         switch (width) {
6744         case 0:
6745                 /*
6746                  * Simulator and quick linkup do not set the width.
6747                  * Just set it to 4x without complaint.
6748                  */
6749                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
6750                         return OPA_LINK_WIDTH_4X;
6751                 return 0; /* no lanes up */
6752         case 1: return OPA_LINK_WIDTH_1X;
6753         case 2: return OPA_LINK_WIDTH_2X;
6754         case 3: return OPA_LINK_WIDTH_3X;
6755         default:
6756                 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
6757                         __func__, width);
6758                 /* fall through */
6759         case 4: return OPA_LINK_WIDTH_4X;
6760         }
6761 }
6762
6763 /*
6764  * Do a population count on the bottom nibble.
6765  */
6766 static const u8 bit_counts[16] = {
6767         0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
6768 };
6769 static inline u8 nibble_to_count(u8 nibble)
6770 {
6771         return bit_counts[nibble & 0xf];
6772 }
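
/*
 * Illustrative sketch (not part of the driver): the lane-enable masks
 * read from the 8051 are one bit per lane, so the nibble population
 * count above turns them into lane counts.  The mask value is a
 * made-up example.
 */
#if 0	/* example only, never compiled */
static void example_nibble_to_count(void)
{
	u8 enable_lane_tx = 0xb;	/* lanes 0, 1 and 3 enabled */
	u16 tx = nibble_to_count(enable_lane_tx);	/* bit_counts[0xb] == 3 */
}
#endif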
6773
6774 /*
6775  * Read the active lane information from the 8051 registers and return
6776  * their widths.
6777  *
6778  * Active lane information is found in these 8051 registers:
6779  *      enable_lane_tx
6780  *      enable_lane_rx
6781  */
6782 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
6783                             u16 *rx_width)
6784 {
6785         u16 tx, rx;
6786         u8 enable_lane_rx;
6787         u8 enable_lane_tx;
6788         u8 tx_polarity_inversion;
6789         u8 rx_polarity_inversion;
6790         u8 max_rate;
6791
6792         /* read the active lanes */
6793         read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
6794                                 &rx_polarity_inversion, &max_rate);
6795         read_local_lni(dd, &enable_lane_rx);
6796
6797         /* convert to counts */
6798         tx = nibble_to_count(enable_lane_tx);
6799         rx = nibble_to_count(enable_lane_rx);
6800
6801         /*
6802          * Set link_speed_active here, overriding what was set in
6803          * handle_verify_cap().  The ASIC 8051 firmware does not correctly
6804          * set the max_rate field in handle_verify_cap until v0.19.
6805          */
6806         if ((dd->icode == ICODE_RTL_SILICON)
6807                                 && (dd->dc8051_ver < dc8051_ver(0, 19))) {
6808                 /* max_rate: 0 = 12.5G, 1 = 25G */
6809                 switch (max_rate) {
6810                 case 0:
6811                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
6812                         break;
6813                 default:
6814                         dd_dev_err(dd,
6815                                 "%s: unexpected max rate %d, using 25Gb\n",
6816                                 __func__, (int)max_rate);
6817                         /* fall through */
6818                 case 1:
6819                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
6820                         break;
6821                 }
6822         }
6823
6824         dd_dev_info(dd,
6825                 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
6826                 enable_lane_tx, tx, enable_lane_rx, rx);
6827         *tx_width = link_width_to_bits(dd, tx);
6828         *rx_width = link_width_to_bits(dd, rx);
6829 }
6830
6831 /*
6832  * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
6833  * Valid after the end of VerifyCap and during LinkUp.  Does not change
6834  * after link up.  I.e. look elsewhere for downgrade information.
6835  *
6836  * Bits are:
6837  *      + bits [7:4] contain the number of active transmitters
6838  *      + bits [3:0] contain the number of active receivers
6839  * These are numbers 1 through 4 and can be different values if the
6840  * link is asymmetric.
6841  *
6842  * verify_cap_local_fm_link_width[0] retains its original value.
6843  */
6844 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
6845                               u16 *rx_width)
6846 {
6847         u16 widths, tx, rx;
6848         u8 misc_bits, local_flags;
6849         u16 active_tx, active_rx;
6850
6851         read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
6852         tx = widths >> 12;
6853         rx = (widths >> 8) & 0xf;
6854
6855         *tx_width = link_width_to_bits(dd, tx);
6856         *rx_width = link_width_to_bits(dd, rx);
6857
6858         /* print the active widths */
6859         get_link_widths(dd, &active_tx, &active_rx);
6860 }
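
/*
 * Illustrative sketch (not part of the driver): decoding the widths
 * value the way get_linkup_widths() does above.  The shifts imply the
 * tx count sits in bits [15:12] and the rx count in bits [11:8] of the
 * value returned by read_vc_local_link_width(); 0x4400 is a made-up
 * example of a symmetric 4x link.
 */
#if 0	/* example only, never compiled */
static void example_linkup_width_decode(void)
{
	u16 widths = 0x4400;
	u16 tx = widths >> 12;		/* == 4 active transmitters */
	u16 rx = (widths >> 8) & 0xf;	/* == 4 active receivers */
	/* link_width_to_bits(dd, 4) then maps each to OPA_LINK_WIDTH_4X */
}
#endif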
6861
6862 /*
6863  * Set ppd->link_width_active and ppd->link_width_downgrade_active using
6864  * hardware information when the link first comes up.
6865  *
6866  * The link width is not available until after VerifyCap.AllFramesReceived
6867  * (the trigger for handle_verify_cap), so this is outside that routine
6868  * and should be called when the 8051 signals linkup.
6869  */
6870 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
6871 {
6872         u16 tx_width, rx_width;
6873
6874         /* get end-of-LNI link widths */
6875         get_linkup_widths(ppd->dd, &tx_width, &rx_width);
6876
6877         /* use tx_width as the link is supposed to be symmetric on link up */
6878         ppd->link_width_active = tx_width;
6879         /* link width downgrade active (LWD.A) starts out matching LW.A */
6880         ppd->link_width_downgrade_tx_active = ppd->link_width_active;
6881         ppd->link_width_downgrade_rx_active = ppd->link_width_active;
6882         /* per OPA spec, on link up LWD.E resets to LWD.S */
6883         ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
6884         /* cache the active egress rate (units [10^6 bits/sec]) */
6885         ppd->current_egress_rate = active_egress_rate(ppd);
6886 }
6887
6888 /*
6889  * Handle a verify capabilities interrupt from the 8051.
6890  *
6891  * This is a work-queue function outside of the interrupt.
6892  */
6893 void handle_verify_cap(struct work_struct *work)
6894 {
6895         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6896                                                                 link_vc_work);
6897         struct hfi1_devdata *dd = ppd->dd;
6898         u64 reg;
6899         u8 power_management;
6900         u8 continuous;
6901         u8 vcu;
6902         u8 vau;
6903         u8 z;
6904         u16 vl15buf;
6905         u16 link_widths;
6906         u16 crc_mask;
6907         u16 crc_val;
6908         u16 device_id;
6909         u16 active_tx, active_rx;
6910         u8 partner_supported_crc;
6911         u8 remote_tx_rate;
6912         u8 device_rev;
6913
6914         set_link_state(ppd, HLS_VERIFY_CAP);
6915
6916         lcb_shutdown(dd, 0);
6917         adjust_lcb_for_fpga_serdes(dd);
6918
6919         /*
6920          * These are now valid:
6921          *      remote VerifyCap fields in the general LNI config
6922          *      CSR DC8051_STS_REMOTE_GUID
6923          *      CSR DC8051_STS_REMOTE_NODE_TYPE
6924          *      CSR DC8051_STS_REMOTE_FM_SECURITY
6925          *      CSR DC8051_STS_REMOTE_PORT_NO
6926          */
6927
6928         read_vc_remote_phy(dd, &power_management, &continuous);
6929         read_vc_remote_fabric(
6930                 dd,
6931                 &vau,
6932                 &z,
6933                 &vcu,
6934                 &vl15buf,
6935                 &partner_supported_crc);
6936         read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
6937         read_remote_device_id(dd, &device_id, &device_rev);
6938         /*
6939          * And the 'MgmtAllowed' information, which is exchanged during
6940          * LNI, is also available at this point.
6941          */
6942         read_mgmt_allowed(dd, &ppd->mgmt_allowed);
6943         /* print the active widths */
6944         get_link_widths(dd, &active_tx, &active_rx);
6945         dd_dev_info(dd,
6946                 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
6947                 (int)power_management, (int)continuous);
6948         dd_dev_info(dd,
6949                 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
6950                 (int)vau,
6951                 (int)z,
6952                 (int)vcu,
6953                 (int)vl15buf,
6954                 (int)partner_supported_crc);
6955         dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
6956                 (u32)remote_tx_rate, (u32)link_widths);
6957         dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
6958                 (u32)device_id, (u32)device_rev);
6959         /*
6960          * The peer vAU value just read is the peer receiver value.  HFI does
6961          * not support a transmit vAU of 0 (AU == 8).  We advertised that
6962          * with Z=1 in the fabric capabilities sent to the peer.  The peer
6963          * will see our Z=1, and, if it advertised a vAU of 0, will move its
6964          * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
6965          * about the peer Z value - our sent vAU is 3 (hardwired) and is not
6966          * subject to the Z value exception.
6967          */
6968         if (vau == 0)
6969                 vau = 1;
6970         set_up_vl15(dd, vau, vl15buf);
6971
6972         /* set up the LCB CRC mode */
6973         crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
6974
6975         /* order is important: use the lowest bit in common */
6976         if (crc_mask & CAP_CRC_14B)
6977                 crc_val = LCB_CRC_14B;
6978         else if (crc_mask & CAP_CRC_48B)
6979                 crc_val = LCB_CRC_48B;
6980         else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
6981                 crc_val = LCB_CRC_12B_16B_PER_LANE;
6982         else
6983                 crc_val = LCB_CRC_16B;
6984
6985         dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
6986         write_csr(dd, DC_LCB_CFG_CRC_MODE,
6987                   (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
6988
6989         /* set (14b only) or clear sideband credit */
6990         reg = read_csr(dd, SEND_CM_CTRL);
6991         if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
6992                 write_csr(dd, SEND_CM_CTRL,
6993                         reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
6994         } else {
6995                 write_csr(dd, SEND_CM_CTRL,
6996                         reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
6997         }
6998
6999         ppd->link_speed_active = 0;     /* invalid value */
7000         if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7001                 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7002                 switch (remote_tx_rate) {
7003                 case 0:
7004                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7005                         break;
7006                 case 1:
7007                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7008                         break;
7009                 }
7010         } else {
7011                 /* actual rate is highest bit of the ANDed rates */
7012                 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7013
7014                 if (rate & 2)
7015                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7016                 else if (rate & 1)
7017                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7018         }
7019         if (ppd->link_speed_active == 0) {
7020                 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7021                         __func__, (int)remote_tx_rate);
7022                 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7023         }
7024
7025         /*
7026          * Cache the values of the supported, enabled, and active
7027          * LTP CRC modes to return in 'portinfo' queries. But the bit
7028          * flags that are returned in the portinfo query differ from
7029          * what's in the link_crc_mask, crc_sizes, and crc_val
7030          * variables. Convert these here.
7031          */
7032         ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7033                 /* supported crc modes */
7034         ppd->port_ltp_crc_mode |=
7035                 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7036                 /* enabled crc modes */
7037         ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7038                 /* active crc mode */
7039
7040         /* set up the remote credit return table */
7041         assign_remote_cm_au_table(dd, vcu);
7042
7043         /*
7044          * The LCB is reset on entry to handle_verify_cap(), so this must
7045          * be applied on every link up.
7046          *
7047          * Adjust LCB error kill enable to kill the link if
7048          * these RBUF errors are seen:
7049          *      REPLAY_BUF_MBE_SMASK
7050          *      FLIT_INPUT_BUF_MBE_SMASK
7051          */
7052         if (is_ax(dd)) {                        /* fixed in B0 */
7053                 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7054                 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7055                         | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7056                 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7057         }
7058
7059         /* pull LCB fifos out of reset - all fifo clocks must be stable */
7060         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7061
7062         /* give 8051 access to the LCB CSRs */
7063         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7064         set_8051_lcb_access(dd);
7065
7066         ppd->neighbor_guid =
7067                 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7068         ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7069                                         DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7070         ppd->neighbor_type =
7071                 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7072                 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7073         ppd->neighbor_fm_security =
7074                 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7075                 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7076         dd_dev_info(dd,
7077                 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7078                 ppd->neighbor_guid, ppd->neighbor_type,
7079                 ppd->mgmt_allowed, ppd->neighbor_fm_security);
7080         if (ppd->mgmt_allowed)
7081                 add_full_mgmt_pkey(ppd);
7082
7083         /* tell the 8051 to go to LinkUp */
7084         set_link_state(ppd, HLS_GOING_UP);
7085 }
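
/*
 * Illustrative sketch (not part of the driver): resolving the active
 * link speed from the ANDed rate bits, as handle_verify_cap() does
 * above for 8051 firmware v0.20 and later.  The rate values are
 * made-up examples.
 */
#if 0	/* example only, never compiled */
static void example_link_speed_resolution(void)
{
	u8 remote_tx_rate = 0x3;	/* peer supports 12.5G and 25G */
	u8 local_tx_rate = 0x2;		/* we advertised 25G only */
	u8 rate = remote_tx_rate & local_tx_rate;	/* == 0x2 */

	/* bit 1 (value 2) selects 25G; otherwise bit 0 selects 12.5G */
	u16 speed = (rate & 2) ? OPA_LINK_SPEED_25G :
		    (rate & 1) ? OPA_LINK_SPEED_12_5G : 0;	/* == 25G here */
}
#endif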
7086
7087 /*
7088  * Apply the link width downgrade enabled policy against the current active
7089  * link widths.
7090  *
7091  * Called when the enabled policy changes or the active link widths change.
7092  */
7093 void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7094 {
7095         int do_bounce = 0;
7096         int tries;
7097         u16 lwde;
7098         u16 tx, rx;
7099
7100         /* use the hls lock to avoid a race with actual link up */
7101         tries = 0;
7102 retry:
7103         mutex_lock(&ppd->hls_lock);
7104         /* only apply if the link is up */
7105         if (!(ppd->host_link_state & HLS_UP)) {
7106                 /* still going up... wait and retry */
7107                 if (ppd->host_link_state & HLS_GOING_UP) {
7108                         if (++tries < 1000) {
7109                                 mutex_unlock(&ppd->hls_lock);
7110                                 usleep_range(100, 120); /* arbitrary */
7111                                 goto retry;
7112                         }
7113                         dd_dev_err(ppd->dd,
7114                                    "%s: giving up waiting for link state change\n",
7115                                    __func__);
7116                 }
7117                 goto done;
7118         }
7119
7120         lwde = ppd->link_width_downgrade_enabled;
7121
7122         if (refresh_widths) {
7123                 get_link_widths(ppd->dd, &tx, &rx);
7124                 ppd->link_width_downgrade_tx_active = tx;
7125                 ppd->link_width_downgrade_rx_active = rx;
7126         }
7127
7128         if (lwde == 0) {
7129                 /* downgrade is disabled */
7130
7131                 /* bounce if not at starting active width */
7132                 if ((ppd->link_width_active !=
7133                                         ppd->link_width_downgrade_tx_active)
7134                                 || (ppd->link_width_active !=
7135                                         ppd->link_width_downgrade_rx_active)) {
7136                         dd_dev_err(ppd->dd,
7137                                 "Link downgrade is disabled and link has downgraded, downing link\n");
7138                         dd_dev_err(ppd->dd,
7139                                 "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
7140                                 ppd->link_width_active,
7141                                 ppd->link_width_downgrade_tx_active,
7142                                 ppd->link_width_downgrade_rx_active);
7143                         do_bounce = 1;
7144                 }
7145         } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0
7146                 || (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7147                 /* Tx or Rx is outside the enabled policy */
7148                 dd_dev_err(ppd->dd,
7149                         "Link is outside of downgrade allowed, downing link\n");
7150                 dd_dev_err(ppd->dd,
7151                         "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7152                         lwde,
7153                         ppd->link_width_downgrade_tx_active,
7154                         ppd->link_width_downgrade_rx_active);
7155                 do_bounce = 1;
7156         }
7157
7158 done:
7159         mutex_unlock(&ppd->hls_lock);
7160
7161         if (do_bounce) {
7162                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7163                   OPA_LINKDOWN_REASON_WIDTH_POLICY);
7164                 set_link_state(ppd, HLS_DN_OFFLINE);
7165                 start_link(ppd);
7166         }
7167 }
7168
7169 /*
7170  * Handle a link downgrade interrupt from the 8051.
7171  *
7172  * This is a work-queue function outside of the interrupt.
7173  */
7174 void handle_link_downgrade(struct work_struct *work)
7175 {
7176         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7177                                                         link_downgrade_work);
7178
7179         dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7180         apply_link_downgrade_policy(ppd, 1);
7181 }
7182
7183 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7184 {
7185         return flag_string(buf, buf_len, flags, dcc_err_flags,
7186                 ARRAY_SIZE(dcc_err_flags));
7187 }
7188
7189 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7190 {
7191         return flag_string(buf, buf_len, flags, lcb_err_flags,
7192                 ARRAY_SIZE(lcb_err_flags));
7193 }
7194
7195 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7196 {
7197         return flag_string(buf, buf_len, flags, dc8051_err_flags,
7198                 ARRAY_SIZE(dc8051_err_flags));
7199 }
7200
7201 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7202 {
7203         return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7204                 ARRAY_SIZE(dc8051_info_err_flags));
7205 }
7206
7207 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7208 {
7209         return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7210                 ARRAY_SIZE(dc8051_info_host_msg_flags));
7211 }
7212
7213 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7214 {
7215         struct hfi1_pportdata *ppd = dd->pport;
7216         u64 info, err, host_msg;
7217         int queue_link_down = 0;
7218         char buf[96];
7219
7220         /* look at the flags */
7221         if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7222                 /* 8051 information set by firmware */
7223                 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7224                 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7225                 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7226                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7227                 host_msg = (info >>
7228                         DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7229                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7230
7231                 /*
7232                  * Handle error flags.
7233                  */
7234                 if (err & FAILED_LNI) {
7235                         /*
7236                          * LNI error indications are cleared by the 8051
7237                          * only when starting polling.  Only pay attention
7238                          * to them when in the states that occur during
7239                          * LNI.
7240                          */
7241                         if (ppd->host_link_state
7242                             & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7243                                 queue_link_down = 1;
7244                                 dd_dev_info(dd, "Link error: %s\n",
7245                                         dc8051_info_err_string(buf,
7246                                                 sizeof(buf),
7247                                                 err & FAILED_LNI));
7248                         }
7249                         err &= ~(u64)FAILED_LNI;
7250                 }
7251                 /* unknown frames can happen during LNI, just count */
7252                 if (err & UNKNOWN_FRAME) {
7253                         ppd->unknown_frame_count++;
7254                         err &= ~(u64)UNKNOWN_FRAME;
7255                 }
7256                 if (err) {
7257                         /* report remaining errors, but do not do anything */
7258                         dd_dev_err(dd, "8051 info error: %s\n",
7259                                 dc8051_info_err_string(buf, sizeof(buf), err));
7260                 }
7261
7262                 /*
7263                  * Handle host message flags.
7264                  */
7265                 if (host_msg & HOST_REQ_DONE) {
7266                         /*
7267                          * Presently, the driver does a busy wait for
7268                          * host requests to complete.  This is only an
7269                          * informational message.
7270                          * NOTE: The 8051 clears the host message
7271                          * information *on the next 8051 command*.
7272                          * Therefore, when linkup is achieved,
7273                          * this flag will still be set.
7274                          */
7275                         host_msg &= ~(u64)HOST_REQ_DONE;
7276                 }
7277                 if (host_msg & BC_SMA_MSG) {
7278                         queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7279                         host_msg &= ~(u64)BC_SMA_MSG;
7280                 }
7281                 if (host_msg & LINKUP_ACHIEVED) {
7282                         dd_dev_info(dd, "8051: Link up\n");
7283                         queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7284                         host_msg &= ~(u64)LINKUP_ACHIEVED;
7285                 }
7286                 if (host_msg & EXT_DEVICE_CFG_REQ) {
7287                         handle_8051_request(dd);
7288                         host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7289                 }
7290                 if (host_msg & VERIFY_CAP_FRAME) {
7291                         queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7292                         host_msg &= ~(u64)VERIFY_CAP_FRAME;
7293                 }
7294                 if (host_msg & LINK_GOING_DOWN) {
7295                         const char *extra = "";
7296                         /* no downgrade action needed if going down */
7297                         if (host_msg & LINK_WIDTH_DOWNGRADED) {
7298                                 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7299                                 extra = " (ignoring downgrade)";
7300                         }
7301                         dd_dev_info(dd, "8051: Link down%s\n", extra);
7302                         queue_link_down = 1;
7303                         host_msg &= ~(u64)LINK_GOING_DOWN;
7304                 }
7305                 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7306                         queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7307                         host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7308                 }
7309                 if (host_msg) {
7310                         /* report remaining messages, but do not do anything */
7311                         dd_dev_info(dd, "8051 info host message: %s\n",
7312                                 dc8051_info_host_msg_string(buf, sizeof(buf),
7313                                         host_msg));
7314                 }
7315
7316                 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7317         }
7318         if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7319                 /*
7320                  * Lost the 8051 heartbeat.  If this happens, we
7321                  * receive constant interrupts about it.  Disable
7322                  * the interrupt after the first.
7323                  */
7324                 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7325                 write_csr(dd, DC_DC8051_ERR_EN,
7326                         read_csr(dd, DC_DC8051_ERR_EN)
7327                           & ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7328
7329                 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7330         }
7331         if (reg) {
7332                 /* report the error, but do not do anything */
7333                 dd_dev_err(dd, "8051 error: %s\n",
7334                         dc8051_err_string(buf, sizeof(buf), reg));
7335         }
7336
7337         if (queue_link_down) {
7338                 /* if the link is already going down or disabled, do not
7339                  * queue another */
7340                 if ((ppd->host_link_state
7341                                     & (HLS_GOING_OFFLINE|HLS_LINK_COOLDOWN))
7342                                 || ppd->link_enabled == 0) {
7343                         dd_dev_info(dd, "%s: not queuing link down\n",
7344                                 __func__);
7345                 } else {
7346                         queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7347                 }
7348         }
7349 }
7350
7351 static const char * const fm_config_txt[] = {
7352 [0] =
7353         "BadHeadDist: Distance violation between two head flits",
7354 [1] =
7355         "BadTailDist: Distance violation between two tail flits",
7356 [2] =
7357         "BadCtrlDist: Distance violation between two credit control flits",
7358 [3] =
7359         "BadCrdAck: Credits return for unsupported VL",
7360 [4] =
7361         "UnsupportedVLMarker: Received VL Marker",
7362 [5] =
7363         "BadPreempt: Exceeded the preemption nesting level",
7364 [6] =
7365         "BadControlFlit: Received unsupported control flit",
7366 /* no 7 */
7367 [8] =
7368         "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7369 };
7370
7371 static const char * const port_rcv_txt[] = {
7372 [1] =
7373         "BadPktLen: Illegal PktLen",
7374 [2] =
7375         "PktLenTooLong: Packet longer than PktLen",
7376 [3] =
7377         "PktLenTooShort: Packet shorter than PktLen",
7378 [4] =
7379         "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7380 [5] =
7381         "BadDLID: Illegal DLID (0, doesn't match HFI)",
7382 [6] =
7383         "BadL2: Illegal L2 opcode",
7384 [7] =
7385         "BadSC: Unsupported SC",
7386 [9] =
7387         "BadRC: Illegal RC",
7388 [11] =
7389         "PreemptError: Preempting with same VL",
7390 [12] =
7391         "PreemptVL15: Preempting a VL15 packet",
7392 };
7393
7394 #define OPA_LDR_FMCONFIG_OFFSET 16
7395 #define OPA_LDR_PORTRCV_OFFSET 0
7396 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7397 {
7398         u64 info, hdr0, hdr1;
7399         const char *extra;
7400         char buf[96];
7401         struct hfi1_pportdata *ppd = dd->pport;
7402         u8 lcl_reason = 0;
7403         int do_bounce = 0;
7404
7405         if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7406                 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7407                         info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7408                         dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7409                         /* set status bit */
7410                         dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7411                 }
7412                 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7413         }
7414
7415         if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7416                 struct hfi1_pportdata *ppd = dd->pport;
7417                 /* this counter saturates at (2^32) - 1 */
7418                 if (ppd->link_downed < (u32)UINT_MAX)
7419                         ppd->link_downed++;
7420                 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7421         }
7422
7423         if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7424                 u8 reason_valid = 1;
7425
7426                 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7427                 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7428                         dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7429                         /* set status bit */
7430                         dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7431                 }
7432                 switch (info) {
7433                 case 0:
7434                 case 1:
7435                 case 2:
7436                 case 3:
7437                 case 4:
7438                 case 5:
7439                 case 6:
7440                         extra = fm_config_txt[info];
7441                         break;
7442                 case 8:
7443                         extra = fm_config_txt[info];
7444                         if (ppd->port_error_action &
7445                             OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7446                                 do_bounce = 1;
7447                                 /*
7448                                  * lcl_reason cannot be derived from info
7449                                  * for this error
7450                                  */
7451                                 lcl_reason =
7452                                   OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7453                         }
7454                         break;
7455                 default:
7456                         reason_valid = 0;
7457                         snprintf(buf, sizeof(buf), "reserved%lld", info);
7458                         extra = buf;
7459                         break;
7460                 }
7461
7462                 if (reason_valid && !do_bounce) {
7463                         do_bounce = ppd->port_error_action &
7464                                         (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7465                         lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7466                 }
7467
7468                 /* just report this */
7469                 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7470                 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7471         }
7472
7473         if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7474                 u8 reason_valid = 1;
7475
7476                 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7477                 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7478                 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7479                 if (!(dd->err_info_rcvport.status_and_code &
7480                       OPA_EI_STATUS_SMASK)) {
7481                         dd->err_info_rcvport.status_and_code =
7482                                 info & OPA_EI_CODE_SMASK;
7483                         /* set status bit */
7484                         dd->err_info_rcvport.status_and_code |=
7485                                 OPA_EI_STATUS_SMASK;
7486                         /* save first 2 flits in the packet that caused
7487                          * the error */
7488                         dd->err_info_rcvport.packet_flit1 = hdr0;
7489                         dd->err_info_rcvport.packet_flit2 = hdr1;
7490                 }
7491                 switch (info) {
7492                 case 1:
7493                 case 2:
7494                 case 3:
7495                 case 4:
7496                 case 5:
7497                 case 6:
7498                 case 7:
7499                 case 9:
7500                 case 11:
7501                 case 12:
7502                         extra = port_rcv_txt[info];
7503                         break;
7504                 default:
7505                         reason_valid = 0;
7506                         snprintf(buf, sizeof(buf), "reserved%lld", info);
7507                         extra = buf;
7508                         break;
7509                 }
7510
7511                 if (reason_valid && !do_bounce) {
7512                         do_bounce = ppd->port_error_action &
7513                                         (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7514                         lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7515                 }
7516
7517                 /* just report this */
7518                 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7519                 dd_dev_info(dd, "           hdr0 0x%llx, hdr1 0x%llx\n",
7520                         hdr0, hdr1);
7521
7522                 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7523         }
7524
7525         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7526                 /* informative only */
7527                 dd_dev_info(dd, "8051 access to LCB blocked\n");
7528                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7529         }
7530         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7531                 /* informative only */
7532                 dd_dev_info(dd, "host access to LCB blocked\n");
7533                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7534         }
7535
7536         /* report any remaining errors */
7537         if (reg)
7538                 dd_dev_info(dd, "DCC Error: %s\n",
7539                         dcc_err_string(buf, sizeof(buf), reg));
7540
7541         if (lcl_reason == 0)
7542                 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7543
7544         if (do_bounce) {
7545                 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7546                 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7547                 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7548         }
7549 }
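
/*
 * Illustrative sketch (not part of the driver): how a PortErrorAction
 * bit selects a link bounce in handle_dcc_err() above.  The info code
 * and port_error_action value are made-up examples.
 */
#if 0	/* example only, never compiled */
static void example_port_error_action(void)
{
	u64 info = 2;			/* PktLenTooLong */
	u32 port_error_action = 1 << (OPA_LDR_PORTRCV_OFFSET + 2);
	int do_bounce = port_error_action &
				(1 << (OPA_LDR_PORTRCV_OFFSET + info));
	/* do_bounce is nonzero, so this error bounces the link */
}
#endif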
7550
7551 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7552 {
7553         char buf[96];
7554
7555         dd_dev_info(dd, "LCB Error: %s\n",
7556                 lcb_err_string(buf, sizeof(buf), reg));
7557 }
7558
7559 /*
7560  * CCE block DC interrupt.  Source is < 8.
7561  */
7562 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7563 {
7564         const struct err_reg_info *eri = &dc_errs[source];
7565
7566         if (eri->handler) {
7567                 interrupt_clear_down(dd, 0, eri);
7568         } else if (source == 3 /* dc_lbm_int */) {
7569                 /*
7570                  * This indicates that a parity error has occurred on the
7571                  * address/control lines presented to the LBM.  The error
7572                  * is a single pulse, there is no associated error flag,
7573                  * and it is non-maskable.  This is because if a parity
7574                  * error occurs on the request the request is dropped.
7575                  * This should never occur, but it is nice to know if it
7576                  * ever does.
7577                  */
7578                 dd_dev_err(dd, "Parity error in DC LBM block\n");
7579         } else {
7580                 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7581         }
7582 }
7583
7584 /*
7585  * TX block send credit interrupt.  Source is < 160.
7586  */
7587 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7588 {
7589         sc_group_release_update(dd, source);
7590 }
7591
7592 /*
7593  * TX block SDMA interrupt.  Source is < 48.
7594  *
7595  * SDMA interrupts are grouped by type:
7596  *
7597  *       0 -  N-1 = SDma
7598  *       N - 2N-1 = SDmaProgress
7599  *      2N - 3N-1 = SDmaIdle
7600  */
7601 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7602 {
7603         /* what interrupt */
7604         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
7605         /* which engine */
7606         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7607
7608 #ifdef CONFIG_SDMA_VERBOSITY
7609         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7610                    slashstrip(__FILE__), __LINE__, __func__);
7611         sdma_dumpstate(&dd->per_sdma[which]);
7612 #endif
7613
7614         if (likely(what < 3 && which < dd->num_sdma)) {
7615                 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7616         } else {
7617                 /* should not happen */
7618                 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7619         }
7620 }
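
/*
 * Illustrative sketch (not part of the driver): how an SDMA interrupt
 * source number splits into interrupt type and engine above.  The
 * source value and the 16-engine count are made-up example numbers.
 */
#if 0	/* example only, never compiled */
static void example_sdma_source_split(void)
{
	unsigned int source = 21;	/* assuming TXE_NUM_SDMA_ENGINES == 16 */
	unsigned int what  = source / 16;	/* == 1 -> SDmaProgress */
	unsigned int which = source % 16;	/* == 5 -> engine 5 */
}
#endif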
7621
7622 /*
7623  * RX block receive available interrupt.  Source is < 160.
7624  */
7625 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7626 {
7627         struct hfi1_ctxtdata *rcd;
7628         char *err_detail;
7629
7630         if (likely(source < dd->num_rcv_contexts)) {
7631                 rcd = dd->rcd[source];
7632                 if (rcd) {
7633                         if (source < dd->first_user_ctxt)
7634                                 rcd->do_interrupt(rcd, 0);
7635                         else
7636                                 handle_user_interrupt(rcd);
7637                         return; /* OK */
7638                 }
7639                 /* received an interrupt, but no rcd */
7640                 err_detail = "dataless";
7641         } else {
7642                 /* received an interrupt, but we are not using that context */
7643                 err_detail = "out of range";
7644         }
7645         dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
7646                 err_detail, source);
7647 }
7648
7649 /*
7650  * RX block receive urgent interrupt.  Source is < 160.
7651  */
7652 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
7653 {
7654         struct hfi1_ctxtdata *rcd;
7655         char *err_detail;
7656
7657         if (likely(source < dd->num_rcv_contexts)) {
7658                 rcd = dd->rcd[source];
7659                 if (rcd) {
7660                         /* only pay attention to user urgent interrupts */
7661                         if (source >= dd->first_user_ctxt)
7662                                 handle_user_interrupt(rcd);
7663                         return; /* OK */
7664                 }
7665                 /* received an interrupt, but no rcd */
7666                 err_detail = "dataless";
7667         } else {
7668                 /* received an interrupt, but we are not using that context */
7669                 err_detail = "out of range";
7670         }
7671         dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
7672                 err_detail, source);
7673 }
7674
7675 /*
7676  * Reserved range interrupt.  Should not be called in normal operation.
7677  */
7678 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
7679 {
7680         char name[64];
7681
7682         dd_dev_err(dd, "unexpected %s interrupt\n",
7683                                 is_reserved_name(name, sizeof(name), source));
7684 }
7685
7686 static const struct is_table is_table[] = {
7687 /* start                     end
7688                                 name func               interrupt func */
7689 { IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
7690                                 is_misc_err_name,       is_misc_err_int },
7691 { IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
7692                                 is_sdma_eng_err_name,   is_sdma_eng_err_int },
7693 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
7694                                 is_sendctxt_err_name,   is_sendctxt_err_int },
7695 { IS_SDMA_START,             IS_SDMA_END,
7696                                 is_sdma_eng_name,       is_sdma_eng_int },
7697 { IS_VARIOUS_START,          IS_VARIOUS_END,
7698                                 is_various_name,        is_various_int },
7699 { IS_DC_START,       IS_DC_END,
7700                                 is_dc_name,             is_dc_int },
7701 { IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
7702                                 is_rcv_avail_name,      is_rcv_avail_int },
7703 { IS_RCVURGENT_START,    IS_RCVURGENT_END,
7704                                 is_rcv_urgent_name,     is_rcv_urgent_int },
7705 { IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
7706                                 is_send_credit_name,    is_send_credit_int},
7707 { IS_RESERVED_START,     IS_RESERVED_END,
7708                                 is_reserved_name,       is_reserved_int},
7709 };
7710
7711 /*
7712  * Interrupt source interrupt - called when the given source has an interrupt.
7713  * Source is a bit index into an array of 64-bit integers.
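 *
 * Each is_table entry above covers sources in [start, end).  The table
 * is ordered, so the first entry whose end is greater than the source
 * is the match, and its handler receives the range-relative index
 * (source - start).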
7714  */
7715 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
7716 {
7717         const struct is_table *entry;
7718
7719         /* avoids a double compare by walking the table in-order */
7720         for (entry = &is_table[0]; entry->is_name; entry++) {
7721                 if (source < entry->end) {
7722                         trace_hfi1_interrupt(dd, entry, source);
7723                         entry->is_int(dd, source - entry->start);
7724                         return;
7725                 }
7726         }
7727         /* fell off the end */
7728         dd_dev_err(dd, "invalid interrupt source %u\n", source);
7729 }
7730
7731 /*
7732  * General interrupt handler.  This is able to correctly handle
7733  * all interrupts in case INTx is used.
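 *
 * Enabled bits in every CCE interrupt status CSR are read and cleared,
 * then the collected words are walked as one flat bitmap, so each set
 * bit's index (64 * csr + bit) is the interrupt source number handed
 * to is_interrupt().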
7734  */
7735 static irqreturn_t general_interrupt(int irq, void *data)
7736 {
7737         struct hfi1_devdata *dd = data;
7738         u64 regs[CCE_NUM_INT_CSRS];
7739         u32 bit;
7740         int i;
7741
7742         this_cpu_inc(*dd->int_counter);
7743
7744         /* phase 1: scan and clear all handled interrupts */
7745         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
7746                 if (dd->gi_mask[i] == 0) {
7747                         regs[i] = 0;    /* used later */
7748                         continue;
7749                 }
7750                 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
7751                                 dd->gi_mask[i];
7752                 /* only clear if anything is set */
7753                 if (regs[i])
7754                         write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
7755         }
7756
7757         /* phase 2: call the appropriate handler */
7758         for_each_set_bit(bit, (unsigned long *)&regs[0],
7759                                                 CCE_NUM_INT_CSRS*64) {
7760                 is_interrupt(dd, bit);
7761         }
7762
7763         return IRQ_HANDLED;
7764 }
7765
7766 static irqreturn_t sdma_interrupt(int irq, void *data)
7767 {
7768         struct sdma_engine *sde = data;
7769         struct hfi1_devdata *dd = sde->dd;
7770         u64 status;
7771
7772 #ifdef CONFIG_SDMA_VERBOSITY
7773         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
7774                    slashstrip(__FILE__), __LINE__, __func__);
7775         sdma_dumpstate(sde);
7776 #endif
7777
7778         this_cpu_inc(*dd->int_counter);
7779
7780         /* This read_csr is really bad in the hot path */
7781         status = read_csr(dd,
7782                         CCE_INT_STATUS + (8*(IS_SDMA_START/64)))
7783                         & sde->imask;
7784         if (likely(status)) {
7785                 /* clear the interrupt(s) */
7786                 write_csr(dd,
7787                         CCE_INT_CLEAR + (8*(IS_SDMA_START/64)),
7788                         status);
7789
7790                 /* handle the interrupt(s) */
7791                 sdma_engine_interrupt(sde, status);
7792         } else
7793                 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
7794                         sde->this_idx);
7795
7796         return IRQ_HANDLED;
7797 }
7798
7799 /*
7800  * Clear the receive interrupt, forcing the write and making sure
7801  * we have data from the chip, pushing everything in front of it
7802  * back to the host.
7803  */
7804 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
7805 {
7806         struct hfi1_devdata *dd = rcd->dd;
7807         u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
7808
7809         mmiowb();       /* make sure everything before is written */
7810         write_csr(dd, addr, rcd->imask);
7811         /* force the above write on the chip and get a value back */
7812         (void)read_csr(dd, addr);
7813 }
7814
7815 /* force the receive interrupt */
7816 static inline void force_recv_intr(struct hfi1_ctxtdata *rcd)
7817 {
7818         write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
7819 }
7820
7821 /* return non-zero if a packet is present */
7822 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
7823 {
7824         if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
7825                 return (rcd->seq_cnt ==
7826                                 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
7827
7828         /* else is DMA rtail */
7829         return (rcd->head != get_rcvhdrtail(rcd));
7830 }
7831
7832 /*
7833  * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
7834  * This routine will try to handle packets immediately (latency), but if
7835  * it finds too many, it will invoke the thread handler (bandwidth).  The
7836  * chip receive interrupt is *not* cleared down until this or the thread (if
7837  * invoked) is finished.  The intent is to avoid extra interrupts while we
7838  * are processing packets anyway.
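 *
 * Returning IRQ_WAKE_THREAD wakes the threaded half of this IRQ,
 * receive_context_thread() below, which continues with the receive
 * interrupt still blocked.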
7839  */
7840 static irqreturn_t receive_context_interrupt(int irq, void *data)
7841 {
7842         struct hfi1_ctxtdata *rcd = data;
7843         struct hfi1_devdata *dd = rcd->dd;
7844         int disposition;
7845         int present;
7846
7847         trace_hfi1_receive_interrupt(dd, rcd->ctxt);
7848         this_cpu_inc(*dd->int_counter);
7849
7850         /* receive interrupt remains blocked while processing packets */
7851         disposition = rcd->do_interrupt(rcd, 0);
7852
7853         /*
7854          * Too many packets were seen while processing packets in this
7855          * IRQ handler.  Invoke the handler thread.  The receive interrupt
7856          * remains blocked.
7857          */
7858         if (disposition == RCV_PKT_LIMIT)
7859                 return IRQ_WAKE_THREAD;
7860
7861         /*
7862          * The packet processor detected no more packets.  Clear the receive
7863          * interrupt and recheck for a packet that may have arrived
7864          * after the previous check and interrupt clear.  If a packet arrived,
7865          * force another interrupt.
7866          */
7867         clear_recv_intr(rcd);
7868         present = check_packet_present(rcd);
7869         if (present)
7870                 force_recv_intr(rcd);
7871
7872         return IRQ_HANDLED;
7873 }
7874
7875 /*
7876  * Receive packet thread handler.  This expects to be invoked with the
7877  * receive interrupt still blocked.
7878  */
7879 static irqreturn_t receive_context_thread(int irq, void *data)
7880 {
7881         struct hfi1_ctxtdata *rcd = data;
7882         int present;
7883
7884         /* receive interrupt is still blocked from the IRQ handler */
7885         (void)rcd->do_interrupt(rcd, 1);
7886
7887         /*
7888          * The packet processor will only return if it detected no more
7889          * packets.  Hold IRQs here so we can safely clear the interrupt and
7890          * recheck for a packet that may have arrived after the previous
7891          * check and the interrupt clear.  If a packet arrived, force another
7892          * interrupt.
7893          */
7894         local_irq_disable();
7895         clear_recv_intr(rcd);
7896         present = check_packet_present(rcd);
7897         if (present)
7898                 force_recv_intr(rcd);
7899         local_irq_enable();
7900
7901         return IRQ_HANDLED;
7902 }
7903
7904 /* ========================================================================= */
7905
7906 u32 read_physical_state(struct hfi1_devdata *dd)
7907 {
7908         u64 reg;
7909
7910         reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
7911         return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
7912                                 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
7913 }
7914
7915 static u32 read_logical_state(struct hfi1_devdata *dd)
7916 {
7917         u64 reg;
7918
7919         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
7920         return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
7921                                 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
7922 }
7923
7924 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
7925 {
7926         u64 reg;
7927
7928         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
7929         /* clear current state, set new state */
7930         reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
7931         reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
7932         write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
7933 }
7934
7935 /*
7936  * Use the 8051 to read an LCB CSR.
7937  */
7938 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
7939 {
7940         u32 regno;
7941         int ret;
7942
7943         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
7944                 if (acquire_lcb_access(dd, 0) == 0) {
7945                         *data = read_csr(dd, addr);
7946                         release_lcb_access(dd, 0);
7947                         return 0;
7948                 }
7949                 return -EBUSY;
7950         }
7951
7952         /* register is an index of LCB registers: (offset - base) / 8 */
7953         regno = (addr - DC_LCB_CFG_RUN) >> 3;
7954         ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
7955         if (ret != HCMD_SUCCESS)
7956                 return -EBUSY;
7957         return 0;
7958 }
7959
7960 /*
7961  * Read an LCB CSR.  Access may not be in host control, so check.
7962  * Return 0 on success, -EBUSY on failure.
7963  */
7964 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
7965 {
7966         struct hfi1_pportdata *ppd = dd->pport;
7967
7968         /* if up, go through the 8051 for the value */
7969         if (ppd->host_link_state & HLS_UP)
7970                 return read_lcb_via_8051(dd, addr, data);
7971         /* if going up or down, no access */
7972         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
7973                 return -EBUSY;
7974         /* otherwise, host has access */
7975         *data = read_csr(dd, addr);
7976         return 0;
7977 }
7978
7979 /*
7980  * Use the 8051 to write an LCB CSR.
7981  */
7982 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
7983 {
7984         u32 regno;
7985         int ret;
7986
7987         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
7988             (dd->dc8051_ver < dc8051_ver(0, 20))) {
7989                 if (acquire_lcb_access(dd, 0) == 0) {
7990                         write_csr(dd, addr, data);
7991                         release_lcb_access(dd, 0);
7992                         return 0;
7993                 }
7994                 return -EBUSY;
7995         }
7996
7997         /* register is an index of LCB registers: (offset - base) / 8 */
7998         regno = (addr - DC_LCB_CFG_RUN) >> 3;
7999         ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8000         if (ret != HCMD_SUCCESS)
8001                 return -EBUSY;
8002         return 0;
8003 }
8004
8005 /*
8006  * Write an LCB CSR.  Access may not be in host control, so check.
8007  * Return 0 on success, -EBUSY on failure.
8008  */
8009 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8010 {
8011         struct hfi1_pportdata *ppd = dd->pport;
8012
8013         /* if up, go through the 8051 for the value */
8014         if (ppd->host_link_state & HLS_UP)
8015                 return write_lcb_via_8051(dd, addr, data);
8016         /* if going up or down, no access */
8017         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8018                 return -EBUSY;
8019         /* otherwise, host has access */
8020         write_csr(dd, addr, data);
8021         return 0;
8022 }
8023
8024 /*
8025  * Returns:
8026  *      < 0 = Linux error, not able to get access
8027  *      > 0 = 8051 command RETURN_CODE
8028  */
8029 static int do_8051_command(
8030         struct hfi1_devdata *dd,
8031         u32 type,
8032         u64 in_data,
8033         u64 *out_data)
8034 {
8035         u64 reg, completed;
8036         int return_code;
8037         unsigned long flags;
8038         unsigned long timeout;
8039
8040         hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8041
8042         /*
8043          * Alternative to holding the lock for a long time:
8044          * - keep busy wait - have other users bounce off
8045          */
8046         spin_lock_irqsave(&dd->dc8051_lock, flags);
8047
8048         /* We can't send any commands to the 8051 if it's in reset */
8049         if (dd->dc_shutdown) {
8050                 return_code = -ENODEV;
8051                 goto fail;
8052         }
8053
8054         /*
8055          * If an 8051 host command timed out previously, then the 8051 is
8056          * stuck.
8057          *
8058          * On first timeout, attempt to reset and restart the entire DC
8059          * block (including 8051). (Is this too big of a hammer?)
8060          *
8061          * If the 8051 times out a second time, the reset did not bring it
8062          * back to healthy life. In that case, fail any subsequent commands.
8063          */
8064         if (dd->dc8051_timed_out) {
8065                 if (dd->dc8051_timed_out > 1) {
8066                         dd_dev_err(dd,
8067                                    "Previous 8051 host command timed out, skipping command %u\n",
8068                                    type);
8069                         return_code = -ENXIO;
8070                         goto fail;
8071                 }
8072                 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8073                 dc_shutdown(dd);
8074                 dc_start(dd);
8075                 spin_lock_irqsave(&dd->dc8051_lock, flags);
8076         }
8077
8078         /*
8079          * If there is no timeout, then the 8051 command interface is
8080          * waiting for a command.
8081          */
8082
8083         /*
8084          * When writing an LCB CSR, out_data contains the full value to
8085          * be written, while in_data contains the relative LCB
8086          * address in 7:0.  Do the work here, rather than in the caller,
8087          * of distributing the write data to where it needs to go:
8088          *
8089          * Write data
8090          *   39:00 -> in_data[47:8]
8091          *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8092          *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
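         *
         * Note that for HCMD_WRITE_LCB_CSR the out_data argument is
         * therefore used as an input: it carries the full 64-bit value
         * to write (see write_lcb_via_8051()), while in_data supplies
         * the register index.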
8093          */
8094         if (type == HCMD_WRITE_LCB_CSR) {
8095                 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8096                 reg = ((((*out_data) >> 40) & 0xff) <<
8097                                 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8098                       | ((((*out_data) >> 48) & 0xffff) <<
8099                                 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8100                 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8101         }
8102
8103         /*
8104          * Do two writes: the first to stabilize the type and req_data, the
8105          * second to activate.
8106          */
8107         reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8108                         << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8109                 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8110                         << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8111         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8112         reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8113         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8114
8115         /* wait for completion, alternate: interrupt */
8116         timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8117         while (1) {
8118                 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8119                 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8120                 if (completed)
8121                         break;
8122                 if (time_after(jiffies, timeout)) {
8123                         dd->dc8051_timed_out++;
8124                         dd_dev_err(dd, "8051 host command %u timeout\n", type);
8125                         if (out_data)
8126                                 *out_data = 0;
8127                         return_code = -ETIMEDOUT;
8128                         goto fail;
8129                 }
8130                 udelay(2);
8131         }
8132
8133         if (out_data) {
8134                 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8135                                 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8136                 if (type == HCMD_READ_LCB_CSR) {
8137                         /* top 16 bits are in a different register */
8138                         *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8139                                 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8140                                 << (48
8141                                     - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8142                 }
8143         }
8144         return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8145                                 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8146         dd->dc8051_timed_out = 0;
8147         /*
8148          * Clear command for next user.
8149          */
8150         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8151
8152 fail:
8153         spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8154
8155         return return_code;
8156 }
8157
8158 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8159 {
8160         return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8161 }
8162
8163 static int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8164                             u8 lane_id, u32 config_data)
8165 {
8166         u64 data;
8167         int ret;
8168
8169         data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8170                 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8171                 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8172         ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8173         if (ret != HCMD_SUCCESS) {
8174                 dd_dev_err(dd,
8175                         "load 8051 config: field id %d, lane %d, err %d\n",
8176                         (int)field_id, (int)lane_id, ret);
8177         }
8178         return ret;
8179 }
8180
8181 /*
8182  * Read the 8051 firmware "registers".  Use the RAM directly.  Always
8183  * set the result, even on error.
8184  * Return 0 on success, -errno on failure
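 *
 * Layout in 8051 RAM: the general fields occupy the first
 * 4 * NUM_GENERAL_FIELDS bytes starting at offset 0, followed by
 * 4 * NUM_LANE_FIELDS bytes for each of lanes 0-3.  Any other lane_id
 * (e.g. GENERAL_CONFIG, as used by the callers in this file) addresses
 * the general fields.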
8185  */
8186 static int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8187                             u32 *result)
8188 {
8189         u64 big_data;
8190         u32 addr;
8191         int ret;
8192
8193         /* address start depends on the lane_id */
8194         if (lane_id < 4)
8195                 addr = (4 * NUM_GENERAL_FIELDS)
8196                         + (lane_id * 4 * NUM_LANE_FIELDS);
8197         else
8198                 addr = 0;
8199         addr += field_id * 4;
8200
8201         /* read is in 8-byte chunks, hardware will truncate the address down */
8202         ret = read_8051_data(dd, addr, 8, &big_data);
8203
8204         if (ret == 0) {
8205                 /* extract the 4 bytes we want */
8206                 if (addr & 0x4)
8207                         *result = (u32)(big_data >> 32);
8208                 else
8209                         *result = (u32)big_data;
8210         } else {
8211                 *result = 0;
8212                 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8213                         __func__, lane_id, field_id);
8214         }
8215
8216         return ret;
8217 }
8218
8219 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8220                               u8 continuous)
8221 {
8222         u32 frame;
8223
8224         frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8225                 | power_management << POWER_MANAGEMENT_SHIFT;
8226         return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8227                                 GENERAL_CONFIG, frame);
8228 }
8229
8230 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8231                                  u16 vl15buf, u8 crc_sizes)
8232 {
8233         u32 frame;
8234
8235         frame = (u32)vau << VAU_SHIFT
8236                 | (u32)z << Z_SHIFT
8237                 | (u32)vcu << VCU_SHIFT
8238                 | (u32)vl15buf << VL15BUF_SHIFT
8239                 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8240         return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8241                                 GENERAL_CONFIG, frame);
8242 }
8243
8244 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8245                                      u8 *flag_bits, u16 *link_widths)
8246 {
8247         u32 frame;
8248
8249         read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8250                                 &frame);
8251         *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8252         *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8253         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8254 }
8255
8256 static int write_vc_local_link_width(struct hfi1_devdata *dd,
8257                                      u8 misc_bits,
8258                                      u8 flag_bits,
8259                                      u16 link_widths)
8260 {
8261         u32 frame;
8262
8263         frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8264                 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8265                 | (u32)link_widths << LINK_WIDTH_SHIFT;
8266         return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8267                      frame);
8268 }
8269
8270 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8271                                  u8 device_rev)
8272 {
8273         u32 frame;
8274
8275         frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8276                 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8277         return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8278 }
8279
8280 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8281                                   u8 *device_rev)
8282 {
8283         u32 frame;
8284
8285         read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8286         *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8287         *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8288                         & REMOTE_DEVICE_REV_MASK;
8289 }
8290
8291 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8292 {
8293         u32 frame;
8294
8295         read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8296         *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8297         *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8298 }
8299
8300 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8301                                u8 *continuous)
8302 {
8303         u32 frame;
8304
8305         read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8306         *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8307                                         & POWER_MANAGEMENT_MASK;
8308         *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8309                                         & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8310 }
8311
8312 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8313                                   u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8314 {
8315         u32 frame;
8316
8317         read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8318         *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8319         *z = (frame >> Z_SHIFT) & Z_MASK;
8320         *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8321         *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8322         *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8323 }
8324
8325 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8326                                       u8 *remote_tx_rate,
8327                                       u16 *link_widths)
8328 {
8329         u32 frame;
8330
8331         read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8332                                 &frame);
8333         *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8334                                 & REMOTE_TX_RATE_MASK;
8335         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8336 }
8337
8338 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8339 {
8340         u32 frame;
8341
8342         read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8343         *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8344 }
8345
8346 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8347 {
8348         u32 frame;
8349
8350         read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8351         *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8352 }
8353
8354 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8355 {
8356         read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8357 }
8358
8359 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8360 {
8361         read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8362 }
8363
8364 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8365 {
8366         u32 frame;
8367         int ret;
8368
8369         *link_quality = 0;
8370         if (dd->pport->host_link_state & HLS_UP) {
8371                 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8372                                         &frame);
8373                 if (ret == 0)
8374                         *link_quality = (frame >> LINK_QUALITY_SHIFT)
8375                                                 & LINK_QUALITY_MASK;
8376         }
8377 }
8378
8379 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8380 {
8381         u32 frame;
8382
8383         read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8384         *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8385 }
8386
8387 static int read_tx_settings(struct hfi1_devdata *dd,
8388                             u8 *enable_lane_tx,
8389                             u8 *tx_polarity_inversion,
8390                             u8 *rx_polarity_inversion,
8391                             u8 *max_rate)
8392 {
8393         u32 frame;
8394         int ret;
8395
8396         ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8397         *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8398                                 & ENABLE_LANE_TX_MASK;
8399         *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8400                                 & TX_POLARITY_INVERSION_MASK;
8401         *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8402                                 & RX_POLARITY_INVERSION_MASK;
8403         *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8404         return ret;
8405 }
8406
8407 static int write_tx_settings(struct hfi1_devdata *dd,
8408                              u8 enable_lane_tx,
8409                              u8 tx_polarity_inversion,
8410                              u8 rx_polarity_inversion,
8411                              u8 max_rate)
8412 {
8413         u32 frame;
8414
8415         /* no need to mask, all variable sizes match field widths */
8416         frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8417                 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8418                 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8419                 | max_rate << MAX_RATE_SHIFT;
8420         return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8421 }
8422
8423 static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8424 {
8425         u32 frame, version, prod_id;
8426         int ret, lane;
8427
8428         /* 4 lanes */
8429         for (lane = 0; lane < 4; lane++) {
8430                 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8431                 if (ret) {
8432                         dd_dev_err(
8433                                 dd,
8434                                 "Unable to read lane %d firmware details\n",
8435                                 lane);
8436                         continue;
8437                 }
8438                 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8439                                         & SPICO_ROM_VERSION_MASK;
8440                 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8441                                         & SPICO_ROM_PROD_ID_MASK;
8442                 dd_dev_info(dd,
8443                         "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8444                         lane, version, prod_id);
8445         }
8446 }
8447
8448 /*
8449  * Read an idle LCB message.
8450  *
8451  * Returns 0 on success, -EINVAL on error
8452  */
8453 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8454 {
8455         int ret;
8456
8457         ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG,
8458                 type, data_out);
8459         if (ret != HCMD_SUCCESS) {
8460                 dd_dev_err(dd, "read idle message: type %d, err %d\n",
8461                         (u32)type, ret);
8462                 return -EINVAL;
8463         }
8464         dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8465         /* return only the payload as we already know the type */
8466         *data_out >>= IDLE_PAYLOAD_SHIFT;
8467         return 0;
8468 }
8469
8470 /*
8471  * Read an idle SMA message.  To be done in response to a notification from
8472  * the 8051.
8473  *
8474  * Returns 0 on success, -EINVAL on error
8475  */
8476 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8477 {
8478         return read_idle_message(dd,
8479                         (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, data);
8480 }
8481
8482 /*
8483  * Send an idle LCB message.
8484  *
8485  * Returns 0 on success, -EINVAL on error
8486  */
8487 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8488 {
8489         int ret;
8490
8491         dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8492         ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8493         if (ret != HCMD_SUCCESS) {
8494                 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8495                         data, ret);
8496                 return -EINVAL;
8497         }
8498         return 0;
8499 }
8500
8501 /*
8502  * Send an idle SMA message.
8503  *
8504  * Returns 0 on success, -EINVAL on error
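 *
 * The message value is placed in the idle message payload field and
 * tagged with the IDLE_SMA type before being handed to
 * send_idle_message().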
8505  */
8506 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8507 {
8508         u64 data;
8509
8510         data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT)
8511                 | ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8512         return send_idle_message(dd, data);
8513 }
8514
8515 /*
8516  * Initialize the LCB then do a quick link up.  This may or may not be
8517  * in loopback.
8518  *
8519  * return 0 on success, -errno on error
8520  */
8521 static int do_quick_linkup(struct hfi1_devdata *dd)
8522 {
8523         u64 reg;
8524         unsigned long timeout;
8525         int ret;
8526
8527         lcb_shutdown(dd, 0);
8528
8529         if (loopback) {
8530                 /* LCB_CFG_LOOPBACK.VAL = 2 */
8531                 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8532                 write_csr(dd, DC_LCB_CFG_LOOPBACK,
8533                         IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8534                 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8535         }
8536
8537         /* start the LCBs */
8538         /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8539         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8540
8541         /* simulator only loopback steps */
8542         if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8543                 /* LCB_CFG_RUN.EN = 1 */
8544                 write_csr(dd, DC_LCB_CFG_RUN,
8545                         1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8546
8547                 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8548                 timeout = jiffies + msecs_to_jiffies(10);
8549                 while (1) {
8550                         reg = read_csr(dd,
8551                                 DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8552                         if (reg)
8553                                 break;
8554                         if (time_after(jiffies, timeout)) {
8555                                 dd_dev_err(dd,
8556                                         "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8557                                 return -ETIMEDOUT;
8558                         }
8559                         udelay(2);
8560                 }
8561
8562                 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8563                         1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8564         }
8565
8566         if (!loopback) {
8567                 /*
8568                  * When doing quick linkup and not in loopback, both
8569                  * sides must be done with LCB set-up before either
8570                  * starts the quick linkup.  Put a delay here so that
8571                  * both sides can be started and have a chance to be
8572                  * done with LCB set up before resuming.
8573                  */
8574                 dd_dev_err(dd,
8575                         "Pausing for peer to be finished with LCB set up\n");
8576                 msleep(5000);
8577                 dd_dev_err(dd,
8578                         "Continuing with quick linkup\n");
8579         }
8580
8581         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8582         set_8051_lcb_access(dd);
8583
8584         /*
8585          * State "quick" LinkUp request sets the physical link state to
8586          * LinkUp without a verify capability sequence.
8587          * This state is in simulator v37 and later.
8588          */
8589         ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8590         if (ret != HCMD_SUCCESS) {
8591                 dd_dev_err(dd,
8592                         "%s: set physical link state to quick LinkUp failed with return %d\n",
8593                         __func__, ret);
8594
8595                 set_host_lcb_access(dd);
8596                 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8597
8598                 if (ret >= 0)
8599                         ret = -EINVAL;
8600                 return ret;
8601         }
8602
8603         return 0; /* success */
8604 }
8605
8606 /*
8607  * Set the SerDes to internal loopback mode.
8608  * Returns 0 on success, -errno on error.
8609  */
8610 static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8611 {
8612         int ret;
8613
8614         ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8615         if (ret == HCMD_SUCCESS)
8616                 return 0;
8617         dd_dev_err(dd,
8618                 "Set physical link state to SerDes Loopback failed with return %d\n",
8619                 ret);
8620         if (ret >= 0)
8621                 ret = -EINVAL;
8622         return ret;
8623 }
8624
8625 /*
8626  * Do all special steps to set up loopback.
8627  */
8628 static int init_loopback(struct hfi1_devdata *dd)
8629 {
8630         dd_dev_info(dd, "Entering loopback mode\n");
8631
8632         /* all loopbacks should disable self GUID check */
8633         write_csr(dd, DC_DC8051_CFG_MODE,
8634                 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
8635
8636         /*
8637          * The simulator has only one loopback option - LCB.  Switch
8638          * to that option, which includes quick link up.
8639          *
8640          * Accept all valid loopback values.
8641          */
8642         if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
8643                 && (loopback == LOOPBACK_SERDES
8644                         || loopback == LOOPBACK_LCB
8645                         || loopback == LOOPBACK_CABLE)) {
8646                 loopback = LOOPBACK_LCB;
8647                 quick_linkup = 1;
8648                 return 0;
8649         }
8650
8651         /* handle serdes loopback */
8652         if (loopback == LOOPBACK_SERDES) {
8653                 /* internal serdes loopback needs quick linkup on RTL */
8654                 if (dd->icode == ICODE_RTL_SILICON)
8655                         quick_linkup = 1;
8656                 return set_serdes_loopback_mode(dd);
8657         }
8658
8659         /* LCB loopback - handled at poll time */
8660         if (loopback == LOOPBACK_LCB) {
8661                 quick_linkup = 1; /* LCB is always quick linkup */
8662
8663                 /* not supported in emulation due to emulation RTL changes */
8664                 if (dd->icode == ICODE_FPGA_EMULATION) {
8665                         dd_dev_err(dd,
8666                                 "LCB loopback not supported in emulation\n");
8667                         return -EINVAL;
8668                 }
8669                 return 0;
8670         }
8671
8672         /* external cable loopback requires no extra steps */
8673         if (loopback == LOOPBACK_CABLE)
8674                 return 0;
8675
8676         dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
8677         return -EINVAL;
8678 }
8679
8680 /*
8681  * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
8682  * used in the Verify Capability link width attribute.
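 * Each OPA nX width bit maps to bit n-1 on the VC side; multiple width
 * bits may be set and are OR'd together.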
8683  */
8684 static u16 opa_to_vc_link_widths(u16 opa_widths)
8685 {
8686         int i;
8687         u16 result = 0;
8688
8689         static const struct link_bits {
8690                 u16 from;
8691                 u16 to;
8692         } opa_link_xlate[] = {
8693                 { OPA_LINK_WIDTH_1X, 1 << (1-1)  },
8694                 { OPA_LINK_WIDTH_2X, 1 << (2-1)  },
8695                 { OPA_LINK_WIDTH_3X, 1 << (3-1)  },
8696                 { OPA_LINK_WIDTH_4X, 1 << (4-1)  },
8697         };
8698
8699         for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
8700                 if (opa_widths & opa_link_xlate[i].from)
8701                         result |= opa_link_xlate[i].to;
8702         }
8703         return result;
8704 }
8705
8706 /*
8707  * Set link attributes before moving to polling.
8708  */
8709 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
8710 {
8711         struct hfi1_devdata *dd = ppd->dd;
8712         u8 enable_lane_tx;
8713         u8 tx_polarity_inversion;
8714         u8 rx_polarity_inversion;
8715         int ret;
8716
8717         /* reset our fabric serdes to clear any lingering problems */
8718         fabric_serdes_reset(dd);
8719
8720         /* set the local tx rate - need to read-modify-write */
8721         ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
8722                 &rx_polarity_inversion, &ppd->local_tx_rate);
8723         if (ret)
8724                 goto set_local_link_attributes_fail;
8725
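        /*
         * 8051 firmware older than version 0.20 takes a single rate
         * selection (1 when 25G is enabled, 0 otherwise); newer firmware
         * takes a bit mask of all enabled rates (bit 0 = 12.5G,
         * bit 1 = 25G).
         */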
8726         if (dd->dc8051_ver < dc8051_ver(0, 20)) {
8727                 /* set the tx rate to the fastest enabled */
8728                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8729                         ppd->local_tx_rate = 1;
8730                 else
8731                         ppd->local_tx_rate = 0;
8732         } else {
8733                 /* set the tx rate to all enabled */
8734                 ppd->local_tx_rate = 0;
8735                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8736                         ppd->local_tx_rate |= 2;
8737                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
8738                         ppd->local_tx_rate |= 1;
8739         }
8740
8741         enable_lane_tx = 0xF; /* enable all four lanes */
8742         ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
8743                      rx_polarity_inversion, ppd->local_tx_rate);
8744         if (ret != HCMD_SUCCESS)
8745                 goto set_local_link_attributes_fail;
8746
8747         /*
8748          * DC supports continuous updates.
8749          */
8750         ret = write_vc_local_phy(dd, 0 /* no power management */,
8751                                      1 /* continuous updates */);
8752         if (ret != HCMD_SUCCESS)
8753                 goto set_local_link_attributes_fail;
8754
8755         /* z=1 in the next call: AU of 0 is not supported by the hardware */
8756         ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
8757                                     ppd->port_crc_mode_enabled);
8758         if (ret != HCMD_SUCCESS)
8759                 goto set_local_link_attributes_fail;
8760
8761         ret = write_vc_local_link_width(dd, 0, 0,
8762                      opa_to_vc_link_widths(ppd->link_width_enabled));
8763         if (ret != HCMD_SUCCESS)
8764                 goto set_local_link_attributes_fail;
8765
8766         /* let peer know who we are */
8767         ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
8768         if (ret == HCMD_SUCCESS)
8769                 return 0;
8770
8771 set_local_link_attributes_fail:
8772         dd_dev_err(dd,
8773                 "Failed to set local link attributes, return 0x%x\n",
8774                 ret);
8775         return ret;
8776 }
8777
8778 /*
8779  * Call this to start the link.  Schedule a retry if the cable is not
8780  * present or if unable to start polling.  Do not do anything if the
8781  * link is disabled.  Returns 0 if link is disabled or moved to polling
8782  */
8783 int start_link(struct hfi1_pportdata *ppd)
8784 {
8785         if (!ppd->link_enabled) {
8786                 dd_dev_info(ppd->dd,
8787                         "%s: stopping link start because link is disabled\n",
8788                         __func__);
8789                 return 0;
8790         }
8791         if (!ppd->driver_link_ready) {
8792                 dd_dev_info(ppd->dd,
8793                         "%s: stopping link start because driver is not ready\n",
8794                         __func__);
8795                 return 0;
8796         }
8797
8798         if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
8799                         loopback == LOOPBACK_LCB ||
8800                         ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
8801                 return set_link_state(ppd, HLS_DN_POLL);
8802
8803         dd_dev_info(ppd->dd,
8804                 "%s: stopping link start because no cable is present\n",
8805                 __func__);
8806         return -EAGAIN;
8807 }
8808
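/*
 * Pulse the QSFP module's RESET_N pin: enable it as an output, drive it
 * low briefly, then drive it high again to bring the module out of reset.
 */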
8809 static void reset_qsfp(struct hfi1_pportdata *ppd)
8810 {
8811         struct hfi1_devdata *dd = ppd->dd;
8812         u64 mask, qsfp_mask;
8813
8814         mask = (u64)QSFP_HFI0_RESET_N;
8815         qsfp_mask = read_csr(dd,
8816                 dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
8817         qsfp_mask |= mask;
8818         write_csr(dd,
8819                 dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE,
8820                 qsfp_mask);
8821
8822         qsfp_mask = read_csr(dd,
8823                 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
8824         qsfp_mask &= ~mask;
8825         write_csr(dd,
8826                 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT,
8827                 qsfp_mask);
8828
8829         udelay(10);
8830
8831         qsfp_mask |= mask;
8832         write_csr(dd,
8833                 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT,
8834                 qsfp_mask);
8835 }
8836
8837 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
8838                                         u8 *qsfp_interrupt_status)
8839 {
8840         struct hfi1_devdata *dd = ppd->dd;
8841
8842         if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
8843                 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
8844                 dd_dev_info(dd,
8845                         "%s: QSFP cable temperature too high\n",
8846                         __func__);
8847
8848         if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
8849                 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
8850                 dd_dev_info(dd,
8851                         "%s: QSFP cable temperature too low\n",
8852                         __func__);
8853
8854         if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
8855                 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
8856                 dd_dev_info(dd,
8857                         "%s: QSFP supply voltage too high\n",
8858                         __func__);
8859
8860         if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
8861                 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
8862                 dd_dev_info(dd,
8863                         "%s: QSFP supply voltage too low\n",
8864                         __func__);
8865
8866         /* Byte 2 is vendor specific */
8867
8868         if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
8869                 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
8870                 dd_dev_info(dd,
8871                         "%s: Cable RX channel 1/2 power too high\n",
8872                         __func__);
8873
8874         if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
8875                 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
8876                 dd_dev_info(dd,
8877                         "%s: Cable RX channel 1/2 power too low\n",
8878                         __func__);
8879
8880         if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
8881                 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
8882                 dd_dev_info(dd,
8883                         "%s: Cable RX channel 3/4 power too high\n",
8884                         __func__);
8885
8886         if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
8887                 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
8888                 dd_dev_info(dd,
8889                         "%s: Cable RX channel 3/4 power too low\n",
8890                         __func__);
8891
8892         if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
8893                 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
8894                 dd_dev_info(dd,
8895                         "%s: Cable TX channel 1/2 bias too high\n",
8896                         __func__);
8897
8898         if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
8899                 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
8900                 dd_dev_info(dd,
8901                         "%s: Cable TX channel 1/2 bias too low\n",
8902                         __func__);
8903
8904         if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
8905                 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
8906                 dd_dev_info(dd,
8907                         "%s: Cable TX channel 3/4 bias too high\n",
8908                         __func__);
8909
8910         if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
8911                 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
8912                 dd_dev_info(dd,
8913                         "%s: Cable TX channel 3/4 bias too low\n",
8914                         __func__);
8915
8916         if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
8917                 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
8918                 dd_dev_info(dd,
8919                         "%s: Cable TX channel 1/2 power too high\n",
8920                         __func__);
8921
8922         if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
8923                 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
8924                 dd_dev_info(dd,
8925                         "%s: Cable TX channel 1/2 power too low\n",
8926                         __func__);
8927
8928         if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
8929                 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
8930                 dd_dev_info(dd,
8931                         "%s: Cable TX channel 3/4 power too high\n",
8932                         __func__);
8933
8934         if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
8935                 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
8936                 dd_dev_info(dd,
8937                         "%s: Cable TX channel 3/4 power too low\n",
8938                         __func__);
8939
8940         /* Bytes 9-10 and 11-12 are reserved */
8941         /* Bytes 13-15 are vendor specific */
8942
8943         return 0;
8944 }
8945
8946 static int do_pre_lni_host_behaviors(struct hfi1_pportdata *ppd)
8947 {
8948         refresh_qsfp_cache(ppd, &ppd->qsfp_info);
8949
8950         return 0;
8951 }
8952
8953 static int do_qsfp_intr_fallback(struct hfi1_pportdata *ppd)
8954 {
8955         struct hfi1_devdata *dd = ppd->dd;
8956         u8 qsfp_interrupt_status = 0;
8957
8958         if (qsfp_read(ppd, dd->hfi1_id, 2, &qsfp_interrupt_status, 1)
8959                 != 1) {
8960                 dd_dev_info(dd,
8961                         "%s: Failed to read status of QSFP module\n",
8962                         __func__);
8963                 return -EIO;
8964         }
8965
8966         /* We don't care about alarms & warnings with a non-functional INT_N */
8967         if (!(qsfp_interrupt_status & QSFP_DATA_NOT_READY))
8968                 do_pre_lni_host_behaviors(ppd);
8969
8970         return 0;
8971 }
8972
8973 /* This routine will only be scheduled if the QSFP module is present */
8974 static void qsfp_event(struct work_struct *work)
8975 {
8976         struct qsfp_data *qd;
8977         struct hfi1_pportdata *ppd;
8978         struct hfi1_devdata *dd;
8979
8980         qd = container_of(work, struct qsfp_data, qsfp_work);
8981         ppd = qd->ppd;
8982         dd = ppd->dd;
8983
8984         /* Sanity check */
8985         if (!qsfp_mod_present(ppd))
8986                 return;
8987
8988         /*
8989          * Turn DC back on after the cable has been
8990          * re-inserted. Up until now, the DC has been in
8991          * reset to save power.
8992          */
8993         dc_start(dd);
8994
8995         if (qd->cache_refresh_required) {
8996                 msleep(3000);
8997                 reset_qsfp(ppd);
8998
8999                 /* Check for QSFP interrupt after t_init (SFF 8679)
9000                  * + extra
9001                  */
9002                 msleep(3000);
9003                 if (!qd->qsfp_interrupt_functional) {
9004                         if (do_qsfp_intr_fallback(ppd) < 0)
9005                                 dd_dev_info(dd, "%s: QSFP fallback failed\n",
9006                                         __func__);
9007                         ppd->driver_link_ready = 1;
9008                         start_link(ppd);
9009                 }
9010         }
9011
9012         if (qd->check_interrupt_flags) {
9013                 u8 qsfp_interrupt_status[16] = {0,};
9014
9015                 if (qsfp_read(ppd, dd->hfi1_id, 6,
9016                               &qsfp_interrupt_status[0], 16) != 16) {
9017                         dd_dev_info(dd,
9018                                 "%s: Failed to read status of QSFP module\n",
9019                                 __func__);
9020                 } else {
9021                         unsigned long flags;
9022                         u8 data_status;
9023
9024                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9025                         ppd->qsfp_info.check_interrupt_flags = 0;
9026                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9027                                                                 flags);
9028
9029                         if (qsfp_read(ppd, dd->hfi1_id, 2, &data_status, 1)
9030                                  != 1) {
9031                                 dd_dev_info(dd,
9032                                 "%s: Failed to read status of QSFP module\n",
9033                                         __func__);
9034                         }
9035                         if (!(data_status & QSFP_DATA_NOT_READY)) {
9036                                 do_pre_lni_host_behaviors(ppd);
9037                                 start_link(ppd);
9038                         } else
9039                                 handle_qsfp_error_conditions(ppd,
9040                                                 qsfp_interrupt_status);
9041                 }
9042         }
9043 }
9044
9045 void init_qsfp(struct hfi1_pportdata *ppd)
9046 {
9047         struct hfi1_devdata *dd = ppd->dd;
9048         u64 qsfp_mask;
9049
9050         if (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9051                         ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9052                 ppd->driver_link_ready = 1;
9053                 return;
9054         }
9055
9056         ppd->qsfp_info.ppd = ppd;
9057         INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
9058
9059         qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9060         /* Clear current status to avoid spurious interrupts */
9061         write_csr(dd,
9062                         dd->hfi1_id ?
9063                                 ASIC_QSFP2_CLEAR :
9064                                 ASIC_QSFP1_CLEAR,
9065                 qsfp_mask);
9066
9067         /* Handle active low nature of INT_N and MODPRST_N pins */
9068         if (qsfp_mod_present(ppd))
9069                 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9070         write_csr(dd,
9071                   dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9072                   qsfp_mask);
9073
9074         /* Allow only INT_N and MODPRST_N to trigger QSFP interrupts */
9075         qsfp_mask |= (u64)QSFP_HFI0_MODPRST_N;
9076         write_csr(dd,
9077                 dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9078                 qsfp_mask);
9079
9080         if (qsfp_mod_present(ppd)) {
9081                 msleep(3000);
9082                 reset_qsfp(ppd);
9083
9084                 /* Check for QSFP interrupt after t_init (SFF 8679)
9085                  * + extra
9086                  */
9087                 msleep(3000);
9088                 if (!ppd->qsfp_info.qsfp_interrupt_functional) {
9089                         if (do_qsfp_intr_fallback(ppd) < 0)
9090                                 dd_dev_info(dd,
9091                                         "%s: QSFP fallback failed\n",
9092                                         __func__);
9093                         ppd->driver_link_ready = 1;
9094                 }
9095         }
9096 }
9097
9098 /*
9099  * Do a one-time initialize of the LCB block.
9100  */
9101 static void init_lcb(struct hfi1_devdata *dd)
9102 {
9103         /* the DC has been reset earlier in the driver load */
9104
9105         /* set LCB for cclk loopback on the port */
9106         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9107         write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9108         write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9109         write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9110         write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9111         write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9112         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9113 }
9114
9115 int bringup_serdes(struct hfi1_pportdata *ppd)
9116 {
9117         struct hfi1_devdata *dd = ppd->dd;
9118         u64 guid;
9119         int ret;
9120
9121         if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9122                 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9123
9124         guid = ppd->guid;
9125         if (!guid) {
9126                 if (dd->base_guid)
9127                         guid = dd->base_guid + ppd->port - 1;
9128                 ppd->guid = guid;
9129         }
9130
9131         /* the link defaults to enabled */
9132         ppd->link_enabled = 1;
9133         /* Set linkinit_reason on power up per OPA spec */
9134         ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9135
9136         /* one-time init of the LCB */
9137         init_lcb(dd);
9138
9139         if (loopback) {
9140                 ret = init_loopback(dd);
9141                 if (ret < 0)
9142                         return ret;
9143         }
9144
9145         return start_link(ppd);
9146 }
9147
9148 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9149 {
9150         struct hfi1_devdata *dd = ppd->dd;
9151
9152         /*
9153          * Shut down the link and keep it down.  First clear the flag that
9154          * says the driver wants to allow the link to be up (driver_link_ready).
9155          * Then make sure the link is not automatically restarted
9156          * (link_enabled).  Cancel any pending restart.  And finally
9157          * go offline.
9158          */
9159         ppd->driver_link_ready = 0;
9160         ppd->link_enabled = 0;
9161
9162         set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9163           OPA_LINKDOWN_REASON_SMA_DISABLED);
9164         set_link_state(ppd, HLS_DN_OFFLINE);
9165
9166         /* disable the port */
9167         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9168 }
9169
9170 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9171 {
9172         struct hfi1_pportdata *ppd;
9173         int i;
9174
9175         ppd = (struct hfi1_pportdata *)(dd + 1);
9176         for (i = 0; i < dd->num_pports; i++, ppd++) {
9177                 ppd->ibport_data.rc_acks = NULL;
9178                 ppd->ibport_data.rc_qacks = NULL;
9179                 ppd->ibport_data.rc_acks = alloc_percpu(u64);
9180                 ppd->ibport_data.rc_qacks = alloc_percpu(u64);
9181                 ppd->ibport_data.rc_delayed_comp = alloc_percpu(u64);
9182                 if ((ppd->ibport_data.rc_acks == NULL) ||
9183                     (ppd->ibport_data.rc_delayed_comp == NULL) ||
9184                     (ppd->ibport_data.rc_qacks == NULL))
9185                         return -ENOMEM;
9186         }
9187
9188         return 0;
9189 }
9190
9191 static const char * const pt_names[] = {
9192         "expected",
9193         "eager",
9194         "invalid"
9195 };
9196
9197 static const char *pt_name(u32 type)
9198 {
9199         return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9200 }
9201
9202 /*
9203  * index is the index into the receive array
9204  */
9205 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9206                   u32 type, unsigned long pa, u16 order)
9207 {
9208         u64 reg;
9209         void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9210                               (dd->kregbase + RCV_ARRAY));
9211
9212         if (!(dd->flags & HFI1_PRESENT))
9213                 goto done;
9214
9215         if (type == PT_INVALID) {
9216                 pa = 0;
9217         } else if (type > PT_INVALID) {
9218                 dd_dev_err(dd,
9219                         "unexpected receive array type %u for index %u, not handled\n",
9220                         type, index);
9221                 goto done;
9222         }
9223
9224         hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9225                   pt_name(type), index, pa, (unsigned long)order);
9226
9227 #define RT_ADDR_SHIFT 12        /* 4KB kernel address boundary */
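        /*
         * The RcvArray entry built below packs the write-enable bit, the
         * buffer size code (order), and the 4KB-aligned physical address
         * into a single 64-bit register value.
         */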
9228         reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9229                 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9230                 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9231                                         << RCV_ARRAY_RT_ADDR_SHIFT;
9232         writeq(reg, base + (index * 8));
9233
9234         if (type == PT_EAGER)
9235                 /*
9236                  * Eager entries are written one-by-one so we have to push them
9237                  * after we write the entry.
9238                  */
9239                 flush_wc();
9240 done:
9241         return;
9242 }
9243
9244 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9245 {
9246         struct hfi1_devdata *dd = rcd->dd;
9247         u32 i;
9248
9249         /* this could be optimized */
9250         for (i = rcd->eager_base; i < rcd->eager_base +
9251                      rcd->egrbufs.alloced; i++)
9252                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9253
9254         for (i = rcd->expected_base;
9255                         i < rcd->expected_base + rcd->expected_count; i++)
9256                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9257 }
9258
9259 int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9260                         struct hfi1_ctxt_info *kinfo)
9261 {
9262         kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9263                 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9264         return 0;
9265 }
9266
9267 struct hfi1_message_header *hfi1_get_msgheader(
9268                                 struct hfi1_devdata *dd, __le32 *rhf_addr)
9269 {
9270         u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9271
9272         return (struct hfi1_message_header *)
9273                 (rhf_addr - dd->rhf_offset + offset);
9274 }
9275
9276 static const char * const ib_cfg_name_strings[] = {
9277         "HFI1_IB_CFG_LIDLMC",
9278         "HFI1_IB_CFG_LWID_DG_ENB",
9279         "HFI1_IB_CFG_LWID_ENB",
9280         "HFI1_IB_CFG_LWID",
9281         "HFI1_IB_CFG_SPD_ENB",
9282         "HFI1_IB_CFG_SPD",
9283         "HFI1_IB_CFG_RXPOL_ENB",
9284         "HFI1_IB_CFG_LREV_ENB",
9285         "HFI1_IB_CFG_LINKLATENCY",
9286         "HFI1_IB_CFG_HRTBT",
9287         "HFI1_IB_CFG_OP_VLS",
9288         "HFI1_IB_CFG_VL_HIGH_CAP",
9289         "HFI1_IB_CFG_VL_LOW_CAP",
9290         "HFI1_IB_CFG_OVERRUN_THRESH",
9291         "HFI1_IB_CFG_PHYERR_THRESH",
9292         "HFI1_IB_CFG_LINKDEFAULT",
9293         "HFI1_IB_CFG_PKEYS",
9294         "HFI1_IB_CFG_MTU",
9295         "HFI1_IB_CFG_LSTATE",
9296         "HFI1_IB_CFG_VL_HIGH_LIMIT",
9297         "HFI1_IB_CFG_PMA_TICKS",
9298         "HFI1_IB_CFG_PORT"
9299 };
9300
9301 static const char *ib_cfg_name(int which)
9302 {
9303         if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9304                 return "invalid";
9305         return ib_cfg_name_strings[which];
9306 }
9307
9308 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9309 {
9310         struct hfi1_devdata *dd = ppd->dd;
9311         int val = 0;
9312
9313         switch (which) {
9314         case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9315                 val = ppd->link_width_enabled;
9316                 break;
9317         case HFI1_IB_CFG_LWID: /* currently active Link-width */
9318                 val = ppd->link_width_active;
9319                 break;
9320         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9321                 val = ppd->link_speed_enabled;
9322                 break;
9323         case HFI1_IB_CFG_SPD: /* current Link speed */
9324                 val = ppd->link_speed_active;
9325                 break;
9326
9327         case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9328         case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9329         case HFI1_IB_CFG_LINKLATENCY:
9330                 goto unimplemented;
9331
9332         case HFI1_IB_CFG_OP_VLS:
9333                 val = ppd->vls_operational;
9334                 break;
9335         case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9336                 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9337                 break;
9338         case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9339                 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9340                 break;
9341         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9342                 val = ppd->overrun_threshold;
9343                 break;
9344         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9345                 val = ppd->phy_error_threshold;
9346                 break;
9347         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9348                 val = dd->link_default;
9349                 break;
9350
9351         case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9352         case HFI1_IB_CFG_PMA_TICKS:
9353         default:
9354 unimplemented:
9355                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9356                         dd_dev_info(
9357                                 dd,
9358                                 "%s: which %s: not implemented\n",
9359                                 __func__,
9360                                 ib_cfg_name(which));
9361                 break;
9362         }
9363
9364         return val;
9365 }
9366
9367 /*
9368  * The largest MAD packet size.
9369  */
9370 #define MAX_MAD_PACKET 2048
9371
9372 /*
9373  * Return the maximum header bytes that can go on the _wire_
9374  * for this device. This count includes the ICRC which is
9375  * not part of the packet held in memory but it is appended
9376  * by the HW.
9377  * This is dependent on the device's receive header entry size.
9378  * HFI allows this to be set per-receive context, but the
9379  * driver presently enforces a global value.
9380  */
9381 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9382 {
9383         /*
9384          * The maximum non-payload (MTU) bytes in LRH.PktLen are
9385          * the Receive Header Entry Size minus the PBC (or RHF) size
9386          * plus one DW for the ICRC appended by HW.
9387          *
9388          * dd->rcd[0].rcvhdrqentsize is in DW.
9389          * We use rcd[0] as all contexts will have the same value. Also,
9390          * the first kernel context would have been allocated by now so
9391          * we are guaranteed a valid value.
9392          */
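        /*
         * Hypothetical worked example: with a receive header entry size of
         * 32 DW, this evaluates to (32 - 2 + 1) * 4 = 124 bytes on the wire.
         */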
9393         return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9394 }
9395
9396 /*
9397  * Set Send Length
9398  * @ppd - per port data
9399  *
9400  * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
9401  * registers compare against LRH.PktLen, so use the max bytes included
9402  * in the LRH.
9403  *
9404  * This routine changes all VL values except VL15, which it maintains at
9405  * the same value.
9406  */
9407 static void set_send_length(struct hfi1_pportdata *ppd)
9408 {
9409         struct hfi1_devdata *dd = ppd->dd;
9410         u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9411         u32 maxvlmtu = dd->vld[15].mtu;
9412         u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9413                               & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9414                 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9415         int i;
9416
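        /*
         * VLs 0-3 are packed into SEND_LEN_CHECK0 and VLs 4-7 into
         * SEND_LEN_CHECK1; the VL15 field of SEND_LEN_CHECK1 was pre-loaded
         * into len2 above.
         */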
9417         for (i = 0; i < ppd->vls_supported; i++) {
9418                 if (dd->vld[i].mtu > maxvlmtu)
9419                         maxvlmtu = dd->vld[i].mtu;
9420                 if (i <= 3)
9421                         len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9422                                  & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9423                                 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9424                 else
9425                         len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9426                                  & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9427                                 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9428         }
9429         write_csr(dd, SEND_LEN_CHECK0, len1);
9430         write_csr(dd, SEND_LEN_CHECK1, len2);
9431         /* adjust kernel credit return thresholds based on new MTUs */
9432         /* all kernel receive contexts have the same hdrqentsize */
9433         for (i = 0; i < ppd->vls_supported; i++) {
9434                 sc_set_cr_threshold(dd->vld[i].sc,
9435                         sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu,
9436                                 dd->rcd[0]->rcvhdrqentsize));
9437         }
9438         sc_set_cr_threshold(dd->vld[15].sc,
9439                 sc_mtu_to_threshold(dd->vld[15].sc, dd->vld[15].mtu,
9440                         dd->rcd[0]->rcvhdrqentsize));
9441
9442         /* Adjust maximum MTU for the port in DC */
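        /*
         * The MTU cap is a power-of-two code: a 10KB MTU uses the dedicated
         * 10240 encoding, otherwise the code is ilog2(MTU / 256) + 1
         * (e.g. 2048 -> 4, 4096 -> 5, 8192 -> 6).
         */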
9443         dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9444                 (ilog2(maxvlmtu >> 8) + 1);
9445         len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9446         len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9447         len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9448                 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9449         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9450 }
9451
9452 static void set_lidlmc(struct hfi1_pportdata *ppd)
9453 {
9454         int i;
9455         u64 sreg = 0;
9456         struct hfi1_devdata *dd = ppd->dd;
9457         u32 mask = ~((1U << ppd->lmc) - 1);
9458         u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9459
9460         if (dd->hfi1_snoop.mode_flag)
9461                 dd_dev_info(dd, "Set lid/lmc while snooping");
9462
9463         c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9464                 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9465         c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9466                         << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
9467               ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9468                         << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9469         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9470
9471         /*
9472          * Iterate over all the send contexts and set their SLID check
9473          */
9474         sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9475                         SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9476                (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9477                         SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9478
9479         for (i = 0; i < dd->chip_send_contexts; i++) {
9480                 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9481                           i, (u32)sreg);
9482                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9483         }
9484
9485         /* Now we have to do the same thing for the sdma engines */
9486         sdma_update_lmc(dd, mask, ppd->lid);
9487 }
9488
9489 static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9490 {
9491         unsigned long timeout;
9492         u32 curr_state;
9493
9494         timeout = jiffies + msecs_to_jiffies(msecs);
9495         while (1) {
9496                 curr_state = read_physical_state(dd);
9497                 if (curr_state == state)
9498                         break;
9499                 if (time_after(jiffies, timeout)) {
9500                         dd_dev_err(dd,
9501                                 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9502                                 state, curr_state);
9503                         return -ETIMEDOUT;
9504                 }
9505                 usleep_range(1950, 2050); /* sleep 2ms-ish */
9506         }
9507
9508         return 0;
9509 }
9510
9511 /*
9512  * Helper for set_link_state().  Do not call except from that routine.
9513  * Expects ppd->hls_mutex to be held.
9514  *
9515  * @rem_reason value to be sent to the neighbor
9516  *
9517  * LinkDownReasons only set if transition succeeds.
9518  */
9519 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9520 {
9521         struct hfi1_devdata *dd = ppd->dd;
9522         u32 pstate, previous_state;
9523         u32 last_local_state;
9524         u32 last_remote_state;
9525         int ret;
9526         int do_transition;
9527         int do_wait;
9528
9529         previous_state = ppd->host_link_state;
9530         ppd->host_link_state = HLS_GOING_OFFLINE;
9531         pstate = read_physical_state(dd);
9532         if (pstate == PLS_OFFLINE) {
9533                 do_transition = 0;      /* in right state */
9534                 do_wait = 0;            /* ...no need to wait */
9535         } else if ((pstate & 0xff) == PLS_OFFLINE) {
9536                 do_transition = 0;      /* in an offline transient state */
9537                 do_wait = 1;            /* ...wait for it to settle */
9538         } else {
9539                 do_transition = 1;      /* need to move to offline */
9540                 do_wait = 1;            /* ...will need to wait */
9541         }
9542
9543         if (do_transition) {
9544                 ret = set_physical_link_state(dd,
9545                         PLS_OFFLINE | (rem_reason << 8));
9546
9547                 if (ret != HCMD_SUCCESS) {
9548                         dd_dev_err(dd,
9549                                 "Failed to transition to Offline link state, return %d\n",
9550                                 ret);
9551                         return -EINVAL;
9552                 }
9553                 if (ppd->offline_disabled_reason == OPA_LINKDOWN_REASON_NONE)
9554                         ppd->offline_disabled_reason =
9555                         OPA_LINKDOWN_REASON_TRANSIENT;
9556         }
9557
9558         if (do_wait) {
9559                 /* it can take a while for the link to go down */
9560                 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
9561                 if (ret < 0)
9562                         return ret;
9563         }
9564
9565         /* make sure the logical state is also down */
9566         wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9567
9568         /*
9569          * Now in charge of LCB - must be after the physical state is
9570          * offline.quiet and before host_link_state is changed.
9571          */
9572         set_host_lcb_access(dd);
9573         write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9574         ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9575
9576         /*
9577          * The LNI has a mandatory wait time after the physical state
9578          * moves to Offline.Quiet.  The wait time may be different
9579          * depending on how the link went down.  The 8051 firmware
9580          * will observe the needed wait time and only move to ready
9581          * when that is completed.  The largest of the quiet timeouts
9582          * is 6s, so wait that long and then at least 0.5s more for
9583          * other transitions, and another 0.5s for a buffer.
9584          */
9585         ret = wait_fm_ready(dd, 7000);
9586         if (ret) {
9587                 dd_dev_err(dd,
9588                         "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
9589                 /* state is really offline, so make it so */
9590                 ppd->host_link_state = HLS_DN_OFFLINE;
9591                 return ret;
9592         }
9593
9594         /*
9595          * The state is now offline and the 8051 is ready to accept host
9596          * requests.
9597          *      - change our state
9598          *      - notify others if we were previously in a linkup state
9599          */
9600         ppd->host_link_state = HLS_DN_OFFLINE;
9601         if (previous_state & HLS_UP) {
9602                 /* went down while link was up */
9603                 handle_linkup_change(dd, 0);
9604         } else if (previous_state
9605                         & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9606                 /* went down while attempting link up */
9607                 /* byte 1 of last_*_state is the failure reason */
9608                 read_last_local_state(dd, &last_local_state);
9609                 read_last_remote_state(dd, &last_remote_state);
9610                 dd_dev_err(dd,
9611                         "LNI failure last states: local 0x%08x, remote 0x%08x\n",
9612                         last_local_state, last_remote_state);
9613         }
9614
9615         /* the active link width (downgrade) is 0 on link down */
9616         ppd->link_width_active = 0;
9617         ppd->link_width_downgrade_tx_active = 0;
9618         ppd->link_width_downgrade_rx_active = 0;
9619         ppd->current_egress_rate = 0;
9620         return 0;
9621 }
9622
9623 /* return the link state name */
9624 static const char *link_state_name(u32 state)
9625 {
9626         const char *name;
9627         int n = ilog2(state);
9628         static const char * const names[] = {
9629                 [__HLS_UP_INIT_BP]       = "INIT",
9630                 [__HLS_UP_ARMED_BP]      = "ARMED",
9631                 [__HLS_UP_ACTIVE_BP]     = "ACTIVE",
9632                 [__HLS_DN_DOWNDEF_BP]    = "DOWNDEF",
9633                 [__HLS_DN_POLL_BP]       = "POLL",
9634                 [__HLS_DN_DISABLE_BP]    = "DISABLE",
9635                 [__HLS_DN_OFFLINE_BP]    = "OFFLINE",
9636                 [__HLS_VERIFY_CAP_BP]    = "VERIFY_CAP",
9637                 [__HLS_GOING_UP_BP]      = "GOING_UP",
9638                 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
9639                 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
9640         };
9641
9642         name = n < ARRAY_SIZE(names) ? names[n] : NULL;
9643         return name ? name : "unknown";
9644 }
9645
9646 /* return the link state reason name */
9647 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
9648 {
9649         if (state == HLS_UP_INIT) {
9650                 switch (ppd->linkinit_reason) {
9651                 case OPA_LINKINIT_REASON_LINKUP:
9652                         return "(LINKUP)";
9653                 case OPA_LINKINIT_REASON_FLAPPING:
9654                         return "(FLAPPING)";
9655                 case OPA_LINKINIT_OUTSIDE_POLICY:
9656                         return "(OUTSIDE_POLICY)";
9657                 case OPA_LINKINIT_QUARANTINED:
9658                         return "(QUARANTINED)";
9659                 case OPA_LINKINIT_INSUFIC_CAPABILITY:
9660                         return "(INSUFIC_CAPABILITY)";
9661                 default:
9662                         break;
9663                 }
9664         }
9665         return "";
9666 }
9667
9668 /*
9669  * driver_physical_state - convert the driver's notion of a port's
9670  * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
9671  * Return -1 (converted to a u32) to indicate error.
9672  */
9673 u32 driver_physical_state(struct hfi1_pportdata *ppd)
9674 {
9675         switch (ppd->host_link_state) {
9676         case HLS_UP_INIT:
9677         case HLS_UP_ARMED:
9678         case HLS_UP_ACTIVE:
9679                 return IB_PORTPHYSSTATE_LINKUP;
9680         case HLS_DN_POLL:
9681                 return IB_PORTPHYSSTATE_POLLING;
9682         case HLS_DN_DISABLE:
9683                 return IB_PORTPHYSSTATE_DISABLED;
9684         case HLS_DN_OFFLINE:
9685                 return OPA_PORTPHYSSTATE_OFFLINE;
9686         case HLS_VERIFY_CAP:
9687                 return IB_PORTPHYSSTATE_POLLING;
9688         case HLS_GOING_UP:
9689                 return IB_PORTPHYSSTATE_POLLING;
9690         case HLS_GOING_OFFLINE:
9691                 return OPA_PORTPHYSSTATE_OFFLINE;
9692         case HLS_LINK_COOLDOWN:
9693                 return OPA_PORTPHYSSTATE_OFFLINE;
9694         case HLS_DN_DOWNDEF:
9695         default:
9696                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9697                            ppd->host_link_state);
9698                 return -1;
9699         }
9700 }
9701
9702 /*
9703  * driver_logical_state - convert the driver's notion of a port's
9704  * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
9705  * (converted to a u32) to indicate error.
9706  */
9707 u32 driver_logical_state(struct hfi1_pportdata *ppd)
9708 {
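        /* any valid state that is not an up state reports DOWN */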
9709         if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
9710                 return IB_PORT_DOWN;
9711
9712         switch (ppd->host_link_state & HLS_UP) {
9713         case HLS_UP_INIT:
9714                 return IB_PORT_INIT;
9715         case HLS_UP_ARMED:
9716                 return IB_PORT_ARMED;
9717         case HLS_UP_ACTIVE:
9718                 return IB_PORT_ACTIVE;
9719         default:
9720                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9721                            ppd->host_link_state);
9722                 return -1;
9723         }
9724 }
9725
9726 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
9727                           u8 neigh_reason, u8 rem_reason)
9728 {
9729         if (ppd->local_link_down_reason.latest == 0 &&
9730             ppd->neigh_link_down_reason.latest == 0) {
9731                 ppd->local_link_down_reason.latest = lcl_reason;
9732                 ppd->neigh_link_down_reason.latest = neigh_reason;
9733                 ppd->remote_link_down_reason = rem_reason;
9734         }
9735 }
9736
9737 /*
9738  * Change the physical and/or logical link state.
9739  *
9740  * Do not call this routine while inside an interrupt.  It contains
9741  * calls to routines that can take multiple seconds to finish.
9742  *
9743  * Returns 0 on success, -errno on failure.
9744  */
9745 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
9746 {
9747         struct hfi1_devdata *dd = ppd->dd;
9748         struct ib_event event = {.device = NULL};
9749         int ret1, ret = 0;
9750         int was_up, is_down;
9751         int orig_new_state, poll_bounce;
9752
9753         mutex_lock(&ppd->hls_lock);
9754
9755         orig_new_state = state;
9756         if (state == HLS_DN_DOWNDEF)
9757                 state = dd->link_default;
9758
9759         /* interpret poll -> poll as a link bounce */
9760         poll_bounce = ppd->host_link_state == HLS_DN_POLL
9761                                 && state == HLS_DN_POLL;
9762
9763         dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
9764                 link_state_name(ppd->host_link_state),
9765                 link_state_name(orig_new_state),
9766                 poll_bounce ? "(bounce) " : "",
9767                 link_state_reason_name(ppd, state));
9768
9769         was_up = !!(ppd->host_link_state & HLS_UP);
9770
9771         /*
9772          * If we're going to a (HLS_*) link state that implies the logical
9773          * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
9774          * reset is_sm_config_started to 0.
9775          */
9776         if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
9777                 ppd->is_sm_config_started = 0;
9778
9779         /*
9780          * Do nothing if the states match.  Let a poll to poll link bounce
9781          * go through.
9782          */
9783         if (ppd->host_link_state == state && !poll_bounce)
9784                 goto done;
9785
9786         switch (state) {
9787         case HLS_UP_INIT:
9788                 if (ppd->host_link_state == HLS_DN_POLL && (quick_linkup
9789                             || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
9790                         /*
9791                          * Quick link up jumps from polling to here.
9792                          *
9793                          * Whether in normal or loopback mode, the
9794                          * simulator jumps from polling to link up.
9795                          * Accept that here.
9796                          */
9797                         /* OK */;
9798                 } else if (ppd->host_link_state != HLS_GOING_UP) {
9799                         goto unexpected;
9800                 }
9801
9802                 ppd->host_link_state = HLS_UP_INIT;
9803                 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
9804                 if (ret) {
9805                         /* logical state didn't change, stay at going_up */
9806                         ppd->host_link_state = HLS_GOING_UP;
9807                         dd_dev_err(dd,
9808                                 "%s: logical state did not change to INIT\n",
9809                                 __func__);
9810                 } else {
9811                         /* clear old transient LINKINIT_REASON code */
9812                         if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
9813                                 ppd->linkinit_reason =
9814                                         OPA_LINKINIT_REASON_LINKUP;
9815
9816                         /* enable the port */
9817                         add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9818
9819                         handle_linkup_change(dd, 1);
9820                 }
9821                 break;
9822         case HLS_UP_ARMED:
9823                 if (ppd->host_link_state != HLS_UP_INIT)
9824                         goto unexpected;
9825
9826                 ppd->host_link_state = HLS_UP_ARMED;
9827                 set_logical_state(dd, LSTATE_ARMED);
9828                 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
9829                 if (ret) {
9830                         /* logical state didn't change, stay at init */
9831                         ppd->host_link_state = HLS_UP_INIT;
9832                         dd_dev_err(dd,
9833                                 "%s: logical state did not change to ARMED\n",
9834                                 __func__);
9835                 }
9836                 /*
9837                  * The simulator does not currently implement SMA messages,
9838                  * so neighbor_normal is not set.  Set it here when we first
9839                  * move to Armed.
9840                  */
9841                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9842                         ppd->neighbor_normal = 1;
9843                 break;
9844         case HLS_UP_ACTIVE:
9845                 if (ppd->host_link_state != HLS_UP_ARMED)
9846                         goto unexpected;
9847
9848                 ppd->host_link_state = HLS_UP_ACTIVE;
9849                 set_logical_state(dd, LSTATE_ACTIVE);
9850                 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
9851                 if (ret) {
9852                         /* logical state didn't change, stay at armed */
9853                         ppd->host_link_state = HLS_UP_ARMED;
9854                         dd_dev_err(dd,
9855                                 "%s: logical state did not change to ACTIVE\n",
9856                                 __func__);
9857                 } else {
9858
9859                         /* tell all engines to go running */
9860                         sdma_all_running(dd);
9861
9862                         /* Signal the IB layer that the port has gone active */
9863                         event.device = &dd->verbs_dev.ibdev;
9864                         event.element.port_num = ppd->port;
9865                         event.event = IB_EVENT_PORT_ACTIVE;
9866                 }
9867                 break;
9868         case HLS_DN_POLL:
9869                 if ((ppd->host_link_state == HLS_DN_DISABLE ||
9870                      ppd->host_link_state == HLS_DN_OFFLINE) &&
9871                     dd->dc_shutdown)
9872                         dc_start(dd);
9873                 /* Hand LED control to the DC */
9874                 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
9875
9876                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
9877                         u8 tmp = ppd->link_enabled;
9878
9879                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
9880                         if (ret) {
9881                                 ppd->link_enabled = tmp;
9882                                 break;
9883                         }
9884                         ppd->remote_link_down_reason = 0;
9885
9886                         if (ppd->driver_link_ready)
9887                                 ppd->link_enabled = 1;
9888                 }
9889
9890                 ret = set_local_link_attributes(ppd);
9891                 if (ret)
9892                         break;
9893
9894                 ppd->port_error_action = 0;
9895                 ppd->host_link_state = HLS_DN_POLL;
9896
9897                 if (quick_linkup) {
9898                         /* quick linkup does not go into polling */
9899                         ret = do_quick_linkup(dd);
9900                 } else {
9901                         ret1 = set_physical_link_state(dd, PLS_POLLING);
9902                         if (ret1 != HCMD_SUCCESS) {
9903                                 dd_dev_err(dd,
9904                                         "Failed to transition to Polling link state, return 0x%x\n",
9905                                         ret1);
9906                                 ret = -EINVAL;
9907                         }
9908                 }
9909                 ppd->offline_disabled_reason = OPA_LINKDOWN_REASON_NONE;
9910                 /*
9911                  * If an error occurred above, go back to offline.  The
9912                  * caller may reschedule another attempt.
9913                  */
9914                 if (ret)
9915                         goto_offline(ppd, 0);
9916                 break;
9917         case HLS_DN_DISABLE:
9918                 /* link is disabled */
9919                 ppd->link_enabled = 0;
9920
9921                 /* allow any state to transition to disabled */
9922
9923                 /* must transition to offline first */
9924                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
9925                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
9926                         if (ret)
9927                                 break;
9928                         ppd->remote_link_down_reason = 0;
9929                 }
9930
9931                 ret1 = set_physical_link_state(dd, PLS_DISABLED);
9932                 if (ret1 != HCMD_SUCCESS) {
9933                         dd_dev_err(dd,
9934                                 "Failed to transition to Disabled link state, return 0x%x\n",
9935                                 ret1);
9936                         ret = -EINVAL;
9937                         break;
9938                 }
9939                 ppd->host_link_state = HLS_DN_DISABLE;
9940                 dc_shutdown(dd);
9941                 break;
9942         case HLS_DN_OFFLINE:
9943                 if (ppd->host_link_state == HLS_DN_DISABLE)
9944                         dc_start(dd);
9945
9946                 /* allow any state to transition to offline */
9947                 ret = goto_offline(ppd, ppd->remote_link_down_reason);
9948                 if (!ret)
9949                         ppd->remote_link_down_reason = 0;
9950                 break;
9951         case HLS_VERIFY_CAP:
9952                 if (ppd->host_link_state != HLS_DN_POLL)
9953                         goto unexpected;
9954                 ppd->host_link_state = HLS_VERIFY_CAP;
9955                 break;
9956         case HLS_GOING_UP:
9957                 if (ppd->host_link_state != HLS_VERIFY_CAP)
9958                         goto unexpected;
9959
9960                 ret1 = set_physical_link_state(dd, PLS_LINKUP);
9961                 if (ret1 != HCMD_SUCCESS) {
9962                         dd_dev_err(dd,
9963                                 "Failed to transition to link up state, return 0x%x\n",
9964                                 ret1);
9965                         ret = -EINVAL;
9966                         break;
9967                 }
9968                 ppd->host_link_state = HLS_GOING_UP;
9969                 break;
9970
9971         case HLS_GOING_OFFLINE:         /* transient within goto_offline() */
9972         case HLS_LINK_COOLDOWN:         /* transient within goto_offline() */
9973         default:
9974                 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
9975                         __func__, state);
9976                 ret = -EINVAL;
9977                 break;
9978         }
9979
9980         is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
9981                         HLS_DN_DISABLE | HLS_DN_OFFLINE));
9982
9983         if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
9984             ppd->neigh_link_down_reason.sma == 0) {
9985                 ppd->local_link_down_reason.sma =
9986                   ppd->local_link_down_reason.latest;
9987                 ppd->neigh_link_down_reason.sma =
9988                   ppd->neigh_link_down_reason.latest;
9989         }
9990
9991         goto done;
9992
9993 unexpected:
9994         dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
9995                 __func__, link_state_name(ppd->host_link_state),
9996                 link_state_name(state));
9997         ret = -EINVAL;
9998
9999 done:
10000         mutex_unlock(&ppd->hls_lock);
10001
10002         if (event.device)
10003                 ib_dispatch_event(&event);
10004
10005         return ret;
10006 }
10007
10008 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10009 {
10010         u64 reg;
10011         int ret = 0;
10012
10013         switch (which) {
10014         case HFI1_IB_CFG_LIDLMC:
10015                 set_lidlmc(ppd);
10016                 break;
10017         case HFI1_IB_CFG_VL_HIGH_LIMIT:
10018                 /*
10019                  * The VL Arbitrator high limit is sent in units of 4k
10020                  * bytes, while HFI stores it in units of 64 bytes.
10021                  */
10022                 val *= 4096/64;
10023                 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10024                         << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10025                 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10026                 break;
10027         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10028                 /* HFI only supports POLL as the default link down state */
10029                 if (val != HLS_DN_POLL)
10030                         ret = -EINVAL;
10031                 break;
10032         case HFI1_IB_CFG_OP_VLS:
10033                 if (ppd->vls_operational != val) {
10034                         ppd->vls_operational = val;
10035                         if (!ppd->port)
10036                                 ret = -EINVAL;
10037                         else
10038                                 ret = sdma_map_init(
10039                                         ppd->dd,
10040                                         ppd->port - 1,
10041                                         val,
10042                                         NULL);
10043                 }
10044                 break;
10045         /*
10046          * For link width, link width downgrade, and speed enable, always AND
10047          * the setting with what is actually supported.  This has two benefits.
10048          * First, enabled can't have unsupported values, no matter what the
10049          * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
10050          * "fill in with your supported value" have all the bits in the
10051          * field set, so simply ANDing with supported has the desired result.
10052          */
10053         case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10054                 ppd->link_width_enabled = val & ppd->link_width_supported;
10055                 break;
10056         case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10057                 ppd->link_width_downgrade_enabled =
10058                                 val & ppd->link_width_downgrade_supported;
10059                 break;
10060         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10061                 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10062                 break;
10063         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10064                 /*
10065                  * HFI does not follow IB specs, save this value
10066                  * so we can report it, if asked.
10067                  */
10068                 ppd->overrun_threshold = val;
10069                 break;
10070         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10071                 /*
10072                  * HFI does not follow IB specs, save this value
10073                  * so we can report it, if asked.
10074                  */
10075                 ppd->phy_error_threshold = val;
10076                 break;
10077
10078         case HFI1_IB_CFG_MTU:
10079                 set_send_length(ppd);
10080                 break;
10081
10082         case HFI1_IB_CFG_PKEYS:
10083                 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10084                         set_partition_keys(ppd);
10085                 break;
10086
10087         default:
10088                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10089                         dd_dev_info(ppd->dd,
10090                           "%s: which %s, val 0x%x: not implemented\n",
10091                           __func__, ib_cfg_name(which), val);
10092                 break;
10093         }
10094         return ret;
10095 }
10096
10097 /* begin functions related to vl arbitration table caching */
10098 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10099 {
10100         int i;
10101
10102         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10103                         VL_ARB_LOW_PRIO_TABLE_SIZE);
10104         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10105                         VL_ARB_HIGH_PRIO_TABLE_SIZE);
10106
10107         /*
10108          * Note that we always return values directly from the
10109          * 'vl_arb_cache' (and do no CSR reads) in response to a
10110          * 'Get(VLArbTable)'. This is obviously correct after a
10111          * 'Set(VLArbTable)', since the cache will then be up to
10112          * date. But it's also correct prior to any 'Set(VLArbTable)'
10113          * since then both the cache, and the relevant h/w registers
10114          * will be zeroed.
10115          */
10116
10117         for (i = 0; i < MAX_PRIO_TABLE; i++)
10118                 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10119 }
10120
10121 /*
10122  * vl_arb_lock_cache
10123  *
10124  * All other vl_arb_* functions should be called only after locking
10125  * the cache.
10126  */
10127 static inline struct vl_arb_cache *
10128 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10129 {
10130         if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10131                 return NULL;
10132         spin_lock(&ppd->vl_arb_cache[idx].lock);
10133         return &ppd->vl_arb_cache[idx];
10134 }
10135
10136 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10137 {
10138         spin_unlock(&ppd->vl_arb_cache[idx].lock);
10139 }
10140
10141 static void vl_arb_get_cache(struct vl_arb_cache *cache,
10142                              struct ib_vl_weight_elem *vl)
10143 {
10144         memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10145 }
10146
10147 static void vl_arb_set_cache(struct vl_arb_cache *cache,
10148                              struct ib_vl_weight_elem *vl)
10149 {
10150         memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10151 }
10152
10153 static int vl_arb_match_cache(struct vl_arb_cache *cache,
10154                               struct ib_vl_weight_elem *vl)
10155 {
10156         return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10157 }
10158 /* end functions related to vl arbitration table caching */
10159
10160 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10161                           u32 size, struct ib_vl_weight_elem *vl)
10162 {
10163         struct hfi1_devdata *dd = ppd->dd;
10164         u64 reg;
10165         unsigned int i, is_up = 0;
10166         int drain, ret = 0;
10167
10168         mutex_lock(&ppd->hls_lock);
10169
10170         if (ppd->host_link_state & HLS_UP)
10171                 is_up = 1;
10172
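        /* only drain the VLs on non-Ax hardware, and only when the link is up */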
10173         drain = !is_ax(dd) && is_up;
10174
10175         if (drain)
10176                 /*
10177                  * Before adjusting VL arbitration weights, empty per-VL
10178                  * FIFOs, otherwise a packet whose VL weight is being
10179                  * set to 0 could get stuck in a FIFO with no chance to
10180                  * egress.
10181                  */
10182                 ret = stop_drain_data_vls(dd);
10183
10184         if (ret) {
10185                 dd_dev_err(
10186                         dd,
10187                         "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10188                         __func__);
10189                 goto err;
10190         }
10191
10192         for (i = 0; i < size; i++, vl++) {
10193                 /*
10194                  * NOTE: The low priority shift and mask are used here, but
10195                  * they are the same for both the low and high registers.
10196                  */
10197                 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10198                                 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10199                       | (((u64)vl->weight
10200                                 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10201                                 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10202                 write_csr(dd, target + (i * 8), reg);
10203         }
10204         pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10205
10206         if (drain)
10207                 open_fill_data_vls(dd); /* reopen all VLs */
10208
10209 err:
10210         mutex_unlock(&ppd->hls_lock);
10211
10212         return ret;
10213 }
10214
10215 /*
10216  * Read one credit merge VL register.
10217  */
10218 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10219                            struct vl_limit *vll)
10220 {
10221         u64 reg = read_csr(dd, csr);
10222
10223         vll->dedicated = cpu_to_be16(
10224                 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10225                 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10226         vll->shared = cpu_to_be16(
10227                 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10228                 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10229 }
10230
10231 /*
10232  * Read the current credit merge limits.
10233  */
10234 static int get_buffer_control(struct hfi1_devdata *dd,
10235                               struct buffer_control *bc, u16 *overall_limit)
10236 {
10237         u64 reg;
10238         int i;
10239
10240         /* not all entries are filled in */
10241         memset(bc, 0, sizeof(*bc));
10242
10243         /* OPA and HFI have a 1-1 mapping */
10244         for (i = 0; i < TXE_NUM_DATA_VL; i++)
10245                 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8*i), &bc->vl[i]);
10246
10247         /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10248         read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10249
10250         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10251         bc->overall_shared_limit = cpu_to_be16(
10252                 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10253                 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10254         if (overall_limit)
10255                 *overall_limit = (reg
10256                         >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10257                         & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10258         return sizeof(struct buffer_control);
10259 }
10260
10261 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10262 {
10263         u64 reg;
10264         int i;
10265
10266         /* each register contains 16 SC->VLnt mappings, 4 bits each */
10267         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10268         for (i = 0; i < sizeof(u64); i++) {
10269                 u8 byte = *(((u8 *)&reg) + i);
10270
10271                 dp->vlnt[2 * i] = byte & 0xf;
10272                 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10273         }
10274
10275         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10276         for (i = 0; i < sizeof(u64); i++) {
10277                 u8 byte = *(((u8 *)&reg) + i);
10278
10279                 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10280                 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10281         }
10282         return sizeof(struct sc2vlnt);
10283 }
10284
10285 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10286                               struct ib_vl_weight_elem *vl)
10287 {
10288         unsigned int i;
10289
10290         for (i = 0; i < nelems; i++, vl++) {
10291                 vl->vl = 0xf;
10292                 vl->weight = 0;
10293         }
10294 }
10295
10296 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10297 {
10298         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10299                 DC_SC_VL_VAL(15_0,
10300                 0, dp->vlnt[0] & 0xf,
10301                 1, dp->vlnt[1] & 0xf,
10302                 2, dp->vlnt[2] & 0xf,
10303                 3, dp->vlnt[3] & 0xf,
10304                 4, dp->vlnt[4] & 0xf,
10305                 5, dp->vlnt[5] & 0xf,
10306                 6, dp->vlnt[6] & 0xf,
10307                 7, dp->vlnt[7] & 0xf,
10308                 8, dp->vlnt[8] & 0xf,
10309                 9, dp->vlnt[9] & 0xf,
10310                 10, dp->vlnt[10] & 0xf,
10311                 11, dp->vlnt[11] & 0xf,
10312                 12, dp->vlnt[12] & 0xf,
10313                 13, dp->vlnt[13] & 0xf,
10314                 14, dp->vlnt[14] & 0xf,
10315                 15, dp->vlnt[15] & 0xf));
10316         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10317                 DC_SC_VL_VAL(31_16,
10318                 16, dp->vlnt[16] & 0xf,
10319                 17, dp->vlnt[17] & 0xf,
10320                 18, dp->vlnt[18] & 0xf,
10321                 19, dp->vlnt[19] & 0xf,
10322                 20, dp->vlnt[20] & 0xf,
10323                 21, dp->vlnt[21] & 0xf,
10324                 22, dp->vlnt[22] & 0xf,
10325                 23, dp->vlnt[23] & 0xf,
10326                 24, dp->vlnt[24] & 0xf,
10327                 25, dp->vlnt[25] & 0xf,
10328                 26, dp->vlnt[26] & 0xf,
10329                 27, dp->vlnt[27] & 0xf,
10330                 28, dp->vlnt[28] & 0xf,
10331                 29, dp->vlnt[29] & 0xf,
10332                 30, dp->vlnt[30] & 0xf,
10333                 31, dp->vlnt[31] & 0xf));
10334 }
10335
10336 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10337                         u16 limit)
10338 {
10339         if (limit != 0)
10340                 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10341                         what, (int)limit, idx);
10342 }
10343
10344 /* change only the shared limit portion of SendCmGlobalCredit */
10345 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10346 {
10347         u64 reg;
10348
10349         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10350         reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10351         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10352         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10353 }
10354
10355 /* change only the total credit limit portion of SendCmGlobalCredit */
10356 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10357 {
10358         u64 reg;
10359
10360         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10361         reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10362         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10363         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10364 }
10365
10366 /* set the given per-VL shared limit */
10367 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10368 {
10369         u64 reg;
10370         u32 addr;
10371
10372         if (vl < TXE_NUM_DATA_VL)
10373                 addr = SEND_CM_CREDIT_VL + (8 * vl);
10374         else
10375                 addr = SEND_CM_CREDIT_VL15;
10376
10377         reg = read_csr(dd, addr);
10378         reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10379         reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10380         write_csr(dd, addr, reg);
10381 }
10382
10383 /* set the given per-VL dedicated limit */
10384 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10385 {
10386         u64 reg;
10387         u32 addr;
10388
10389         if (vl < TXE_NUM_DATA_VL)
10390                 addr = SEND_CM_CREDIT_VL + (8 * vl);
10391         else
10392                 addr = SEND_CM_CREDIT_VL15;
10393
10394         reg = read_csr(dd, addr);
10395         reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10396         reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10397         write_csr(dd, addr, reg);
10398 }
10399
10400 /* spin until the given per-VL status mask bits clear */
10401 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10402                                      const char *which)
10403 {
10404         unsigned long timeout;
10405         u64 reg;
10406
10407         timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10408         while (1) {
10409                 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10410
10411                 if (reg == 0)
10412                         return; /* success */
10413                 if (time_after(jiffies, timeout))
10414                         break;          /* timed out */
10415                 udelay(1);
10416         }
10417
10418         dd_dev_err(dd,
10419                 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10420                 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10421         /*
10422          * If this occurs, it is likely there was a credit loss on the link.
10423          * The only recovery from that is a link bounce.
10424          */
10425         dd_dev_err(dd,
10426                 "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
10427 }
10428
10429 /*
10430  * The number of credits on the VLs may be changed while everything
10431  * is "live", but the following algorithm must be followed due to
10432  * how the hardware is actually implemented.  In particular,
10433  * Return_Credit_Status[] is the only correct status check.
10434  *
10435  * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10436  *     set Global_Shared_Credit_Limit = 0
10437  *     use_all_vl = 1
10438  * mask0 = all VLs that are changing either dedicated or shared limits
10439  * set Shared_Limit[mask0] = 0
10440  * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10441  * if (changing any dedicated limit)
10442  *     mask1 = all VLs that are lowering dedicated limits
10443  *     lower Dedicated_Limit[mask1]
10444  *     spin until Return_Credit_Status[mask1] == 0
10445  *     raise Dedicated_Limits
10446  * raise Shared_Limits
10447  * raise Global_Shared_Credit_Limit
10448  *
10449  * lower = if the new limit is lower, set the limit to the new value
10450  * raise = if the new limit is higher than the current value (which may have
10451  *      been changed earlier in the algorithm), set the limit to the new value
10452  */
10453 static int set_buffer_control(struct hfi1_devdata *dd,
10454                               struct buffer_control *new_bc)
10455 {
10456         u64 changing_mask, ld_mask, stat_mask;
10457         int change_count;
10458         int i, use_all_mask;
10459         int this_shared_changing;
10460         /*
10461          * A0 hardware requires the variable any_shared_limit_changing below
10462          * (and in the algorithm above); it can be removed with A0 support.
10463          */
10464         int any_shared_limit_changing;
10465         struct buffer_control cur_bc;
10466         u8 changing[OPA_MAX_VLS];
10467         u8 lowering_dedicated[OPA_MAX_VLS];
10468         u16 cur_total;
10469         u32 new_total = 0;
10470         const u64 all_mask =
10471         SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10472          | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10473          | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10474          | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10475          | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10476          | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10477          | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10478          | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10479          | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10480
10481 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10482 #define NUM_USABLE_VLS 16       /* look at VL15 and below */
10483
10485         /* find the new total credits, do sanity check on unused VLs */
10486         for (i = 0; i < OPA_MAX_VLS; i++) {
10487                 if (valid_vl(i)) {
10488                         new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10489                         continue;
10490                 }
10491                 nonzero_msg(dd, i, "dedicated",
10492                         be16_to_cpu(new_bc->vl[i].dedicated));
10493                 nonzero_msg(dd, i, "shared",
10494                         be16_to_cpu(new_bc->vl[i].shared));
10495                 new_bc->vl[i].dedicated = 0;
10496                 new_bc->vl[i].shared = 0;
10497         }
10498         new_total += be16_to_cpu(new_bc->overall_shared_limit);
10499
10500         /* fetch the current values */
10501         get_buffer_control(dd, &cur_bc, &cur_total);
10502
10503         /*
10504          * Create the masks we will use.
10505          */
10506         memset(changing, 0, sizeof(changing));
10507         memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
10508         /*
10509          * NOTE: Assumes that the individual VL bits are adjacent and in
10510          * increasing order.
10511          */
10510         stat_mask =
10511                 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10512         changing_mask = 0;
10513         ld_mask = 0;
10514         change_count = 0;
10515         any_shared_limit_changing = 0;
10516         for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10517                 if (!valid_vl(i))
10518                         continue;
10519                 this_shared_changing = new_bc->vl[i].shared
10520                                                 != cur_bc.vl[i].shared;
10521                 if (this_shared_changing)
10522                         any_shared_limit_changing = 1;
10523                 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated
10524                                 || this_shared_changing) {
10525                         changing[i] = 1;
10526                         changing_mask |= stat_mask;
10527                         change_count++;
10528                 }
10529                 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10530                                         be16_to_cpu(cur_bc.vl[i].dedicated)) {
10531                         lowering_dedicated[i] = 1;
10532                         ld_mask |= stat_mask;
10533                 }
10534         }
10535
10536         /* bracket the credit change with a total adjustment */
10537         if (new_total > cur_total)
10538                 set_global_limit(dd, new_total);
10539
10540         /*
10541          * Start the credit change algorithm.
10542          */
10543         use_all_mask = 0;
10544         if ((be16_to_cpu(new_bc->overall_shared_limit) <
10545              be16_to_cpu(cur_bc.overall_shared_limit)) ||
10546             (is_ax(dd) && any_shared_limit_changing)) {
10547                 set_global_shared(dd, 0);
10548                 cur_bc.overall_shared_limit = 0;
10549                 use_all_mask = 1;
10550         }
10551
10552         for (i = 0; i < NUM_USABLE_VLS; i++) {
10553                 if (!valid_vl(i))
10554                         continue;
10555
10556                 if (changing[i]) {
10557                         set_vl_shared(dd, i, 0);
10558                         cur_bc.vl[i].shared = 0;
10559                 }
10560         }
10561
10562         wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
10563                 "shared");
10564
10565         if (change_count > 0) {
10566                 for (i = 0; i < NUM_USABLE_VLS; i++) {
10567                         if (!valid_vl(i))
10568                                 continue;
10569
10570                         if (lowering_dedicated[i]) {
10571                                 set_vl_dedicated(dd, i,
10572                                         be16_to_cpu(new_bc->vl[i].dedicated));
10573                                 cur_bc.vl[i].dedicated =
10574                                                 new_bc->vl[i].dedicated;
10575                         }
10576                 }
10577
10578                 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10579
10580                 /* now raise all dedicated that are going up */
10581                 for (i = 0; i < NUM_USABLE_VLS; i++) {
10582                         if (!valid_vl(i))
10583                                 continue;
10584
10585                         if (be16_to_cpu(new_bc->vl[i].dedicated) >
10586                                         be16_to_cpu(cur_bc.vl[i].dedicated))
10587                                 set_vl_dedicated(dd, i,
10588                                         be16_to_cpu(new_bc->vl[i].dedicated));
10589                 }
10590         }
10591
10592         /* next raise all shared that are going up */
10593         for (i = 0; i < NUM_USABLE_VLS; i++) {
10594                 if (!valid_vl(i))
10595                         continue;
10596
10597                 if (be16_to_cpu(new_bc->vl[i].shared) >
10598                                 be16_to_cpu(cur_bc.vl[i].shared))
10599                         set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10600         }
10601
10602         /* finally raise the global shared */
10603         if (be16_to_cpu(new_bc->overall_shared_limit) >
10604                         be16_to_cpu(cur_bc.overall_shared_limit))
10605                 set_global_shared(dd,
10606                         be16_to_cpu(new_bc->overall_shared_limit));
10607
10608         /* bracket the credit change with a total adjustment */
10609         if (new_total < cur_total)
10610                 set_global_limit(dd, new_total);
10611         return 0;
10612 }
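/*
 * Example usage sketch for the buffer control table.  The limit values
 * below are arbitrary illustrations only.  A fabric-manager-driven
 * caller fills in a struct buffer_control with big-endian limits and
 * hands it to set_buffer_control() through fm_set_table():
 *
 *     struct buffer_control bc = { 0 };
 *
 *     bc.overall_shared_limit = cpu_to_be16(0x100);
 *     bc.vl[0].dedicated = cpu_to_be16(0x80);
 *     bc.vl[0].shared = cpu_to_be16(0x40);
 *     bc.vl[15].dedicated = cpu_to_be16(0x20);
 *     ret = fm_set_table(ppd, FM_TBL_BUFFER_CONTROL, &bc);
 */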
10613
10614 /*
10615  * Read the given fabric manager table. Return the size of the
10616  * table (in bytes) on success, and a negative error code on
10617  * failure.
10618  */
10619 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
10621 {
10622         int size;
10623         struct vl_arb_cache *vlc;
10624
10625         switch (which) {
10626         case FM_TBL_VL_HIGH_ARB:
10627                 size = 256;
10628                 /*
10629                  * OPA specifies 128 elements (of 2 bytes each), though
10630                  * HFI supports only 16 elements in h/w.
10631                  */
10632                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10633                 vl_arb_get_cache(vlc, t);
10634                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10635                 break;
10636         case FM_TBL_VL_LOW_ARB:
10637                 size = 256;
10638                 /*
10639                  * OPA specifies 128 elements (of 2 bytes each), though
10640                  * HFI supports only 16 elements in h/w.
10641                  */
10642                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10643                 vl_arb_get_cache(vlc, t);
10644                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10645                 break;
10646         case FM_TBL_BUFFER_CONTROL:
10647                 size = get_buffer_control(ppd->dd, t, NULL);
10648                 break;
10649         case FM_TBL_SC2VLNT:
10650                 size = get_sc2vlnt(ppd->dd, t);
10651                 break;
10652         case FM_TBL_VL_PREEMPT_ELEMS:
10653                 size = 256;
10654                 /* OPA specifies 128 elements, of 2 bytes each */
10655                 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
10656                 break;
10657         case FM_TBL_VL_PREEMPT_MATRIX:
10658                 size = 256;
10659                 /*
10660                  * OPA specifies that this is the same size as the VL
10661                  * arbitration tables (i.e., 256 bytes).
10662                  */
10663                 break;
10664         default:
10665                 return -EINVAL;
10666         }
10667         return size;
10668 }
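/*
 * Example read-back sketch; the 256-byte buffer is an assumption sized
 * to the largest table listed above.  A negative return (-EINVAL)
 * indicates an unknown table:
 *
 *     u8 buf[256];
 *     int len = fm_get_table(ppd, FM_TBL_VL_LOW_ARB, buf);
 *
 *     if (len < 0)
 *             return len;
 */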
10669
10670 /*
10671  * Write the given fabric manager table.
10672  */
10673 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
10674 {
10675         int ret = 0;
10676         struct vl_arb_cache *vlc;
10677
10678         switch (which) {
10679         case FM_TBL_VL_HIGH_ARB:
10680                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10681                 if (vl_arb_match_cache(vlc, t)) {
10682                         vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10683                         break;
10684                 }
10685                 vl_arb_set_cache(vlc, t);
10686                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10687                 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
10688                                      VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
10689                 break;
10690         case FM_TBL_VL_LOW_ARB:
10691                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10692                 if (vl_arb_match_cache(vlc, t)) {
10693                         vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10694                         break;
10695                 }
10696                 vl_arb_set_cache(vlc, t);
10697                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10698                 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
10699                                      VL_ARB_LOW_PRIO_TABLE_SIZE, t);
10700                 break;
10701         case FM_TBL_BUFFER_CONTROL:
10702                 ret = set_buffer_control(ppd->dd, t);
10703                 break;
10704         case FM_TBL_SC2VLNT:
10705                 set_sc2vlnt(ppd->dd, t);
10706                 break;
10707         default:
10708                 ret = -EINVAL;
10709         }
10710         return ret;
10711 }
10712
10713 /*
10714  * Disable all data VLs.
10715  *
10716  * Return 0 if disabled, non-zero if the VLs cannot be disabled.
10717  */
10718 static int disable_data_vls(struct hfi1_devdata *dd)
10719 {
10720         if (is_ax(dd))
10721                 return 1;
10722
10723         pio_send_control(dd, PSC_DATA_VL_DISABLE);
10724
10725         return 0;
10726 }
10727
10728 /*
10729  * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
10730  * Just re-enables all data VLs (the "fill" part happens
10731  * automatically - the name was chosen for symmetry with
10732  * stop_drain_data_vls()).
10733  *
10734  * Return 0 if successful, non-zero if the VLs cannot be enabled.
10735  */
10736 int open_fill_data_vls(struct hfi1_devdata *dd)
10737 {
10738         if (is_ax(dd))
10739                 return 1;
10740
10741         pio_send_control(dd, PSC_DATA_VL_ENABLE);
10742
10743         return 0;
10744 }
10745
10746 /*
10747  * drain_data_vls() - assumes that disable_data_vls() has been called,
10748  * then waits for the occupancy of the per-VL FIFOs of all contexts and
10749  * of the SDMA engines to drop to 0.
10750  */
10751 static void drain_data_vls(struct hfi1_devdata *dd)
10752 {
10753         sc_wait(dd);
10754         sdma_wait(dd);
10755         pause_for_credit_return(dd);
10756 }
10757
10758 /*
10759  * stop_drain_data_vls() - disable, then drain all per-VL fifos.
10760  *
10761  * Use open_fill_data_vls() to resume using data VLs.  This pair is
10762  * meant to be used like this:
10763  *
10764  * stop_drain_data_vls(dd);
10765  * // do things with per-VL resources
10766  * open_fill_data_vls(dd);
10767  */
10768 int stop_drain_data_vls(struct hfi1_devdata *dd)
10769 {
10770         int ret;
10771
10772         ret = disable_data_vls(dd);
10773         if (ret == 0)
10774                 drain_data_vls(dd);
10775
10776         return ret;
10777 }
10778
10779 /*
10780  * Convert a nanosecond time to a cclock count.  No matter how slow
10781  * the cclock, a non-zero ns will always have a non-zero result.
10782  */
10783 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
10784 {
10785         u32 cclocks;
10786
10787         if (dd->icode == ICODE_FPGA_EMULATION)
10788                 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
10789         else  /* simulation pretends to be ASIC */
10790                 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
10791         if (ns && !cclocks)     /* if ns nonzero, must be at least 1 */
10792                 cclocks = 1;
10793         return cclocks;
10794 }
10795
10796 /*
10797  * Convert a cclock count to nanoseconds.  No matter how slow
10798  * the cclock, a non-zero cclock count will always have a non-zero result.
10799  */
10800 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
10801 {
10802         u32 ns;
10803
10804         if (dd->icode == ICODE_FPGA_EMULATION)
10805                 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
10806         else  /* simulation pretends to be ASIC */
10807                 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
10808         if (cclocks && !ns)
10809                 ns = 1;
10810         return ns;
10811 }
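/*
 * Worked example of the two conversions above; the cclock period used
 * here is purely illustrative.  If the cclock period were 800ps, then
 * ns_to_cclock(dd, 5) = (5 * 1000) / 800 = 6 cclocks, and
 * cclock_to_ns(dd, 6) = (6 * 800) / 1000 = 4ns.  The round trip is not
 * exact; only the "non-zero in, non-zero out" guarantee holds.
 */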
10812
10813 /*
10814  * Dynamically adjust the receive interrupt timeout for a context based on
10815  * incoming packet rate.
10816  *
10817  * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
10818  */
10819 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
10820 {
10821         struct hfi1_devdata *dd = rcd->dd;
10822         u32 timeout = rcd->rcvavail_timeout;
10823
10824         /*
10825          * This algorithm doubles or halves the timeout depending on whether
10826          * the number of packets received in this interrupt was less than,
10827          * or greater than or equal to, the interrupt count.
10828          *
10829          * The calculations below do not allow a steady state to be achieved.
10830          * Only at the endpoints is it possible to have an unchanging
10831          * timeout.
10832          */
10833         if (npkts < rcv_intr_count) {
10834                 /*
10835                  * Not enough packets arrived before the timeout, adjust
10836                  * timeout downward.
10837                  */
10838                 if (timeout < 2) /* already at minimum? */
10839                         return;
10840                 timeout >>= 1;
10841         } else {
10842                 /*
10843                  * More than enough packets arrived before the timeout, adjust
10844                  * timeout upward.
10845                  */
10846                 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
10847                         return;
10848                 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
10849         }
10850
10851         rcd->rcvavail_timeout = timeout;
10852         /*
10853          * timeout cannot be larger than rcv_intr_timeout_csr, which has
10854          * already been verified to be in range.
10855          */
10854         write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
10855                 (u64)timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
10856 }
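/*
 * Illustrative behavior of the adjustment above, assuming
 * rcv_intr_count == 16 and a current rcvavail_timeout of 64 (both
 * numbers are examples only): interrupts that saw fewer than 16
 * packets halve the timeout (64 -> 32 -> ... -> 2 -> 1, where it
 * stops), while interrupts that saw 16 or more double it, capped at
 * dd->rcv_intr_timeout_csr.
 */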
10857
10858 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
10859                     u32 intr_adjust, u32 npkts)
10860 {
10861         struct hfi1_devdata *dd = rcd->dd;
10862         u64 reg;
10863         u32 ctxt = rcd->ctxt;
10864
10865         /*
10866          * Need to write timeout register before updating RcvHdrHead to ensure
10867          * that a new value is used when the HW decides to restart counting.
10868          */
10869         if (intr_adjust)
10870                 adjust_rcv_timeout(rcd, npkts);
10871         if (updegr) {
10872                 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
10873                         << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
10874                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
10875         }
10876         mmiowb();
10877         reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
10878                 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
10879                         << RCV_HDR_HEAD_HEAD_SHIFT);
10880         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
10881         mmiowb();
10882 }
10883
10884 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
10885 {
10886         u32 head, tail;
10887
10888         head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
10889                 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
10890
10891         if (rcd->rcvhdrtail_kvaddr)
10892                 tail = get_rcvhdrtail(rcd);
10893         else
10894                 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
10895
10896         return head == tail;
10897 }
10898
10899 /*
10900  * Context Control and Receive Array encoding for buffer size:
10901  *      0x0 invalid
10902  *      0x1   4 KB
10903  *      0x2   8 KB
10904  *      0x3  16 KB
10905  *      0x4  32 KB
10906  *      0x5  64 KB
10907  *      0x6 128 KB
10908  *      0x7 256 KB
10909  *      0x8 512 KB (Receive Array only)
10910  *      0x9   1 MB (Receive Array only)
10911  *      0xa   2 MB (Receive Array only)
10912  *
10913  *      0xb-0xf reserved (Receive Array only)
10914  *
10916  * This routine assumes that the value has already been sanity checked.
10917  */
10918 static u32 encoded_size(u32 size)
10919 {
10920         switch (size) {
10921         case   4*1024: return 0x1;
10922         case   8*1024: return 0x2;
10923         case  16*1024: return 0x3;
10924         case  32*1024: return 0x4;
10925         case  64*1024: return 0x5;
10926         case 128*1024: return 0x6;
10927         case 256*1024: return 0x7;
10928         case 512*1024: return 0x8;
10929         case   1*1024*1024: return 0x9;
10930         case   2*1024*1024: return 0xa;
10931         }
10932         return 0x1;     /* if invalid, go with the minimum size */
10933 }
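/*
 * For example, encoded_size(64 * 1024) returns 0x5, while an
 * unsupported size such as 3 * 1024 falls through to the 4 KB
 * encoding, 0x1.
 */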
10934
10935 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
10936 {
10937         struct hfi1_ctxtdata *rcd;
10938         u64 rcvctrl, reg;
10939         int did_enable = 0;
10940
10941         rcd = dd->rcd[ctxt];
10942         if (!rcd)
10943                 return;
10944
10945         hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
10946
10947         rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
10948         /* if the context is already enabled, don't do the extra steps */
10949         if ((op & HFI1_RCVCTRL_CTXT_ENB)
10950                         && !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
10951                 /* reset the tail and hdr addresses, and sequence count */
10952                 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
10953                                 rcd->rcvhdrq_phys);
10954                 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
10955                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
10956                                         rcd->rcvhdrqtailaddr_phys);
10957                 rcd->seq_cnt = 1;
10958
10959                 /* reset the cached receive header queue head value */
10960                 rcd->head = 0;
10961
10962                 /*
10963                  * Zero the receive header queue so we don't get false
10964                  * positives when checking the sequence number.  The
10965                  * sequence numbers could land exactly on the same spot.
10966                  * E.g. a rcd restart before the receive header wrapped.
10967                  */
10968                 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
10969
10970                 /* starting timeout */
10971                 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
10972
10973                 /* enable the context */
10974                 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
10975
10976                 /* clean the egr buffer size first */
10977                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
10978                 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
10979                                 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
10980                                         << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
10981
10982                 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
10983                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
10984                 did_enable = 1;
10985
10986                 /* zero RcvEgrIndexHead */
10987                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
10988
10989                 /* set eager count and base index */
10990                 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
10991                         & RCV_EGR_CTRL_EGR_CNT_MASK)
10992                        << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
10993                         (((rcd->eager_base >> RCV_SHIFT)
10994                           & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
10995                          << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
10996                 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
10997
10998                 /*
10999                  * Set TID (expected) count and base index.
11000                  * rcd->expected_count is set to individual RcvArray entries,
11001                  * not pairs, and the CSR takes a pair-count in groups of
11002                  * four, so divide by 8.
11003                  */
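                /*
                 * For example (assuming RCV_SHIFT is 3, matching the
                 * divide-by-8 above): an expected_count of 2048 RcvArray
                 * entries is written as 2048 >> 3 == 256.
                 */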
11004                 reg = (((rcd->expected_count >> RCV_SHIFT)
11005                                         & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11006                                 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11007                       (((rcd->expected_base >> RCV_SHIFT)
11008                                         & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11009                                 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11010                 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11011                 if (ctxt == HFI1_CTRL_CTXT)
11012                         write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11013         }
11014         if (op & HFI1_RCVCTRL_CTXT_DIS) {
11015                 write_csr(dd, RCV_VL15, 0);
11016                 /*
11017                  * When a receive context is being disabled, turn on tail
11018                  * update with a dummy tail address and then disable the
11019                  * receive context.
11020                  */
11021                 if (dd->rcvhdrtail_dummy_physaddr) {
11022                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11023                                         dd->rcvhdrtail_dummy_physaddr);
11024                         rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11025                 }
11026
11027                 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11028         }
11029         if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11030                 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11031         if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11032                 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11033         if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11034                 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11035         if (op & HFI1_RCVCTRL_TAILUPD_DIS)
11036                 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11037         if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11038                 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11039         if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11040                 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11041         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11042                 /*
11043                  * In one-packet-per-eager mode, the size comes from
11044                  * the RcvArray entry.
11045                  */
11044                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11045                 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11046         }
11047         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11048                 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11049         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11050                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11051         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11052                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11053         if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11054                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11055         if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11056                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11057         rcd->rcvctrl = rcvctrl;
11058         hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11059         write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11060
11061         /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11062         if (did_enable
11063             && (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11064                 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11065                 if (reg != 0) {
11066                         dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11067                                 ctxt, reg);
11068                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11069                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11070                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11071                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11072                         reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11073                         dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11074                                 ctxt, reg, reg == 0 ? "not" : "still");
11075                 }
11076         }
11077
11078         if (did_enable) {
11079                 /*
11080                  * The interrupt timeout and count must be set after
11081                  * the context is enabled to take effect.
11082                  */
11083                 /* set interrupt timeout */
11084                 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11085                         (u64)rcd->rcvavail_timeout <<
11086                                 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11087
11088                 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11089                 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11090                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11091         }
11092
11093         if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11094                 /*
11095                  * If the context has been disabled and the Tail Update has
11096                  * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
11097                  * so it doesn't contain an address that is invalid.
11098                  */
11099                 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11100                                 dd->rcvhdrtail_dummy_physaddr);
11101 }
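/*
 * Example usage sketch; the flag combination is illustrative only.
 * A caller would typically OR together the HFI1_RCVCTRL_* operations
 * it needs into a single call:
 *
 *     hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
 *                  rcd->ctxt);
 */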
11102
11103 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
11104                     u64 **cntrp)
11105 {
11106         int ret;
11107         u64 val = 0;
11108
11109         if (namep) {
11110                 ret = dd->cntrnameslen;
11111                 if (pos != 0) {
11112                         dd_dev_err(dd, "read_cntrs does not support indexing");
11113                         return 0;
11114                 }
11115                 *namep = dd->cntrnames;
11116         } else {
11117                 const struct cntr_entry *entry;
11118                 int i, j;
11119
11120                 ret = (dd->ndevcntrs) * sizeof(u64);
11121                 if (pos != 0) {
11122                         dd_dev_err(dd, "read_cntrs does not support indexing");
11123                         return 0;
11124                 }
11125
11126                 /* Get the start of the block of counters */
11127                 *cntrp = dd->cntrs;
11128
11129                 /*
11130                  * Now go and fill in each counter in the block.
11131                  */
11132                 for (i = 0; i < DEV_CNTR_LAST; i++) {
11133                         entry = &dev_cntrs[i];
11134                         hfi1_cdbg(CNTR, "reading %s", entry->name);
11135                         if (entry->flags & CNTR_DISABLED) {
11136                                 /* Nothing */
11137                                 hfi1_cdbg(CNTR, "\tDisabled\n");
11138                         } else {
11139                                 if (entry->flags & CNTR_VL) {
11140                                         hfi1_cdbg(CNTR, "\tPer VL\n");
11141                                         for (j = 0; j < C_VL_COUNT; j++) {
11142                                                 val = entry->rw_cntr(entry,
11143                                                                   dd, j,
11144                                                                   CNTR_MODE_R,
11145                                                                   0);
11146                                                 hfi1_cdbg(
11147                                                    CNTR,
11148                                                    "\t\tRead 0x%llx for %d\n",
11149                                                    val, j);
11150                                                 dd->cntrs[entry->offset + j] =
11151                                                                             val;
11152                                         }
11153                                 } else {
11154                                         val = entry->rw_cntr(entry, dd,
11155                                                         CNTR_INVALID_VL,
11156                                                         CNTR_MODE_R, 0);
11157                                         dd->cntrs[entry->offset] = val;
11158                                         hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11159                                 }
11160                         }
11161                 }
11162         }
11163         return ret;
11164 }
11165
11166 /*
11167  * Used by sysfs to create files for hfi stats to read
11168  */
11169 u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port,
11170                         char **namep, u64 **cntrp)
11171 {
11172         int ret;
11173         u64 val = 0;
11174
11175         if (namep) {
11176                 ret = dd->portcntrnameslen;
11177                 if (pos != 0) {
11178                         dd_dev_err(dd, "index not supported");
11179                         return 0;
11180                 }
11181                 *namep = dd->portcntrnames;
11182         } else {
11183                 const struct cntr_entry *entry;
11184                 struct hfi1_pportdata *ppd;
11185                 int i, j;
11186
11187                 ret = (dd->nportcntrs) * sizeof(u64);
11188                 if (pos != 0) {
11189                         dd_dev_err(dd, "indexing not supported");
11190                         return 0;
11191                 }
11192                 ppd = (struct hfi1_pportdata *)(dd + 1) + port;
11193                 *cntrp = ppd->cntrs;
11194
11195                 for (i = 0; i < PORT_CNTR_LAST; i++) {
11196                         entry = &port_cntrs[i];
11197                         hfi1_cdbg(CNTR, "reading %s", entry->name);
11198                         if (entry->flags & CNTR_DISABLED) {
11199                                 /* Nothing */
11200                                 hfi1_cdbg(CNTR, "\tDisabled\n");
11201                                 continue;
11202                         }
11203
11204                         if (entry->flags & CNTR_VL) {
11205                                 hfi1_cdbg(CNTR, "\tPer VL");
11206                                 for (j = 0; j < C_VL_COUNT; j++) {
11207                                         val = entry->rw_cntr(entry, ppd, j,
11208                                                                CNTR_MODE_R,
11209                                                                0);
11210                                         hfi1_cdbg(
11211                                            CNTR,
11212                                            "\t\tRead 0x%llx for %d",
11213                                            val, j);
11214                                         ppd->cntrs[entry->offset + j] = val;
11215                                 }
11216                         } else {
11217                                 val = entry->rw_cntr(entry, ppd,
11218                                                        CNTR_INVALID_VL,
11219                                                        CNTR_MODE_R,
11220                                                        0);
11221                                 ppd->cntrs[entry->offset] = val;
11222                                 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11223                         }
11224                 }
11225         }
11226         return ret;
11227 }
11228
11229 static void free_cntrs(struct hfi1_devdata *dd)
11230 {
11231         struct hfi1_pportdata *ppd;
11232         int i;
11233
11234         if (dd->synth_stats_timer.data)
11235                 del_timer_sync(&dd->synth_stats_timer);
11236         dd->synth_stats_timer.data = 0;
11237         ppd = (struct hfi1_pportdata *)(dd + 1);
11238         for (i = 0; i < dd->num_pports; i++, ppd++) {
11239                 kfree(ppd->cntrs);
11240                 kfree(ppd->scntrs);
11241                 free_percpu(ppd->ibport_data.rc_acks);
11242                 free_percpu(ppd->ibport_data.rc_qacks);
11243                 free_percpu(ppd->ibport_data.rc_delayed_comp);
11244                 ppd->cntrs = NULL;
11245                 ppd->scntrs = NULL;
11246                 ppd->ibport_data.rc_acks = NULL;
11247                 ppd->ibport_data.rc_qacks = NULL;
11248                 ppd->ibport_data.rc_delayed_comp = NULL;
11249         }
11250         kfree(dd->portcntrnames);
11251         dd->portcntrnames = NULL;
11252         kfree(dd->cntrs);
11253         dd->cntrs = NULL;
11254         kfree(dd->scntrs);
11255         dd->scntrs = NULL;
11256         kfree(dd->cntrnames);
11257         dd->cntrnames = NULL;
11258 }
11259
11260 #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11261 #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11262
11263 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11264                               u64 *psval, void *context, int vl)
11265 {
11266         u64 val;
11267         u64 sval = *psval;
11268
11269         if (entry->flags & CNTR_DISABLED) {
11270                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11271                 return 0;
11272         }
11273
11274         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11275
11276         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11277
11278         /* If it's a synthetic counter there is more work we need to do */
11279         if (entry->flags & CNTR_SYNTH) {
11280                 if (sval == CNTR_MAX) {
11281                         /* No need to read already saturated */
11282                         return CNTR_MAX;
11283                 }
11284
11285                 if (entry->flags & CNTR_32BIT) {
11286                         /* 32bit counters can wrap multiple times */
11287                         u64 upper = sval >> 32;
11288                         u64 lower = (sval << 32) >> 32;
11289
11290                         if (lower > val) { /* hw wrapped */
11291                                 if (upper == CNTR_32BIT_MAX)
11292                                         val = CNTR_MAX;
11293                                 else
11294                                         upper++;
11295                         }
11296
11297                         if (val != CNTR_MAX)
11298                                 val = (upper << 32) | val;
11299
11300                 } else {
11301                         /* If we rolled we are saturated */
11302                         if ((val < sval) || (val > CNTR_MAX))
11303                                 val = CNTR_MAX;
11304                 }
11305         }
11306
11307         *psval = val;
11308
11309         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11310
11311         return val;
11312 }
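/*
 * Worked example of the 32-bit wrap handling above (values are
 * illustrative): with a saved sval of 0x1fffffff0 (upper = 0x1,
 * lower = 0xfffffff0) and a fresh hardware read of 0x10, lower > val
 * signals a wrap, upper is bumped to 0x2, and the value returned and
 * saved is 0x200000010.
 */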
11313
11314 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11315                                struct cntr_entry *entry,
11316                                u64 *psval, void *context, int vl, u64 data)
11317 {
11318         u64 val;
11319
11320         if (entry->flags & CNTR_DISABLED) {
11321                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11322                 return 0;
11323         }
11324
11325         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11326
11327         if (entry->flags & CNTR_SYNTH) {
11328                 *psval = data;
11329                 if (entry->flags & CNTR_32BIT) {
11330                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11331                                              (data << 32) >> 32);
11332                         val = data; /* return the full 64bit value */
11333                 } else {
11334                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11335                                              data);
11336                 }
11337         } else {
11338                 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11339         }
11340
11341         *psval = val;
11342
11343         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11344
11345         return val;
11346 }
11347
11348 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11349 {
11350         struct cntr_entry *entry;
11351         u64 *sval;
11352
11353         entry = &dev_cntrs[index];
11354         sval = dd->scntrs + entry->offset;
11355
11356         if (vl != CNTR_INVALID_VL)
11357                 sval += vl;
11358
11359         return read_dev_port_cntr(dd, entry, sval, dd, vl);
11360 }
11361
11362 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11363 {
11364         struct cntr_entry *entry;
11365         u64 *sval;
11366
11367         entry = &dev_cntrs[index];
11368         sval = dd->scntrs + entry->offset;
11369
11370         if (vl != CNTR_INVALID_VL)
11371                 sval += vl;
11372
11373         return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11374 }
11375
11376 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11377 {
11378         struct cntr_entry *entry;
11379         u64 *sval;
11380
11381         entry = &port_cntrs[index];
11382         sval = ppd->scntrs + entry->offset;
11383
11384         if (vl != CNTR_INVALID_VL)
11385                 sval += vl;
11386
11387         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11388             (index <= C_RCV_HDR_OVF_LAST)) {
11389                 /* We do not want to bother for disabled contexts */
11390                 return 0;
11391         }
11392
11393         return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11394 }
11395
11396 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11397 {
11398         struct cntr_entry *entry;
11399         u64 *sval;
11400
11401         entry = &port_cntrs[index];
11402         sval = ppd->scntrs + entry->offset;
11403
11404         if (vl != CNTR_INVALID_VL)
11405                 sval += vl;
11406
11407         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11408             (index <= C_RCV_HDR_OVF_LAST)) {
11409                 /* We do not want to bother for disabled contexts */
11410                 return 0;
11411         }
11412
11413         return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11414 }
11415
11416 static void update_synth_timer(unsigned long opaque)
11417 {
11418         u64 cur_tx;
11419         u64 cur_rx;
11420         u64 total_flits;
11421         u8 update = 0;
11422         int i, j, vl;
11423         struct hfi1_pportdata *ppd;
11424         struct cntr_entry *entry;
11425
11426         struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11427
11428         /*
11429          * Rather than keep beating on the CSRs, pick a minimal set that we can
11430          * check to watch for potential roll over.  We can do this by looking at
11431          * the number of flits sent/received.  If the total flits exceeds 32 bits
11432          * then we have to iterate all the counters and update.
11433          */
11434         entry = &dev_cntrs[C_DC_RCV_FLITS];
11435         cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11436
11437         entry = &dev_cntrs[C_DC_XMIT_FLITS];
11438         cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11439
11440         hfi1_cdbg(
11441             CNTR,
11442             "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11443             dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11444
11445         if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11446                 /*
11447                  * May not be strictly necessary to update but it won't hurt and
11448                  * simplifies the logic here.
11449                  */
11450                 update = 1;
11451                 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11452                           dd->unit);
11453         } else {
11454                 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11455                 hfi1_cdbg(CNTR,
11456                           "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11457                           total_flits, (u64)CNTR_32BIT_MAX);
11458                 if (total_flits >= CNTR_32BIT_MAX) {
11459                         hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11460                                   dd->unit);
11461                         update = 1;
11462                 }
11463         }
11464
11465         if (update) {
11466                 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11467                 for (i = 0; i < DEV_CNTR_LAST; i++) {
11468                         entry = &dev_cntrs[i];
11469                         if (entry->flags & CNTR_VL) {
11470                                 for (vl = 0; vl < C_VL_COUNT; vl++)
11471                                         read_dev_cntr(dd, i, vl);
11472                         } else {
11473                                 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11474                         }
11475                 }
11476                 ppd = (struct hfi1_pportdata *)(dd + 1);
11477                 for (i = 0; i < dd->num_pports; i++, ppd++) {
11478                         for (j = 0; j < PORT_CNTR_LAST; j++) {
11479                                 entry = &port_cntrs[j];
11480                                 if (entry->flags & CNTR_VL) {
11481                                         for (vl = 0; vl < C_VL_COUNT; vl++)
11482                                                 read_port_cntr(ppd, j, vl);
11483                                 } else {
11484                                         read_port_cntr(ppd, j, CNTR_INVALID_VL);
11485                                 }
11486                         }
11487                 }
11488
11489                 /*
11490                  * We want the value in the register. The goal is to keep track
11491                  * of the number of "ticks" not the counter value. In other
11492                  * words if the register rolls we want to notice it and go ahead
11493                  * and force an update.
11494                  */
11495                 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11496                 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11497                                                 CNTR_MODE_R, 0);
11498
11499                 entry = &dev_cntrs[C_DC_RCV_FLITS];
11500                 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11501                                                 CNTR_MODE_R, 0);
11502
11503                 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11504                           dd->unit, dd->last_tx, dd->last_rx);
11505
11506         } else {
11507                 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11508         }
11509
11510         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11511 }
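/*
 * Illustrative tripwire check (numbers are arbitrary): if the last
 * sampled flit counts were tx = 0x80000000 and rx = 0x40000000, and the
 * current reads are tx = 0x90000000 and rx = 0xd0000000, neither
 * counter went backwards and total_flits is 0xa0000000, still below
 * CNTR_32BIT_MAX, so no full counter sweep is forced on this pass.
 */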
11512
11513 #define C_MAX_NAME 13 /* 12 chars + one for \0 */
11514 static int init_cntrs(struct hfi1_devdata *dd)
11515 {
11516         int i, rcv_ctxts, index, j;
11517         size_t sz;
11518         char *p;
11519         char name[C_MAX_NAME];
11520         struct hfi1_pportdata *ppd;
11521
11522         /* set up the stats timer; the add_timer is done at the end */
11523         setup_timer(&dd->synth_stats_timer, update_synth_timer,
11524                     (unsigned long)dd);
11525
11526         /***********************/
11527         /* per device counters */
11528         /***********************/
11529
11530         /* size names and determine how many we have */
11531         dd->ndevcntrs = 0;
11532         sz = 0;
11533         index = 0;
11534
11535         for (i = 0; i < DEV_CNTR_LAST; i++) {
11536                 hfi1_dbg_early("Init cntr %s\n", dev_cntrs[i].name);
11537                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11538                         hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11539                         continue;
11540                 }
11541
11542                 if (dev_cntrs[i].flags & CNTR_VL) {
11543                         hfi1_dbg_early("\tProcessing VL cntr\n");
11544                         dev_cntrs[i].offset = index;
11545                         for (j = 0; j < C_VL_COUNT; j++) {
11546                                 memset(name, '\0', C_MAX_NAME);
11547                                 snprintf(name, C_MAX_NAME, "%s%d",
11548                                         dev_cntrs[i].name,
11549                                         vl_from_idx(j));
11550                                 sz += strlen(name);
11551                                 sz++;
11552                                 hfi1_dbg_early("\t\t%s\n", name);
11553                                 dd->ndevcntrs++;
11554                                 index++;
11555                         }
11556                 } else {
11557                         /* +1 for newline  */
11558                         sz += strlen(dev_cntrs[i].name) + 1;
11559                         dd->ndevcntrs++;
11560                         dev_cntrs[i].offset = index;
11561                         index++;
11562                         hfi1_dbg_early("\tAdding %s\n", dev_cntrs[i].name);
11563                 }
11564         }
11565
11566         /* allocate space for the counter values */
11567         dd->cntrs = kcalloc(index, sizeof(u64), GFP_KERNEL);
11568         if (!dd->cntrs)
11569                 goto bail;
11570
11571         dd->scntrs = kcalloc(index, sizeof(u64), GFP_KERNEL);
11572         if (!dd->scntrs)
11573                 goto bail;
11574
11576         /* allocate space for the counter names */
11577         dd->cntrnameslen = sz;
11578         dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11579         if (!dd->cntrnames)
11580                 goto bail;
11581
11582         /* fill in the names */
11583         for (p = dd->cntrnames, i = 0, index = 0; i < DEV_CNTR_LAST; i++) {
11584                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11585                         /* Nothing */
11586                 } else {
11587                         if (dev_cntrs[i].flags & CNTR_VL) {
11588                                 for (j = 0; j < C_VL_COUNT; j++) {
11589                                         memset(name, '\0', C_MAX_NAME);
11590                                         snprintf(name, C_MAX_NAME, "%s%d",
11591                                                 dev_cntrs[i].name,
11592                                                 vl_from_idx(j));
11593                                         memcpy(p, name, strlen(name));
11594                                         p += strlen(name);
11595                                         *p++ = '\n';
11596                                 }
11597                         } else {
11598                                 memcpy(p, dev_cntrs[i].name,
11599                                        strlen(dev_cntrs[i].name));
11600                                 p += strlen(dev_cntrs[i].name);
11601                                 *p++ = '\n';
11602                         }
11603                         index++;
11604                 }
11605         }
11606
11607         /*********************/
11608         /* per port counters */
11609         /*********************/
11610
11611         /*
11612          * Go through the counters for the overflows and disable the ones we
11613          * don't need. This varies based on platform so we need to do it
11614          * dynamically here.
11615          */
11616         rcv_ctxts = dd->num_rcv_contexts;
11617         for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
11618              i <= C_RCV_HDR_OVF_LAST; i++) {
11619                 port_cntrs[i].flags |= CNTR_DISABLED;
11620         }
11621
11622         /* size port counter names and determine how many we have */
11623         sz = 0;
11624         dd->nportcntrs = 0;
11625         for (i = 0; i < PORT_CNTR_LAST; i++) {
11626                 hfi1_dbg_early("Init pcntr %s\n", port_cntrs[i].name);
11627                 if (port_cntrs[i].flags & CNTR_DISABLED) {
11628                         hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
11629                         continue;
11630                 }
11631
11632                 if (port_cntrs[i].flags & CNTR_VL) {
11633                         hfi1_dbg_early("\tProcessing VL cntr\n");
11634                         port_cntrs[i].offset = dd->nportcntrs;
11635                         for (j = 0; j < C_VL_COUNT; j++) {
11636                                 memset(name, '\0', C_MAX_NAME);
11637                                 snprintf(name, C_MAX_NAME, "%s%d",
11638                                         port_cntrs[i].name,
11639                                         vl_from_idx(j));
11640                                 sz += strlen(name);
11641                                 sz++;
11642                                 hfi1_dbg_early("\t\t%s\n", name);
11643                                 dd->nportcntrs++;
11644                         }
11645                 } else {
11646                         /* +1 for newline  */
11647                         sz += strlen(port_cntrs[i].name) + 1;
11648                         port_cntrs[i].offset = dd->nportcntrs;
11649                         dd->nportcntrs++;
11650                         hfi1_dbg_early("\tAdding %s\n", port_cntrs[i].name);
11651                 }
11652         }
11653
11654         /* allocate space for the counter names */
11655         dd->portcntrnameslen = sz;
11656         dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
11657         if (!dd->portcntrnames)
11658                 goto bail;
11659
11660         /* fill in port cntr names */
11661         for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
11662                 if (port_cntrs[i].flags & CNTR_DISABLED)
11663                         continue;
11664
11665                 if (port_cntrs[i].flags & CNTR_VL) {
11666                         for (j = 0; j < C_VL_COUNT; j++) {
11667                                 memset(name, '\0', C_MAX_NAME);
11668                                 snprintf(name, C_MAX_NAME, "%s%d",
11669                                         port_cntrs[i].name,
11670                                         vl_from_idx(j));
11671                                 memcpy(p, name, strlen(name));
11672                                 p += strlen(name);
11673                                 *p++ = '\n';
11674                         }
11675                 } else {
11676                         memcpy(p, port_cntrs[i].name,
11677                                strlen(port_cntrs[i].name));
11678                         p += strlen(port_cntrs[i].name);
11679                         *p++ = '\n';
11680                 }
11681         }
11682
11683         /* allocate per port storage for counter values */
11684         ppd = (struct hfi1_pportdata *)(dd + 1);
11685         for (i = 0; i < dd->num_pports; i++, ppd++) {
11686                 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
11687                 if (!ppd->cntrs)
11688                         goto bail;
11689
11690                 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
11691                 if (!ppd->scntrs)
11692                         goto bail;
11693         }
11694
11695         /* CPU counters need to be allocated and zeroed */
11696         if (init_cpu_counters(dd))
11697                 goto bail;
11698
11699         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11700         return 0;
11701 bail:
11702         free_cntrs(dd);
11703         return -ENOMEM;
11704 }
11705
11707 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
11708 {
11709         switch (chip_lstate) {
11710         default:
11711                 dd_dev_err(dd,
11712                          "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
11713                          chip_lstate);
11714                 /* fall through */
11715         case LSTATE_DOWN:
11716                 return IB_PORT_DOWN;
11717         case LSTATE_INIT:
11718                 return IB_PORT_INIT;
11719         case LSTATE_ARMED:
11720                 return IB_PORT_ARMED;
11721         case LSTATE_ACTIVE:
11722                 return IB_PORT_ACTIVE;
11723         }
11724 }
11725
11726 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
11727 {
11728         /* look at the HFI meta-states only */
11729         switch (chip_pstate & 0xf0) {
11730         default:
11731                 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
11732                         chip_pstate);
11733                 /* fall through */
11734         case PLS_DISABLED:
11735                 return IB_PORTPHYSSTATE_DISABLED;
11736         case PLS_OFFLINE:
11737                 return OPA_PORTPHYSSTATE_OFFLINE;
11738         case PLS_POLLING:
11739                 return IB_PORTPHYSSTATE_POLLING;
11740         case PLS_CONFIGPHY:
11741                 return IB_PORTPHYSSTATE_TRAINING;
11742         case PLS_LINKUP:
11743                 return IB_PORTPHYSSTATE_LINKUP;
11744         case PLS_PHYTEST:
11745                 return IB_PORTPHYSSTATE_PHY_TEST;
11746         }
11747 }
11748
11749 /* return the OPA port logical state name */
11750 const char *opa_lstate_name(u32 lstate)
11751 {
11752         static const char * const port_logical_names[] = {
11753                 "PORT_NOP",
11754                 "PORT_DOWN",
11755                 "PORT_INIT",
11756                 "PORT_ARMED",
11757                 "PORT_ACTIVE",
11758                 "PORT_ACTIVE_DEFER",
11759         };
11760         if (lstate < ARRAY_SIZE(port_logical_names))
11761                 return port_logical_names[lstate];
11762         return "unknown";
11763 }
11764
11765 /* return the OPA port physical state name */
11766 const char *opa_pstate_name(u32 pstate)
11767 {
11768         static const char * const port_physical_names[] = {
11769                 "PHYS_NOP",
11770                 "reserved1",
11771                 "PHYS_POLL",
11772                 "PHYS_DISABLED",
11773                 "PHYS_TRAINING",
11774                 "PHYS_LINKUP",
11775                 "PHYS_LINK_ERR_RECOVER",
11776                 "PHYS_PHY_TEST",
11777                 "reserved8",
11778                 "PHYS_OFFLINE",
11779                 "PHYS_GANGED",
11780                 "PHYS_TEST",
11781         };
11782         if (pstate < ARRAY_SIZE(port_physical_names))
11783                 return port_physical_names[pstate];
11784         return "unknown";
11785 }
11786
11787 /*
11788  * Read the hardware link state and set the driver's cached value of it.
11789  * Return the (new) current value.
11790  */
11791 u32 get_logical_state(struct hfi1_pportdata *ppd)
11792 {
11793         u32 new_state;
11794
11795         new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
11796         if (new_state != ppd->lstate) {
11797                 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
11798                         opa_lstate_name(new_state), new_state);
11799                 ppd->lstate = new_state;
11800         }
11801         /*
11802          * Set port status flags in the page mapped into userspace
11803          * memory. Do it here to ensure a reliable state - this is
11804          * the only function called by all state handling code.
11805          * Always set the flags because the cached value
11806          * might have been changed explicitly outside of this
11807          * function.
11808          */
11809         if (ppd->statusp) {
11810                 switch (ppd->lstate) {
11811                 case IB_PORT_DOWN:
11812                 case IB_PORT_INIT:
11813                         *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
11814                                            HFI1_STATUS_IB_READY);
11815                         break;
11816                 case IB_PORT_ARMED:
11817                         *ppd->statusp |= HFI1_STATUS_IB_CONF;
11818                         break;
11819                 case IB_PORT_ACTIVE:
11820                         *ppd->statusp |= HFI1_STATUS_IB_READY;
11821                         break;
11822                 }
11823         }
11824         return ppd->lstate;
11825 }
11826
11827 /**
11828  * wait_logical_linkstate - wait for an IB link state change to occur
11829  * @ppd: port device
11830  * @state: the state to wait for
11831  * @msecs: the number of milliseconds to wait
11832  *
11833  * Wait up to msecs milliseconds for an IB link state change to occur.
11834  * For now, take the easy polling route.
11835  * Returns 0 if state reached, otherwise -ETIMEDOUT.
11836  */
11837 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
11838                                   int msecs)
11839 {
11840         unsigned long timeout;
11841
11842         timeout = jiffies + msecs_to_jiffies(msecs);
11843         while (1) {
11844                 if (get_logical_state(ppd) == state)
11845                         return 0;
11846                 if (time_after(jiffies, timeout))
11847                         break;
11848                 msleep(20);
11849         }
11850         dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
11851
11852         return -ETIMEDOUT;
11853 }
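
/*
 * Illustrative (hypothetical) usage from link bring-up code: poll for
 * up to one second for the logical link state to reach INIT:
 *
 *	if (wait_logical_linkstate(ppd, IB_PORT_INIT, 1000))
 *		return -ETIMEDOUT;
 */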
11854
11855 u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
11856 {
11857         static u32 remembered_state = 0xff;
11858         u32 pstate;
11859         u32 ib_pstate;
11860
11861         pstate = read_physical_state(ppd->dd);
11862         ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
11863         if (remembered_state != ib_pstate) {
11864                 dd_dev_info(ppd->dd,
11865                         "%s: physical state changed to %s (0x%x), phy 0x%x\n",
11866                         __func__, opa_pstate_name(ib_pstate), ib_pstate,
11867                         pstate);
11868                 remembered_state = ib_pstate;
11869         }
11870         return ib_pstate;
11871 }
11872
11873 /*
11874  * Read/modify/write ASIC_QSFP register bits as selected by mask
11875  * data: 0 or 1 in the positions depending on what needs to be written
11876  * dir: 0 for read, 1 for write
11877  * mask: select by setting
11878  *      I2CCLK  (bit 0)
11879  *      I2CDATA (bit 1)
11880  */
11881 u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
11882                   u32 mask)
11883 {
11884         u64 qsfp_oe, target_oe;
11885
11886         target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
11887         if (mask) {
11888                 /* We are writing register bits, so lock access */
11889                 dir &= mask;
11890                 data &= mask;
11891
11892                 qsfp_oe = read_csr(dd, target_oe);
11893                 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
11894                 write_csr(dd, target_oe, qsfp_oe);
11895         }
11896         /* We are exclusively reading bits here, but it is unlikely
11897          * we'll get valid data when we set the direction of the pin
11898          * in the same call, so read should call this function again
11899          * to get valid data
11900          */
11901         return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
11902 }
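
/*
 * Illustrative example of the read/modify/write above, with
 * hypothetical values: mask = 0x1 selects only I2CCLK, so a call with
 * dir = 0x1 sets bit 0 of the selected ASIC_QSFPn_OE register while
 * leaving bit 1 (I2CDATA) untouched.  The return value is always a
 * fresh read of ASIC_QSFPn_IN which, as noted above, may not yet
 * reflect a direction change made in the same call.
 */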
11903
11904 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
11905 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
11906
11907 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
11908 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
11909
11910 int hfi1_init_ctxt(struct send_context *sc)
11911 {
11912         if (sc != NULL) {
11913                 struct hfi1_devdata *dd = sc->dd;
11914                 u64 reg;
11915                 u8 set = (sc->type == SC_USER ?
11916                           HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
11917                           HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
11918                 reg = read_kctxt_csr(dd, sc->hw_context,
11919                                      SEND_CTXT_CHECK_ENABLE);
11920                 if (set)
11921                         CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
11922                 else
11923                         SET_STATIC_RATE_CONTROL_SMASK(reg);
11924                 write_kctxt_csr(dd, sc->hw_context,
11925                                 SEND_CTXT_CHECK_ENABLE, reg);
11926         }
11927         return 0;
11928 }
11929
11930 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
11931 {
11932         int ret = 0;
11933         u64 reg;
11934
11935         if (dd->icode != ICODE_RTL_SILICON) {
11936                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
11937                         dd_dev_info(dd, "%s: tempsense not supported by HW\n",
11938                                     __func__);
11939                 return -EINVAL;
11940         }
11941         reg = read_csr(dd, ASIC_STS_THERM);
11942         temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
11943                       ASIC_STS_THERM_CURR_TEMP_MASK);
11944         temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
11945                         ASIC_STS_THERM_LO_TEMP_MASK);
11946         temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
11947                         ASIC_STS_THERM_HI_TEMP_MASK);
11948         temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
11949                           ASIC_STS_THERM_CRIT_TEMP_MASK);
11950         /* triggers is a 3-bit value - 1 bit per trigger. */
11951         temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
11952
11953         return ret;
11954 }
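
/*
 * Illustrative (hypothetical) caller, e.g. a periodic thermal check;
 * only "one bit per trigger" is guaranteed for the triggers field:
 *
 *	struct hfi1_temp temp;
 *
 *	if (!hfi1_tempsense_rd(dd, &temp) && temp.triggers)
 *		dd_dev_info(dd, "temp %u, triggers 0x%x\n",
 *			    temp.curr, temp.triggers);
 */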
11955
11956 /* ========================================================================= */
11957
11958 /*
11959  * Enable or disable delivery of interrupts from the chip.
11960  */
11961 void set_intr_state(struct hfi1_devdata *dd, u32 enable)
11962 {
11963         int i;
11964
11965         /*
11966          * In HFI, the mask needs to be 1 to allow interrupts.
11967          */
11968         if (enable) {
11969                 u64 cce_int_mask;
11970                 const int qsfp1_int_smask = QSFP1_INT % 64;
11971                 const int qsfp2_int_smask = QSFP2_INT % 64;
11972
11973                 /* enable all interrupts */
11974                 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
11975                         write_csr(dd, CCE_INT_MASK + (8*i), ~(u64)0);
11976
11977                 /*
11978                  * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
11979                  * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
11980                  * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
11981                  * the index of the appropriate CSR in the CCEIntMask CSR array
11982                  */
11983                 cce_int_mask = read_csr(dd, CCE_INT_MASK +
11984                                                 (8*(QSFP1_INT/64)));
11985                 if (dd->hfi1_id) {
11986                         cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
11987                         write_csr(dd, CCE_INT_MASK + (8*(QSFP1_INT/64)),
11988                                         cce_int_mask);
11989                 } else {
11990                         cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
11991                         write_csr(dd, CCE_INT_MASK + (8*(QSFP2_INT/64)),
11992                                         cce_int_mask);
11993                 }
11994         } else {
11995                 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
11996                         write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
11997         }
11998 }
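
/*
 * Illustrative example of the CSR addressing used above: chip
 * interrupt source number S is controlled by bit (S % 64) of the
 * 64-bit CSR at CCE_INT_MASK + 8 * (S / 64); a hypothetical source
 * number 70 would live in the second mask CSR, bit 6.  QSFP1_INT and
 * QSFP2_INT fall in the same CSR, so a single read/modify/write above
 * can clear either one.
 */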
11999
12000 /*
12001  * Clear all interrupt sources on the chip.
12002  */
12003 static void clear_all_interrupts(struct hfi1_devdata *dd)
12004 {
12005         int i;
12006
12007         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12008                 write_csr(dd, CCE_INT_CLEAR + (8*i), ~(u64)0);
12009
12010         write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12011         write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12012         write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12013         write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12014         write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12015         write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12016         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12017         for (i = 0; i < dd->chip_send_contexts; i++)
12018                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12019         for (i = 0; i < dd->chip_sdma_engines; i++)
12020                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12021
12022         write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12023         write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12024         write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12025 }
12026
12027 /* Move to pcie.c? */
12028 static void disable_intx(struct pci_dev *pdev)
12029 {
12030         pci_intx(pdev, 0);
12031 }
12032
12033 static void clean_up_interrupts(struct hfi1_devdata *dd)
12034 {
12035         int i;
12036
12037         /* remove irqs - must happen before disabling/turning off */
12038         if (dd->num_msix_entries) {
12039                 /* MSI-X */
12040                 struct hfi1_msix_entry *me = dd->msix_entries;
12041
12042                 for (i = 0; i < dd->num_msix_entries; i++, me++) {
12043                         if (me->arg == NULL) /* => no irq, no affinity */
12044                                 break;
12045                         irq_set_affinity_hint(dd->msix_entries[i].msix.vector,
12046                                         NULL);
12047                         free_irq(me->msix.vector, me->arg);
12048                 }
12049         } else {
12050                 /* INTx */
12051                 if (dd->requested_intx_irq) {
12052                         free_irq(dd->pcidev->irq, dd);
12053                         dd->requested_intx_irq = 0;
12054                 }
12055         }
12056
12057         /* turn off interrupts */
12058         if (dd->num_msix_entries) {
12059                 /* MSI-X */
12060                 pci_disable_msix(dd->pcidev);
12061         } else {
12062                 /* INTx */
12063                 disable_intx(dd->pcidev);
12064         }
12065
12066         /* clean structures */
12067         for (i = 0; i < dd->num_msix_entries; i++)
12068                 free_cpumask_var(dd->msix_entries[i].mask);
12069         kfree(dd->msix_entries);
12070         dd->msix_entries = NULL;
12071         dd->num_msix_entries = 0;
12072 }
12073
12074 /*
12075  * Remap the interrupt source from the general handler to the given MSI-X
12076  * interrupt.
12077  */
12078 static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12079 {
12080         u64 reg;
12081         int m, n;
12082
12083         /* clear from the handled mask of the general interrupt */
12084         m = isrc / 64;
12085         n = isrc % 64;
12086         dd->gi_mask[m] &= ~((u64)1 << n);
12087
12088         /* direct the chip source to the given MSI-X interrupt */
12089         m = isrc / 8;
12090         n = isrc % 8;
12091         reg = read_csr(dd, CCE_INT_MAP + (8*m));
12092         reg &= ~((u64)0xff << (8*n));
12093         reg |= ((u64)msix_intr & 0xff) << (8*n);
12094         write_csr(dd, CCE_INT_MAP + (8*m), reg);
12095 }
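
/*
 * Worked example for remap_intr() with hypothetical numbers: for
 * isrc = 137 and msix_intr = 5, the source's bit in the general
 * handler mask is gi_mask[137 / 64 = 2], bit 137 % 64 = 9, and its
 * 8-bit map field is byte 137 % 8 = 1 of the CSR at
 * CCE_INT_MAP + 8 * (137 / 8 = 17), which is rewritten to 5.
 */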
12096
12097 static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12098                                   int engine, int msix_intr)
12099 {
12100         /*
12101          * SDMA engine interrupt sources are grouped by type, rather
12102          * than by engine.  Per-engine interrupts are as follows:
12103          *      SDMA
12104          *      SDMAProgress
12105          *      SDMAIdle
12106          */
12107         remap_intr(dd, IS_SDMA_START + 0*TXE_NUM_SDMA_ENGINES + engine,
12108                 msix_intr);
12109         remap_intr(dd, IS_SDMA_START + 1*TXE_NUM_SDMA_ENGINES + engine,
12110                 msix_intr);
12111         remap_intr(dd, IS_SDMA_START + 2*TXE_NUM_SDMA_ENGINES + engine,
12112                 msix_intr);
12113 }
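
/*
 * Illustrative example, assuming a hypothetical TXE_NUM_SDMA_ENGINES
 * of 16: for engine 3, the per-engine sources SDMA, SDMAProgress and
 * SDMAIdle are IS_SDMA_START + 3, + 19 and + 35 respectively, and all
 * three are remapped to the same MSI-X vector.
 */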
12114
12115 static int request_intx_irq(struct hfi1_devdata *dd)
12116 {
12117         int ret;
12118
12119         snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12120                  dd->unit);
12121         ret = request_irq(dd->pcidev->irq, general_interrupt,
12122                                   IRQF_SHARED, dd->intx_name, dd);
12123         if (ret)
12124                 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12125                                 ret);
12126         else
12127                 dd->requested_intx_irq = 1;
12128         return ret;
12129 }
12130
12131 static int request_msix_irqs(struct hfi1_devdata *dd)
12132 {
12133         const struct cpumask *local_mask;
12134         cpumask_var_t def, rcv;
12135         bool def_ret, rcv_ret;
12136         int first_general, last_general;
12137         int first_sdma, last_sdma;
12138         int first_rx, last_rx;
12139         int first_cpu, curr_cpu;
12140         int rcv_cpu, sdma_cpu;
12141         int i, ret = 0, possible;
12142         int ht;
12143
12144         /* calculate the ranges we are going to use */
12145         first_general = 0;
12146         first_sdma = last_general = first_general + 1;
12147         first_rx = last_sdma = first_sdma + dd->num_sdma;
12148         last_rx = first_rx + dd->n_krcv_queues;
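        /*
         * Illustrative layout with hypothetical counts (16 SDMA engines,
         * 8 kernel receive queues): vector 0 is the general interrupt,
         * vectors 1-16 are the SDMA engines and vectors 17-24 are the
         * kernel receive contexts.
         */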
12149
12150         /*
12151          * Interrupt affinity.
12152          *
12153          * The non-receive-available interrupts get a default mask that
12154          * starts as the local node's CPUs, with hyperthread siblings
12155          * removed and each receive-available CPU removed.
12156          *
12157          * Each receive-available interrupt gets its own CPU, starting at
12158          * node-relative CPU 1 and wrapping back to it as necessary.
12159          *
12160          */
12161         local_mask = cpumask_of_pcibus(dd->pcidev->bus);
12162         /* if first cpu is invalid, use NUMA 0 */
12163         if (cpumask_first(local_mask) >= nr_cpu_ids)
12164                 local_mask = topology_core_cpumask(0);
12165
12166         def_ret = zalloc_cpumask_var(&def, GFP_KERNEL);
12167         rcv_ret = zalloc_cpumask_var(&rcv, GFP_KERNEL);
12168         if (!def_ret || !rcv_ret)
12169                 goto bail;
12170         /* use local mask as default */
12171         cpumask_copy(def, local_mask);
12172         possible = cpumask_weight(def);
12173         /* disarm threads from default */
12174         ht = cpumask_weight(
12175                         topology_sibling_cpumask(cpumask_first(local_mask)));
12176         for (i = possible/ht; i < possible; i++)
12177                 cpumask_clear_cpu(i, def);
12178         /* def now has full cores on chosen node */
12179         first_cpu = cpumask_first(def);
12180         if (nr_cpu_ids >= first_cpu)
12181                 first_cpu++;
12182         curr_cpu = first_cpu;
12183
12184         /*  One context is reserved as control context */
12185         for (i = first_cpu; i < dd->n_krcv_queues + first_cpu - 1; i++) {
12186                 cpumask_clear_cpu(curr_cpu, def);
12187                 cpumask_set_cpu(curr_cpu, rcv);
12188                 curr_cpu = cpumask_next(curr_cpu, def);
12189                 if (curr_cpu >= nr_cpu_ids)
12190                         break;
12191         }
12192         /* def mask has non-rcv, rcv has recv mask */
12193         rcv_cpu = cpumask_first(rcv);
12194         sdma_cpu = cpumask_first(def);
12195
12196         /*
12197          * Sanity check - the code expects all SDMA chip source
12198          * interrupts to be in the same CSR, starting at bit 0.  Verify
12199          * that this is true by checking the bit location of the start.
12200          */
12201         BUILD_BUG_ON(IS_SDMA_START % 64);
12202
12203         for (i = 0; i < dd->num_msix_entries; i++) {
12204                 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12205                 const char *err_info;
12206                 irq_handler_t handler;
12207                 irq_handler_t thread = NULL;
12208                 void *arg;
12209                 int idx;
12210                 struct hfi1_ctxtdata *rcd = NULL;
12211                 struct sdma_engine *sde = NULL;
12212
12213                 /* obtain the arguments to request_irq */
12214                 if (first_general <= i && i < last_general) {
12215                         idx = i - first_general;
12216                         handler = general_interrupt;
12217                         arg = dd;
12218                         snprintf(me->name, sizeof(me->name),
12219                                  DRIVER_NAME "_%d", dd->unit);
12220                         err_info = "general";
12221                 } else if (first_sdma <= i && i < last_sdma) {
12222                         idx = i - first_sdma;
12223                         sde = &dd->per_sdma[idx];
12224                         handler = sdma_interrupt;
12225                         arg = sde;
12226                         snprintf(me->name, sizeof(me->name),
12227                                  DRIVER_NAME "_%d sdma%d", dd->unit, idx);
12228                         err_info = "sdma";
12229                         remap_sdma_interrupts(dd, idx, i);
12230                 } else if (first_rx <= i && i < last_rx) {
12231                         idx = i - first_rx;
12232                         rcd = dd->rcd[idx];
12233                         /* no interrupt if no rcd */
12234                         if (!rcd)
12235                                 continue;
12236                         /*
12237                          * Set the interrupt register and mask for this
12238                          * context's interrupt.
12239                          */
12240                         rcd->ireg = (IS_RCVAVAIL_START+idx) / 64;
12241                         rcd->imask = ((u64)1) <<
12242                                         ((IS_RCVAVAIL_START+idx) % 64);
12243                         handler = receive_context_interrupt;
12244                         thread = receive_context_thread;
12245                         arg = rcd;
12246                         snprintf(me->name, sizeof(me->name),
12247                                  DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
12248                         err_info = "receive context";
12249                         remap_intr(dd, IS_RCVAVAIL_START + idx, i);
12250                 } else {
12251                         /* not in our expected range - complain, then
12252                            ignore it */
12253                         dd_dev_err(dd,
12254                                 "Unexpected extra MSI-X interrupt %d\n", i);
12255                         continue;
12256                 }
12257                 /* no argument, no interrupt */
12258                 if (arg == NULL)
12259                         continue;
12260                 /* make sure the name is terminated */
12261                 me->name[sizeof(me->name)-1] = 0;
12262
12263                 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12264                                                 me->name, arg);
12265                 if (ret) {
12266                         dd_dev_err(dd,
12267                                 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12268                                  err_info, me->msix.vector, idx, ret);
12269                         return ret;
12270                 }
12271                 /*
12272                  * assign arg after request_irq call, so it will be
12273                  * cleaned up
12274                  */
12275                 me->arg = arg;
12276
12277                 if (!zalloc_cpumask_var(
12278                         &dd->msix_entries[i].mask,
12279                         GFP_KERNEL))
12280                         goto bail;
12281                 if (handler == sdma_interrupt) {
12282                         dd_dev_info(dd, "sdma engine %d cpu %d\n",
12283                                 sde->this_idx, sdma_cpu);
12284                         sde->cpu = sdma_cpu;
12285                         cpumask_set_cpu(sdma_cpu, dd->msix_entries[i].mask);
12286                         sdma_cpu = cpumask_next(sdma_cpu, def);
12287                         if (sdma_cpu >= nr_cpu_ids)
12288                                 sdma_cpu = cpumask_first(def);
12289                 } else if (handler == receive_context_interrupt) {
12290                         dd_dev_info(dd, "rcv ctxt %d cpu %d\n", rcd->ctxt,
12291                                     (rcd->ctxt == HFI1_CTRL_CTXT) ?
12292                                             cpumask_first(def) : rcv_cpu);
12293                         if (rcd->ctxt == HFI1_CTRL_CTXT) {
12294                                 /* map to first default */
12295                                 cpumask_set_cpu(cpumask_first(def),
12296                                                 dd->msix_entries[i].mask);
12297                         } else {
12298                                 cpumask_set_cpu(rcv_cpu,
12299                                                 dd->msix_entries[i].mask);
12300                                 rcv_cpu = cpumask_next(rcv_cpu, rcv);
12301                                 if (rcv_cpu >= nr_cpu_ids)
12302                                         rcv_cpu = cpumask_first(rcv);
12303                         }
12304                 } else {
12305                         /* otherwise first def */
12306                         dd_dev_info(dd, "%s cpu %d\n",
12307                                 err_info, cpumask_first(def));
12308                         cpumask_set_cpu(
12309                                 cpumask_first(def), dd->msix_entries[i].mask);
12310                 }
12311                 irq_set_affinity_hint(
12312                         dd->msix_entries[i].msix.vector,
12313                         dd->msix_entries[i].mask);
12314         }
12315
12316 out:
12317         free_cpumask_var(def);
12318         free_cpumask_var(rcv);
12319         return ret;
12320 bail:
12321         ret = -ENOMEM;
12322         goto  out;
12323 }
12324
12325 /*
12326  * Set the general handler to accept all interrupts, remap all
12327  * chip interrupts back to MSI-X 0.
12328  */
12329 static void reset_interrupts(struct hfi1_devdata *dd)
12330 {
12331         int i;
12332
12333         /* all interrupts handled by the general handler */
12334         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12335                 dd->gi_mask[i] = ~(u64)0;
12336
12337         /* all chip interrupts map to MSI-X 0 */
12338         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12339                 write_csr(dd, CCE_INT_MAP + (8*i), 0);
12340 }
12341
12342 static int set_up_interrupts(struct hfi1_devdata *dd)
12343 {
12344         struct hfi1_msix_entry *entries;
12345         u32 total, request;
12346         int i, ret;
12347         int single_interrupt = 0; /* we expect to have all the interrupts */
12348
12349         /*
12350          * Interrupt count:
12351          *      1 general, "slow path" interrupt (includes the SDMA engines
12352          *              slow source, SDMACleanupDone)
12353          *      N interrupts - one per used SDMA engine
12354          *      M interrupts - one per kernel receive context
12355          */
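        /*
         * E.g. with a hypothetical 16 SDMA engines in use and 8 kernel
         * receive contexts: total = 1 + 16 + 8 = 25 MSI-X vectors.
         */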
12356         total = 1 + dd->num_sdma + dd->n_krcv_queues;
12357
12358         entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12359         if (!entries) {
12360                 ret = -ENOMEM;
12361                 goto fail;
12362         }
12363         /* 1-1 MSI-X entry assignment */
12364         for (i = 0; i < total; i++)
12365                 entries[i].msix.entry = i;
12366
12367         /* ask for MSI-X interrupts */
12368         request = total;
12369         request_msix(dd, &request, entries);
12370
12371         if (request == 0) {
12372                 /* using INTx */
12373                 /* dd->num_msix_entries already zero */
12374                 kfree(entries);
12375                 single_interrupt = 1;
12376                 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12377         } else {
12378                 /* using MSI-X */
12379                 dd->num_msix_entries = request;
12380                 dd->msix_entries = entries;
12381
12382                 if (request != total) {
12383                         /* using MSI-X, with reduced interrupts */
12384                         dd_dev_err(
12385                                 dd,
12386                                 "cannot handle reduced interrupt case, want %u, got %u\n",
12387                                 total, request);
12388                         ret = -EINVAL;
12389                         goto fail;
12390                 }
12391                 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12392         }
12393
12394         /* mask all interrupts */
12395         set_intr_state(dd, 0);
12396         /* clear all pending interrupts */
12397         clear_all_interrupts(dd);
12398
12399         /* reset general handler mask, chip MSI-X mappings */
12400         reset_interrupts(dd);
12401
12402         if (single_interrupt)
12403                 ret = request_intx_irq(dd);
12404         else
12405                 ret = request_msix_irqs(dd);
12406         if (ret)
12407                 goto fail;
12408
12409         return 0;
12410
12411 fail:
12412         clean_up_interrupts(dd);
12413         return ret;
12414 }
12415
12416 /*
12417  * Set up context values in dd.  Sets:
12418  *
12419  *      num_rcv_contexts - number of contexts being used
12420  *      n_krcv_queues - number of kernel contexts
12421  *      first_user_ctxt - first non-kernel context in array of contexts
12422  *      freectxts  - number of free user contexts
12423  *      num_send_contexts - number of PIO send contexts being used
12424  */
12425 static int set_up_context_variables(struct hfi1_devdata *dd)
12426 {
12427         int num_kernel_contexts;
12428         int total_contexts;
12429         int ret;
12430         unsigned ngroups;
12431
12432         /*
12433          * Kernel contexts: (to be fixed later):
12434          * - at least 2, or 1 context per NUMA node, whichever is larger
12435          * - Context 0 - control context (VL15/multicast/error)
12436          * - Context 1 - default context
12437          */
12438         if (n_krcvqs)
12439                 /*
12440                  * Don't count context 0 in n_krcvqs since
12441                  * it isn't used for normal verbs traffic.
12442                  *
12443                  * krcvqs will reflect number of kernel
12444                  * receive contexts above 0.
12445                  */
12446                 num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
12447         else
12448                 num_kernel_contexts = num_online_nodes();
12449         num_kernel_contexts =
12450                 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12451         /*
12452          * Every kernel receive context needs an ACK send context.
12453          * One send context is allocated for each VL{0-7} and VL15.
12454          */
12455         if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12456                 dd_dev_err(dd,
12457                            "Reducing # kernel rcv contexts to: %d, from %d\n",
12458                            (int)(dd->chip_send_contexts - num_vls - 1),
12459                            (int)num_kernel_contexts);
12460                 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12461         }
12462         /*
12463          * User contexts: (to be fixed later)
12464          *      - default to 1 user context per CPU if num_user_contexts is
12465          *        negative
12466          */
12467         if (num_user_contexts < 0)
12468                 num_user_contexts = num_online_cpus();
12469
12470         total_contexts = num_kernel_contexts + num_user_contexts;
12471
12472         /*
12473          * Adjust the counts given a global max.
12474          */
12475         if (total_contexts > dd->chip_rcv_contexts) {
12476                 dd_dev_err(dd,
12477                            "Reducing # user receive contexts to: %d, from %d\n",
12478                            (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12479                            (int)num_user_contexts);
12480                 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12481                 /* recalculate */
12482                 total_contexts = num_kernel_contexts + num_user_contexts;
12483         }
12484
12485         /* the first N are kernel contexts, the rest are user contexts */
12486         dd->num_rcv_contexts = total_contexts;
12487         dd->n_krcv_queues = num_kernel_contexts;
12488         dd->first_user_ctxt = num_kernel_contexts;
12489         dd->freectxts = num_user_contexts;
12490         dd_dev_info(dd,
12491                 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12492                 (int)dd->chip_rcv_contexts,
12493                 (int)dd->num_rcv_contexts,
12494                 (int)dd->n_krcv_queues,
12495                 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
12496
12497         /*
12498          * Receive array allocation:
12499          *   All RcvArray entries are divided into groups of 8. This
12500          *   is required by the hardware and will speed up writes to
12501          *   consecutive entries by using write-combining of the entire
12502          *   cacheline.
12503          *
12504          *   The groups are divided evenly among all contexts; any
12505          *   left-over groups are given to the first N user
12506          *   contexts.
12507          */
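        /*
         * Worked example with hypothetical sizes: 8192 RcvArray entries
         * in groups of 8 give 1024 groups; with 40 receive contexts,
         * each context gets 1024 / 40 = 25 groups and the remaining 24
         * groups (nctxt_extra) go to the first 24 user contexts.
         */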
12508         dd->rcv_entries.group_size = RCV_INCREMENT;
12509         ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12510         dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12511         dd->rcv_entries.nctxt_extra = ngroups -
12512                 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12513         dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12514                     dd->rcv_entries.ngroups,
12515                     dd->rcv_entries.nctxt_extra);
12516         if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12517             MAX_EAGER_ENTRIES * 2) {
12518                 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12519                         dd->rcv_entries.group_size;
12520                 dd_dev_info(dd,
12521                    "RcvArray group count too high, change to %u\n",
12522                    dd->rcv_entries.ngroups);
12523                 dd->rcv_entries.nctxt_extra = 0;
12524         }
12525         /*
12526          * PIO send contexts
12527          */
12528         ret = init_sc_pools_and_sizes(dd);
12529         if (ret >= 0) { /* success */
12530                 dd->num_send_contexts = ret;
12531                 dd_dev_info(
12532                         dd,
12533                         "send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
12534                         dd->chip_send_contexts,
12535                         dd->num_send_contexts,
12536                         dd->sc_sizes[SC_KERNEL].count,
12537                         dd->sc_sizes[SC_ACK].count,
12538                         dd->sc_sizes[SC_USER].count);
12539                 ret = 0;        /* success */
12540         }
12541
12542         return ret;
12543 }
12544
12545 /*
12546  * Set the device/port partition key table. The MAD code
12547  * will ensure that, at least, the partial management
12548  * partition key is present in the table.
12549  */
12550 static void set_partition_keys(struct hfi1_pportdata *ppd)
12551 {
12552         struct hfi1_devdata *dd = ppd->dd;
12553         u64 reg = 0;
12554         int i;
12555
12556         dd_dev_info(dd, "Setting partition keys\n");
12557         for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12558                 reg |= (ppd->pkeys[i] &
12559                         RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12560                         ((i % 4) *
12561                          RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12562                 /* Each register holds 4 PKey values. */
12563                 if ((i % 4) == 3) {
12564                         write_csr(dd, RCV_PARTITION_KEY +
12565                                   ((i - 3) * 2), reg);
12566                         reg = 0;
12567                 }
12568         }
12569
12570         /* Always enable HW pkeys check when pkeys table is set */
12571         add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12572 }
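
/*
 * Illustrative example of the packing above, with hypothetical pkey
 * values and assuming 16-bit key fields: each 64-bit RCV_PARTITION_KEY
 * register holds four keys, so pkeys[0..3] = 0xffff, 0x8001, 0, 0 are
 * written as the single register value 0x000000008001ffff, and the
 * next register starts again with pkeys[4].
 */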
12573
12574 /*
12575  * These CSRs and memories are uninitialized on reset and must be
12576  * written before reading to set the ECC/parity bits.
12577  *
12578  * NOTE: All user context CSRs that are not mmaped write-only
12579  * (e.g. the TID flows) must be initialized even if the driver never
12580  * reads them.
12581  */
12582 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12583 {
12584         int i, j;
12585
12586         /* CceIntMap */
12587         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12588                 write_csr(dd, CCE_INT_MAP+(8*i), 0);
12589
12590         /* SendCtxtCreditReturnAddr */
12591         for (i = 0; i < dd->chip_send_contexts; i++)
12592                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12593
12594         /* PIO Send buffers */
12595         /* SDMA Send buffers */
12596         /* These are not normally read, and (presently) have no method
12597            to be read, so are not pre-initialized */
12598
12599         /* RcvHdrAddr */
12600         /* RcvHdrTailAddr */
12601         /* RcvTidFlowTable */
12602         for (i = 0; i < dd->chip_rcv_contexts; i++) {
12603                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12604                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12605                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
12606                         write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE+(8*j), 0);
12607         }
12608
12609         /* RcvArray */
12610         for (i = 0; i < dd->chip_rcv_array_count; i++)
12611                 write_csr(dd, RCV_ARRAY + (8*i),
12612                                         RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
12613
12614         /* RcvQPMapTable */
12615         for (i = 0; i < 32; i++)
12616                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12617 }
12618
12619 /*
12620  * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12621  */
12622 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12623                              u64 ctrl_bits)
12624 {
12625         unsigned long timeout;
12626         u64 reg;
12627
12628         /* is the condition present? */
12629         reg = read_csr(dd, CCE_STATUS);
12630         if ((reg & status_bits) == 0)
12631                 return;
12632
12633         /* clear the condition */
12634         write_csr(dd, CCE_CTRL, ctrl_bits);
12635
12636         /* wait for the condition to clear */
12637         timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12638         while (1) {
12639                 reg = read_csr(dd, CCE_STATUS);
12640                 if ((reg & status_bits) == 0)
12641                         return;
12642                 if (time_after(jiffies, timeout)) {
12643                         dd_dev_err(dd,
12644                                 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
12645                                 status_bits, reg & status_bits);
12646                         return;
12647                 }
12648                 udelay(1);
12649         }
12650 }
12651
12652 /* set CCE CSRs to chip reset defaults */
12653 static void reset_cce_csrs(struct hfi1_devdata *dd)
12654 {
12655         int i;
12656
12657         /* CCE_REVISION read-only */
12658         /* CCE_REVISION2 read-only */
12659         /* CCE_CTRL - bits clear automatically */
12660         /* CCE_STATUS read-only, use CceCtrl to clear */
12661         clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
12662         clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
12663         clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
12664         for (i = 0; i < CCE_NUM_SCRATCH; i++)
12665                 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
12666         /* CCE_ERR_STATUS read-only */
12667         write_csr(dd, CCE_ERR_MASK, 0);
12668         write_csr(dd, CCE_ERR_CLEAR, ~0ull);
12669         /* CCE_ERR_FORCE leave alone */
12670         for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
12671                 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
12672         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
12673         /* CCE_PCIE_CTRL leave alone */
12674         for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
12675                 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
12676                 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
12677                                         CCE_MSIX_TABLE_UPPER_RESETCSR);
12678         }
12679         for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
12680                 /* CCE_MSIX_PBA read-only */
12681                 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
12682                 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
12683         }
12684         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12685                 write_csr(dd, CCE_INT_MAP, 0);
12686         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
12687                 /* CCE_INT_STATUS read-only */
12688                 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
12689                 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
12690                 /* CCE_INT_FORCE leave alone */
12691                 /* CCE_INT_BLOCKED read-only */
12692         }
12693         for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
12694                 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
12695 }
12696
12697 /* set ASIC CSRs to chip reset defaults */
12698 static void reset_asic_csrs(struct hfi1_devdata *dd)
12699 {
12700         int i;
12701
12702         /*
12703          * If the HFIs are shared between separate nodes or VMs,
12704          * then more will need to be done here.  One idea is a module
12705          * parameter that returns early, letting the first power-on or
12706          * a known first load do the reset and blocking all others.
12707          */
12708
12709         if (!(dd->flags & HFI1_DO_INIT_ASIC))
12710                 return;
12711
12712         if (dd->icode != ICODE_FPGA_EMULATION) {
12713                 /* emulation does not have an SBus - leave these alone */
12714                 /*
12715                  * All writes to ASIC_CFG_SBUS_REQUEST do something.
12716                  * Notes:
12717                  * o The reset is not zero if aimed at the core.  See the
12718                  *   SBus documentation for details.
12719                  * o If the SBus firmware has been updated (e.g. by the BIOS),
12720                  *   will the reset revert that?
12721                  */
12722                 /* ASIC_CFG_SBUS_REQUEST leave alone */
12723                 write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
12724         }
12725         /* ASIC_SBUS_RESULT read-only */
12726         write_csr(dd, ASIC_STS_SBUS_COUNTERS, 0);
12727         for (i = 0; i < ASIC_NUM_SCRATCH; i++)
12728                 write_csr(dd, ASIC_CFG_SCRATCH + (8 * i), 0);
12729         write_csr(dd, ASIC_CFG_MUTEX, 0);       /* this will clear it */
12730
12731         /* We might want to retain this state across FLR if we ever use it */
12732         write_csr(dd, ASIC_CFG_DRV_STR, 0);
12733
12734         /* ASIC_CFG_THERM_POLL_EN leave alone */
12735         /* ASIC_STS_THERM read-only */
12736         /* ASIC_CFG_RESET leave alone */
12737
12738         write_csr(dd, ASIC_PCIE_SD_HOST_CMD, 0);
12739         /* ASIC_PCIE_SD_HOST_STATUS read-only */
12740         write_csr(dd, ASIC_PCIE_SD_INTRPT_DATA_CODE, 0);
12741         write_csr(dd, ASIC_PCIE_SD_INTRPT_ENABLE, 0);
12742         /* ASIC_PCIE_SD_INTRPT_PROGRESS read-only */
12743         write_csr(dd, ASIC_PCIE_SD_INTRPT_STATUS, ~0ull); /* clear */
12744         /* ASIC_HFI0_PCIE_SD_INTRPT_RSPD_DATA read-only */
12745         /* ASIC_HFI1_PCIE_SD_INTRPT_RSPD_DATA read-only */
12746         for (i = 0; i < 16; i++)
12747                 write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (8 * i), 0);
12748
12749         /* ASIC_GPIO_IN read-only */
12750         write_csr(dd, ASIC_GPIO_OE, 0);
12751         write_csr(dd, ASIC_GPIO_INVERT, 0);
12752         write_csr(dd, ASIC_GPIO_OUT, 0);
12753         write_csr(dd, ASIC_GPIO_MASK, 0);
12754         /* ASIC_GPIO_STATUS read-only */
12755         write_csr(dd, ASIC_GPIO_CLEAR, ~0ull);
12756         /* ASIC_GPIO_FORCE leave alone */
12757
12758         /* ASIC_QSFP1_IN read-only */
12759         write_csr(dd, ASIC_QSFP1_OE, 0);
12760         write_csr(dd, ASIC_QSFP1_INVERT, 0);
12761         write_csr(dd, ASIC_QSFP1_OUT, 0);
12762         write_csr(dd, ASIC_QSFP1_MASK, 0);
12763         /* ASIC_QSFP1_STATUS read-only */
12764         write_csr(dd, ASIC_QSFP1_CLEAR, ~0ull);
12765         /* ASIC_QSFP1_FORCE leave alone */
12766
12767         /* ASIC_QSFP2_IN read-only */
12768         write_csr(dd, ASIC_QSFP2_OE, 0);
12769         write_csr(dd, ASIC_QSFP2_INVERT, 0);
12770         write_csr(dd, ASIC_QSFP2_OUT, 0);
12771         write_csr(dd, ASIC_QSFP2_MASK, 0);
12772         /* ASIC_QSFP2_STATUS read-only */
12773         write_csr(dd, ASIC_QSFP2_CLEAR, ~0ull);
12774         /* ASIC_QSFP2_FORCE leave alone */
12775
12776         write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_RESETCSR);
12777         /* this also writes a NOP command, clearing paging mode */
12778         write_csr(dd, ASIC_EEP_ADDR_CMD, 0);
12779         write_csr(dd, ASIC_EEP_DATA, 0);
12780 }
12781
12782 /* set MISC CSRs to chip reset defaults */
12783 static void reset_misc_csrs(struct hfi1_devdata *dd)
12784 {
12785         int i;
12786
12787         for (i = 0; i < 32; i++) {
12788                 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
12789                 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
12790                 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
12791         }
12792         /* MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
12793            only be written in 128-byte chunks */
12794         /* init RSA engine to clear lingering errors */
12795         write_csr(dd, MISC_CFG_RSA_CMD, 1);
12796         write_csr(dd, MISC_CFG_RSA_MU, 0);
12797         write_csr(dd, MISC_CFG_FW_CTRL, 0);
12798         /* MISC_STS_8051_DIGEST read-only */
12799         /* MISC_STS_SBM_DIGEST read-only */
12800         /* MISC_STS_PCIE_DIGEST read-only */
12801         /* MISC_STS_FAB_DIGEST read-only */
12802         /* MISC_ERR_STATUS read-only */
12803         write_csr(dd, MISC_ERR_MASK, 0);
12804         write_csr(dd, MISC_ERR_CLEAR, ~0ull);
12805         /* MISC_ERR_FORCE leave alone */
12806 }
12807
12808 /* set TXE CSRs to chip reset defaults */
12809 static void reset_txe_csrs(struct hfi1_devdata *dd)
12810 {
12811         int i;
12812
12813         /*
12814          * TXE Kernel CSRs
12815          */
12816         write_csr(dd, SEND_CTRL, 0);
12817         __cm_reset(dd, 0);      /* reset CM internal state */
12818         /* SEND_CONTEXTS read-only */
12819         /* SEND_DMA_ENGINES read-only */
12820         /* SEND_PIO_MEM_SIZE read-only */
12821         /* SEND_DMA_MEM_SIZE read-only */
12822         write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
12823         pio_reset_all(dd);      /* SEND_PIO_INIT_CTXT */
12824         /* SEND_PIO_ERR_STATUS read-only */
12825         write_csr(dd, SEND_PIO_ERR_MASK, 0);
12826         write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
12827         /* SEND_PIO_ERR_FORCE leave alone */
12828         /* SEND_DMA_ERR_STATUS read-only */
12829         write_csr(dd, SEND_DMA_ERR_MASK, 0);
12830         write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
12831         /* SEND_DMA_ERR_FORCE leave alone */
12832         /* SEND_EGRESS_ERR_STATUS read-only */
12833         write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
12834         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
12835         /* SEND_EGRESS_ERR_FORCE leave alone */
12836         write_csr(dd, SEND_BTH_QP, 0);
12837         write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
12838         write_csr(dd, SEND_SC2VLT0, 0);
12839         write_csr(dd, SEND_SC2VLT1, 0);
12840         write_csr(dd, SEND_SC2VLT2, 0);
12841         write_csr(dd, SEND_SC2VLT3, 0);
12842         write_csr(dd, SEND_LEN_CHECK0, 0);
12843         write_csr(dd, SEND_LEN_CHECK1, 0);
12844         /* SEND_ERR_STATUS read-only */
12845         write_csr(dd, SEND_ERR_MASK, 0);
12846         write_csr(dd, SEND_ERR_CLEAR, ~0ull);
12847         /* SEND_ERR_FORCE read-only */
12848         for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
12849                 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8*i), 0);
12850         for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
12851                 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8*i), 0);
12852         for (i = 0; i < dd->chip_send_contexts/NUM_CONTEXTS_PER_SET; i++)
12853                 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8*i), 0);
12854         for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
12855                 write_csr(dd, SEND_COUNTER_ARRAY32 + (8*i), 0);
12856         for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
12857                 write_csr(dd, SEND_COUNTER_ARRAY64 + (8*i), 0);
12858         write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
12859         write_csr(dd, SEND_CM_GLOBAL_CREDIT,
12860                                         SEND_CM_GLOBAL_CREDIT_RESETCSR);
12861         /* SEND_CM_CREDIT_USED_STATUS read-only */
12862         write_csr(dd, SEND_CM_TIMER_CTRL, 0);
12863         write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
12864         write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
12865         write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
12866         write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
12867         for (i = 0; i < TXE_NUM_DATA_VL; i++)
12868                 write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
12869         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
12870         /* SEND_CM_CREDIT_USED_VL read-only */
12871         /* SEND_CM_CREDIT_USED_VL15 read-only */
12872         /* SEND_EGRESS_CTXT_STATUS read-only */
12873         /* SEND_EGRESS_SEND_DMA_STATUS read-only */
12874         write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
12875         /* SEND_EGRESS_ERR_INFO read-only */
12876         /* SEND_EGRESS_ERR_SOURCE read-only */
12877
12878         /*
12879          * TXE Per-Context CSRs
12880          */
12881         for (i = 0; i < dd->chip_send_contexts; i++) {
12882                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
12883                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
12884                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12885                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
12886                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
12887                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
12888                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
12889                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
12890                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
12891                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
12892                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
12893                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
12894         }
12895
12896         /*
12897          * TXE Per-SDMA CSRs
12898          */
12899         for (i = 0; i < dd->chip_sdma_engines; i++) {
12900                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
12901                 /* SEND_DMA_STATUS read-only */
12902                 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
12903                 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
12904                 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
12905                 /* SEND_DMA_HEAD read-only */
12906                 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
12907                 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
12908                 /* SEND_DMA_IDLE_CNT read-only */
12909                 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
12910                 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
12911                 /* SEND_DMA_DESC_FETCHED_CNT read-only */
12912                 /* SEND_DMA_ENG_ERR_STATUS read-only */
12913                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
12914                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
12915                 /* SEND_DMA_ENG_ERR_FORCE leave alone */
12916                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
12917                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
12918                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
12919                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
12920                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
12921                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
12922                 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
12923         }
12924 }
12925
12926 /*
12927  * Expect on entry:
12928  * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
12929  */
12930 static void init_rbufs(struct hfi1_devdata *dd)
12931 {
12932         u64 reg;
12933         int count;
12934
12935         /*
12936          * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
12937          * clear.
12938          */
12939         count = 0;
12940         while (1) {
12941                 reg = read_csr(dd, RCV_STATUS);
12942                 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
12943                             | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
12944                         break;
12945                 /*
12946                  * Give up after 1ms - maximum wait time.
12947                  *
12948                  * RBuf size is 148KiB.  Slowest possible is PCIe Gen1 x1 at
12949                  * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
12950                  *      148 KB / (66% * 250MB/s) = 920us
12951                  */
12952                 if (count++ > 500) {
12953                         dd_dev_err(dd,
12954                                 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
12955                                 __func__, reg);
12956                         break;
12957                 }
12958                 udelay(2); /* do not busy-wait the CSR */
12959         }
12960
12961         /* start the init - expect RcvCtrl to be 0 */
12962         write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
12963
12964         /*
12965          * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
12966          * period after the write before RcvStatus.RxRbufInitDone is valid.
12967          * The delay in the first run through the loop below is sufficient and
12968          * required before the first read of RcvStatus.RxRbufInitDone.
12969          */
12970         read_csr(dd, RCV_CTRL);
12971
12972         /* wait for the init to finish */
12973         count = 0;
12974         while (1) {
12975                 /* delay is required first time through - see above */
12976                 udelay(2); /* do not busy-wait the CSR */
12977                 reg = read_csr(dd, RCV_STATUS);
12978                 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
12979                         break;
12980
12981                 /* give up after 100us - slowest possible at 33MHz is 73us */
12982                 if (count++ > 50) {
12983                         dd_dev_err(dd,
12984                                 "%s: RcvStatus.RxRbufInitDone not set, continuing\n",
12985                                 __func__);
12986                         break;
12987                 }
12988         }
12989 }
12990
12991 /* set RXE CSRs to chip reset defaults */
12992 static void reset_rxe_csrs(struct hfi1_devdata *dd)
12993 {
12994         int i, j;
12995
12996         /*
12997          * RXE Kernel CSRs
12998          */
12999         write_csr(dd, RCV_CTRL, 0);
13000         init_rbufs(dd);
13001         /* RCV_STATUS read-only */
13002         /* RCV_CONTEXTS read-only */
13003         /* RCV_ARRAY_CNT read-only */
13004         /* RCV_BUF_SIZE read-only */
13005         write_csr(dd, RCV_BTH_QP, 0);
13006         write_csr(dd, RCV_MULTICAST, 0);
13007         write_csr(dd, RCV_BYPASS, 0);
13008         write_csr(dd, RCV_VL15, 0);
13009         /* this is a clear-down */
13010         write_csr(dd, RCV_ERR_INFO,
13011                         RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13012         /* RCV_ERR_STATUS read-only */
13013         write_csr(dd, RCV_ERR_MASK, 0);
13014         write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13015         /* RCV_ERR_FORCE leave alone */
13016         for (i = 0; i < 32; i++)
13017                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13018         for (i = 0; i < 4; i++)
13019                 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13020         for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13021                 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13022         for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13023                 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13024         for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13025                 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13026                 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13027                 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13028         }
13029         for (i = 0; i < 32; i++)
13030                 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13031
13032         /*
13033          * RXE Kernel and User Per-Context CSRs
13034          */
13035         for (i = 0; i < dd->chip_rcv_contexts; i++) {
13036                 /* kernel */
13037                 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13038                 /* RCV_CTXT_STATUS read-only */
13039                 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13040                 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13041                 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13042                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13043                 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13044                 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13045                 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13046                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13047                 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13048                 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13049
13050                 /* user */
13051                 /* RCV_HDR_TAIL read-only */
13052                 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13053                 /* RCV_EGR_INDEX_TAIL read-only */
13054                 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13055                 /* RCV_EGR_OFFSET_TAIL read-only */
13056                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13057                         write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j),
13058                                 0);
13059                 }
13060         }
13061 }
13062
13063 /*
13064  * Set sc2vl tables.
13065  *
13066  * They power on to zeros, so to avoid send context errors
13067  * they need to be set:
13068  *
13069  * SC 0-7 -> VL 0-7 (respectively)
13070  * SC 15  -> VL 15
13071  * otherwise
13072  *        -> VL 0
13073  */
13074 static void init_sc2vl_tables(struct hfi1_devdata *dd)
13075 {
13076         int i;
13077         /* init per architecture spec, constrained by hardware capability */
13078
13079         /* HFI maps sent packets */
13080         write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13081                 0,
13082                 0, 0, 1, 1,
13083                 2, 2, 3, 3,
13084                 4, 4, 5, 5,
13085                 6, 6, 7, 7));
13086         write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13087                 1,
13088                 8, 0, 9, 0,
13089                 10, 0, 11, 0,
13090                 12, 0, 13, 0,
13091                 14, 0, 15, 15));
13092         write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13093                 2,
13094                 16, 0, 17, 0,
13095                 18, 0, 19, 0,
13096                 20, 0, 21, 0,
13097                 22, 0, 23, 0));
13098         write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13099                 3,
13100                 24, 0, 25, 0,
13101                 26, 0, 27, 0,
13102                 28, 0, 29, 0,
13103                 30, 0, 31, 0));
13104
13105         /* DC maps received packets */
13106         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13107                 15_0,
13108                 0, 0, 1, 1,  2, 2,  3, 3,  4, 4,  5, 5,  6, 6,  7,  7,
13109                 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13110         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13111                 31_16,
13112                 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13113                 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13114
13115         /* initialize the cached sc2vl values consistently with h/w */
13116         for (i = 0; i < 32; i++) {
13117                 if (i < 8 || i == 15)
13118                         *((u8 *)(dd->sc2vl) + i) = (u8)i;
13119                 else
13120                         *((u8 *)(dd->sc2vl) + i) = 0;
13121         }
13122 }
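/*
 * Minimal sketch of the SC-to-VL mapping programmed above (illustration
 * only; the driver does not use this helper): SCs 0-7 map to VLs 0-7,
 * SC 15 maps to VL 15, and every other SC maps to VL 0.
 */
static inline u8 example_sc_to_vl(u8 sc)
{
        if (sc < 8)
                return sc;
        if (sc == 15)
                return 15;
        return 0;
}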
13123
13124 /*
13125  * Read chip sizes and then reset parts to sane, disabled values.  We cannot
13126  * depend on the chip going through a power-on reset - a driver may be loaded
13127  * and unloaded many times.
13128  *
13129  * Do not write any CSR values to the chip in this routine - there may be
13130  * a reset following the (possible) FLR in this routine.
13131  *
13132  */
13133 static void init_chip(struct hfi1_devdata *dd)
13134 {
13135         int i;
13136
13137         /*
13138          * Put the HFI CSRs in a known state.
13139          * Combine this with a DC reset.
13140          *
13141          * Stop the device from doing anything while we do a
13142          * reset.  We know there are no other active users of
13143          * the device since we are now in charge.  Turn off
13144          * all outbound and inbound traffic and make sure
13145          * the device does not generate any interrupts.
13146          */
13147
13148         /* disable send contexts and SDMA engines */
13149         write_csr(dd, SEND_CTRL, 0);
13150         for (i = 0; i < dd->chip_send_contexts; i++)
13151                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13152         for (i = 0; i < dd->chip_sdma_engines; i++)
13153                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13154         /* disable port (turn off RXE inbound traffic) and contexts */
13155         write_csr(dd, RCV_CTRL, 0);
13156         for (i = 0; i < dd->chip_rcv_contexts; i++)
13157                 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13158         /* mask all interrupt sources */
13159         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13160                 write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
13161
13162         /*
13163          * DC Reset: do a full DC reset before the register clear.
13164          * A recommended length of time to hold is one CSR read,
13165          * so reread the CceDcCtrl.  Then, hold the DC in reset
13166          * across the clear.
13167          */
13168         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13169         (void) read_csr(dd, CCE_DC_CTRL);
13170
13171         if (use_flr) {
13172                 /*
13173                  * A FLR will reset the SPC core and part of the PCIe.
13174                  * The parts that need to be restored have already been
13175                  * saved.
13176                  */
13177                 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13178
13179                 /* do the FLR, the DC reset will remain */
13180                 hfi1_pcie_flr(dd);
13181
13182                 /* restore command and BARs */
13183                 restore_pci_variables(dd);
13184
13185                 if (is_ax(dd)) {
13186                         dd_dev_info(dd, "Resetting CSRs with FLR\n");
13187                         hfi1_pcie_flr(dd);
13188                         restore_pci_variables(dd);
13189                 }
13190
13191                 reset_asic_csrs(dd);
13192         } else {
13193                 dd_dev_info(dd, "Resetting CSRs with writes\n");
13194                 reset_cce_csrs(dd);
13195                 reset_txe_csrs(dd);
13196                 reset_rxe_csrs(dd);
13197                 reset_asic_csrs(dd);
13198                 reset_misc_csrs(dd);
13199         }
13200         /* clear the DC reset */
13201         write_csr(dd, CCE_DC_CTRL, 0);
13202
13203         /* Set the LED off */
13204         if (is_ax(dd))
13205                 setextled(dd, 0);
13206         /*
13207          * Clear the QSFP reset.
13208          * An FLR enforces a 0 on all out pins. The driver does not touch
13209          * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low, holding
13210          * anything that is plugged in constantly in reset if it pays
13211          * attention to RESET_N.
13212          * Prime examples of this are optical cables. Set all pins high.
13213          * I2CCLK and I2CDAT will change per direction, and INT_N and
13214          * MODPRS_N are input only and their value is ignored.
13215          */
13216         write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13217         write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
13218 }
13219
13220 static void init_early_variables(struct hfi1_devdata *dd)
13221 {
13222         int i;
13223
13224         /* assign link credit variables */
13225         dd->vau = CM_VAU;
13226         dd->link_credits = CM_GLOBAL_CREDITS;
13227         if (is_ax(dd))
13228                 dd->link_credits--;
13229         dd->vcu = cu_to_vcu(hfi1_cu);
13230         /* enough room for 8 MAD packets plus header - 17K */
13231         dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13232         if (dd->vl15_init > dd->link_credits)
13233                 dd->vl15_init = dd->link_credits;
13234
13235         write_uninitialized_csrs_and_memories(dd);
13236
13237         if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13238                 for (i = 0; i < dd->num_pports; i++) {
13239                         struct hfi1_pportdata *ppd = &dd->pport[i];
13240
13241                         set_partition_keys(ppd);
13242                 }
13243         init_sc2vl_tables(dd);
13244 }
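/*
 * Worked example for the VL15 credit sizing above (illustration only):
 * 8 MAD packets of 2048 bytes plus a 128 byte header each is
 * 8 * (2048 + 128) = 17408 bytes (~17K), which is then converted to
 * allocation units with vau_to_au() and clamped to the global credit
 * limit.
 */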
13245
13246 static void init_kdeth_qp(struct hfi1_devdata *dd)
13247 {
13248         /* user changed the KDETH_QP */
13249         if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13250                 /* out of range or illegal value */
13251                 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13252                 kdeth_qp = 0;
13253         }
13254         if (kdeth_qp == 0)      /* not set, or failed range check */
13255                 kdeth_qp = DEFAULT_KDETH_QP;
13256
13257         write_csr(dd, SEND_BTH_QP,
13258                         (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK)
13259                                 << SEND_BTH_QP_KDETH_QP_SHIFT);
13260
13261         write_csr(dd, RCV_BTH_QP,
13262                         (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK)
13263                                 << RCV_BTH_QP_KDETH_QP_SHIFT);
13264 }
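/*
 * Sketch of the prefix validation above (illustration only; the driver
 * operates directly on the kdeth_qp module parameter): a user-supplied
 * prefix is honored only in the range 0x01-0xfe, anything else falls
 * back to DEFAULT_KDETH_QP.
 */
static inline uint example_resolve_kdeth_qp(uint requested)
{
        if (requested == 0 || requested >= 0xff)
                return DEFAULT_KDETH_QP;
        return requested;
}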
13265
13266 /**
13267  * init_qpmap_table
13268  * @dd - device data
13269  * @first_ctxt - first context
13270  * @last_ctxt - last context
13271  *
13272  * This routine sets the qpn mapping table that
13273  * is indexed by qpn[8:1].
13274  *
13275  * The routine will round robin the 256 settings
13276  * from first_ctxt to last_ctxt.
13277  *
13278  * The first/last looks ahead to having specialized
13279  * receive contexts for mgmt and bypass.  Normal
13280  * verbs traffic is assumed to be on a range
13281  * of receive contexts.
13282  */
13283 static void init_qpmap_table(struct hfi1_devdata *dd,
13284                              u32 first_ctxt,
13285                              u32 last_ctxt)
13286 {
13287         u64 reg = 0;
13288         u64 regno = RCV_QP_MAP_TABLE;
13289         int i;
13290         u64 ctxt = first_ctxt;
13291
13292         for (i = 0; i < 256;) {
13293                 reg |= ctxt << (8 * (i % 8));
13294                 i++;
13295                 ctxt++;
13296                 if (ctxt > last_ctxt)
13297                         ctxt = first_ctxt;
13298                 if (i % 8 == 0) {
13299                         write_csr(dd, regno, reg);
13300                         reg = 0;
13301                         regno += 8;
13302                 }
13303         }
13304         if (i % 8)
13305                 write_csr(dd, regno, reg);
13306
13307         add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13308                         | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13309 }
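/*
 * Worked example (illustration only): with first_ctxt = 1 and
 * last_ctxt = 3 the 256 entries cycle 1, 2, 3, 1, 2, 3, ...  Eight
 * byte-wide entries are packed per 64-bit RCV_QP_MAP_TABLE register, so
 * the first register written is 0x0201030201030201 (entry 0 in the
 * least significant byte).
 */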
13310
13311 /**
13312  * init_qos - init RX qos
13313  * @dd - device data
13314  * @first_ctxt - first context
13315  *
13316  * This routine initializes Rule 0 and the
13317  * RSM map table to implement qos.
13318  *
13319  * If all of the limit tests succeed,
13320  * qos is applied based on the array
13321  * interpretation of krcvqs where
13322  * entry 0 is VL0.
13323  *
13324  * The number of vl bits (n) and the number of qpn
13325  * bits (m) are computed to feed both the RSM map table
13326  * and the single rule.
13327  *
13328  */
13329 static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
13330 {
13331         u8 max_by_vl = 0;
13332         unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13333         u64 *rsmmap;
13334         u64 reg;
13335         u8  rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */
13336
13337         /* validate */
13338         if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13339             num_vls == 1 ||
13340             krcvqsset <= 1)
13341                 goto bail;
13342         for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
13343                 if (krcvqs[i] > max_by_vl)
13344                         max_by_vl = krcvqs[i];
13345         if (max_by_vl > 32)
13346                 goto bail;
13347         qpns_per_vl = __roundup_pow_of_two(max_by_vl);
13348         /* determine bits vl */
13349         n = ilog2(num_vls);
13350         /* determine bits for qpn */
13351         m = ilog2(qpns_per_vl);
13352         if ((m + n) > 7)
13353                 goto bail;
13354         if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
13355                 goto bail;
13356         rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
13357         if (!rsmmap)
13358                 goto bail;
13359         memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
13360         /* init the local copy of the table */
13361         for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
13362                 unsigned tctxt;
13363
13364                 for (qpn = 0, tctxt = ctxt;
13365                      krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13366                         unsigned idx, regoff, regidx;
13367
13368                         /* generate index <= 128 */
13369                         idx = (qpn << n) ^ i;
13370                         regoff = (idx % 8) * 8;
13371                         regidx = idx / 8;
13372                         reg = rsmmap[regidx];
13373                         /* replace 0xff with context number */
13374                         reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13375                                 << regoff);
13376                         reg |= (u64)(tctxt++) << regoff;
13377                         rsmmap[regidx] = reg;
13378                         if (tctxt == ctxt + krcvqs[i])
13379                                 tctxt = ctxt;
13380                 }
13381                 ctxt += krcvqs[i];
13382         }
13383         /* flush cached copies to chip */
13384         for (i = 0; i < NUM_MAP_REGS; i++)
13385                 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
13386         /* add rule0 */
13387         write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
13388                 RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK
13389                         << RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
13390                 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13391         write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
13392                 LRH_BTH_MATCH_OFFSET
13393                         << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13394                 LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13395                 LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13396                 ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13397                 QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13398                 ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13399         write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
13400                 LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
13401                 LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
13402                 LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
13403                 LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
13404         /* Enable RSM */
13405         add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13406         kfree(rsmmap);
13407         /* map everything else to first context */
13408         init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
13409         dd->qos_shift = n + 1;
13410         return;
13411 bail:
13412         dd->qos_shift = 1;
13413         init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
13414 }
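/*
 * Worked example (illustration only): with num_vls = 4 and
 * krcvqs = {2, 2, 2, 2}, max_by_vl = 2, qpns_per_vl = 2, n = ilog2(4) = 2
 * VL bits and m = ilog2(2) = 1 QPN bit.  m + n = 3 <= 7, 4 * 2 = 8 receive
 * contexts are required, each map-table index is computed as
 * (qpn << 2) ^ vl, and qos_shift becomes n + 1 = 3.
 */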
13415
13416 static void init_rxe(struct hfi1_devdata *dd)
13417 {
13418         /* enable all receive errors */
13419         write_csr(dd, RCV_ERR_MASK, ~0ull);
13420         /* setup QPN map table - start where VL15 context leaves off */
13421         init_qos(
13422                 dd,
13423                 dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0);
13424         /*
13425          * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13426          * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13427          * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
13428          * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13429          * Max_PayLoad_Size set to its minimum of 128.
13430          *
13431          * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13432          * (64 bytes).  Max_Payload_Size is possibly modified upward in
13433          * tune_pcie_caps() which is called after this routine.
13434          */
13435 }
13436
13437 static void init_other(struct hfi1_devdata *dd)
13438 {
13439         /* enable all CCE errors */
13440         write_csr(dd, CCE_ERR_MASK, ~0ull);
13441         /* enable *some* Misc errors */
13442         write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13443         /* enable all DC errors, except LCB */
13444         write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13445         write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13446 }
13447
13448 /*
13449  * Fill out the given AU table using the given CU.  A CU is defined in terms
13450  * of AUs.  The table is an encoding: given the index, how many AUs does that
13451  * represent?
13452  *
13453  * NOTE: Assumes that the register layout is the same for the
13454  * local and remote tables.
13455  */
13456 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13457                                u32 csr0to3, u32 csr4to7)
13458 {
13459         write_csr(dd, csr0to3,
13460                    0ull <<
13461                         SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT
13462                 |  1ull <<
13463                         SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT
13464                 |  2ull * cu <<
13465                         SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT
13466                 |  4ull * cu <<
13467                         SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
13468         write_csr(dd, csr4to7,
13469                    8ull * cu <<
13470                         SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT
13471                 | 16ull * cu <<
13472                         SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT
13473                 | 32ull * cu <<
13474                         SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT
13475                 | 64ull * cu <<
13476                         SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
13477
13478 }
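/*
 * Worked example (illustration only): the eight table entries written
 * above encode 0, 1, 2*CU, 4*CU, 8*CU, 16*CU, 32*CU and 64*CU AUs.
 * With CU = 1 the table reads 0, 1, 2, 4, 8, 16, 32, 64; with CU = 2 it
 * reads 0, 1, 4, 8, 16, 32, 64, 128.
 */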
13479
13480 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13481 {
13482         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
13483                                         SEND_CM_LOCAL_AU_TABLE4_TO7);
13484 }
13485
13486 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13487 {
13488         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
13489                                         SEND_CM_REMOTE_AU_TABLE4_TO7);
13490 }
13491
13492 static void init_txe(struct hfi1_devdata *dd)
13493 {
13494         int i;
13495
13496         /* enable all PIO, SDMA, general, and Egress errors */
13497         write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13498         write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13499         write_csr(dd, SEND_ERR_MASK, ~0ull);
13500         write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13501
13502         /* enable all per-context and per-SDMA engine errors */
13503         for (i = 0; i < dd->chip_send_contexts; i++)
13504                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13505         for (i = 0; i < dd->chip_sdma_engines; i++)
13506                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13507
13508         /* set the local CU to AU mapping */
13509         assign_local_cm_au_table(dd, dd->vcu);
13510
13511         /*
13512          * Set reasonable default for Credit Return Timer
13513          * Don't set on Simulator - causes it to choke.
13514          */
13515         if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13516                 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13517 }
13518
13519 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13520 {
13521         struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13522         unsigned sctxt;
13523         int ret = 0;
13524         u64 reg;
13525
13526         if (!rcd || !rcd->sc) {
13527                 ret = -EINVAL;
13528                 goto done;
13529         }
13530         sctxt = rcd->sc->hw_context;
13531         reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13532                 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13533                  SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13534         /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13535         if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13536                 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13537         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13538         /*
13539          * Enable send-side J_KEY integrity check, unless this is A0 h/w
13540          * (due to A0 erratum).
13541          */
13542         if (!is_ax(dd)) {
13543                 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13544                 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13545                 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13546         }
13547
13548         /* Enable J_KEY check on receive context. */
13549         reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13550                 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13551                  RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13552         write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13553 done:
13554         return ret;
13555 }
13556
13557 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13558 {
13559         struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13560         unsigned sctxt;
13561         int ret = 0;
13562         u64 reg;
13563
13564         if (!rcd || !rcd->sc) {
13565                 ret = -EINVAL;
13566                 goto done;
13567         }
13568         sctxt = rcd->sc->hw_context;
13569         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
13570         /*
13571          * Disable send-side J_KEY integrity check, unless this is A0 h/w.
13572          * This check would not have been enabled for A0 h/w, see
13573          * set_ctxt_jkey().
13574          */
13575         if (!is_ax(dd)) {
13576                 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13577                 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13578                 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13579         }
13580         /* Turn off the J_KEY on the receive side */
13581         write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
13582 done:
13583         return ret;
13584 }
13585
13586 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
13587 {
13588         struct hfi1_ctxtdata *rcd;
13589         unsigned sctxt;
13590         int ret = 0;
13591         u64 reg;
13592
13593         if (ctxt < dd->num_rcv_contexts)
13594                 rcd = dd->rcd[ctxt];
13595         else {
13596                 ret = -EINVAL;
13597                 goto done;
13598         }
13599         if (!rcd || !rcd->sc) {
13600                 ret = -EINVAL;
13601                 goto done;
13602         }
13603         sctxt = rcd->sc->hw_context;
13604         reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
13605                 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
13606         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
13607         reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13608         reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13609         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13610 done:
13611         return ret;
13612 }
13613
13614 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
13615 {
13616         struct hfi1_ctxtdata *rcd;
13617         unsigned sctxt;
13618         int ret = 0;
13619         u64 reg;
13620
13621         if (ctxt < dd->num_rcv_contexts)
13622                 rcd = dd->rcd[ctxt];
13623         else {
13624                 ret = -EINVAL;
13625                 goto done;
13626         }
13627         if (!rcd || !rcd->sc) {
13628                 ret = -EINVAL;
13629                 goto done;
13630         }
13631         sctxt = rcd->sc->hw_context;
13632         reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13633         reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13634         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13635         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13636 done:
13637         return ret;
13638 }
13639
13640 /*
13641  * Start doing the clean up of the chip. Our clean up happens in multiple
13642  * stages and this is just the first.
13643  */
13644 void hfi1_start_cleanup(struct hfi1_devdata *dd)
13645 {
13646         free_cntrs(dd);
13647         free_rcverr(dd);
13648         clean_up_interrupts(dd);
13649 }
13650
13651 #define HFI_BASE_GUID(dev) \
13652         ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
13653
13654 /*
13655  * Certain chip functions need to be initialized only once per asic
13656  * instead of per-device. This function finds the peer device and
13657  * checks whether that chip initialization needs to be done by this
13658  * device.
13659  */
13660 static void asic_should_init(struct hfi1_devdata *dd)
13661 {
13662         unsigned long flags;
13663         struct hfi1_devdata *tmp, *peer = NULL;
13664
13665         spin_lock_irqsave(&hfi1_devs_lock, flags);
13666         /* Find our peer device */
13667         list_for_each_entry(tmp, &hfi1_dev_list, list) {
13668                 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
13669                     dd->unit != tmp->unit) {
13670                         peer = tmp;
13671                         break;
13672                 }
13673         }
13674
13675         /*
13676          * "Claim" the ASIC for initialization if it hasn't been
13677          * "claimed" yet.
13678          */
13679         if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC))
13680                 dd->flags |= HFI1_DO_INIT_ASIC;
13681         spin_unlock_irqrestore(&hfi1_devs_lock, flags);
13682 }
13683
13684 /*
13685  * Set dd->boardname.  Use a generic name if a name is not returned from
13686  * EFI variable space.
13687  *
13688  * Return 0 on success, -ENOMEM if space could not be allocated.
13689  */
13690 static int obtain_boardname(struct hfi1_devdata *dd)
13691 {
13692         /* generic board description */
13693         const char generic[] =
13694                 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
13695         unsigned long size;
13696         int ret;
13697
13698         ret = read_hfi1_efi_var(dd, "description", &size,
13699                                 (void **)&dd->boardname);
13700         if (ret) {
13701                 dd_dev_err(dd, "Board description not found\n");
13702                 /* use generic description */
13703                 dd->boardname = kstrdup(generic, GFP_KERNEL);
13704                 if (!dd->boardname)
13705                         return -ENOMEM;
13706         }
13707         return 0;
13708 }
13709
13710 /**
13711  * Allocate and initialize the device structure for the hfi.
13712  * @pdev: the pci_dev for hfi1_ib device
13713  * @ent: pci_device_id struct for this dev
13714  *
13715  * Also allocates, initializes, and returns the devdata struct for this
13716  * device instance
13717  *
13718  * This is global, and is called directly at init to set up the
13719  * chip-specific function pointers for later use.
13720  */
13721 struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
13722                                   const struct pci_device_id *ent)
13723 {
13724         struct hfi1_devdata *dd;
13725         struct hfi1_pportdata *ppd;
13726         u64 reg;
13727         int i, ret;
13728         static const char * const inames[] = { /* implementation names */
13729                 "RTL silicon",
13730                 "RTL VCS simulation",
13731                 "RTL FPGA emulation",
13732                 "Functional simulator"
13733         };
13734
13735         dd = hfi1_alloc_devdata(pdev,
13736                 NUM_IB_PORTS * sizeof(struct hfi1_pportdata));
13737         if (IS_ERR(dd))
13738                 goto bail;
13739         ppd = dd->pport;
13740         for (i = 0; i < dd->num_pports; i++, ppd++) {
13741                 int vl;
13742                 /* init common fields */
13743                 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
13744                 /* DC supports 4 link widths */
13745                 ppd->link_width_supported =
13746                         OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
13747                         OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
13748                 ppd->link_width_downgrade_supported =
13749                         ppd->link_width_supported;
13750                 /* start out enabling only 4X */
13751                 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
13752                 ppd->link_width_downgrade_enabled =
13753                                         ppd->link_width_downgrade_supported;
13754                 /* link width active is 0 when link is down */
13755                 /* link width downgrade active is 0 when link is down */
13756
13757                 if (num_vls < HFI1_MIN_VLS_SUPPORTED
13758                         || num_vls > HFI1_MAX_VLS_SUPPORTED) {
13759                         hfi1_early_err(&pdev->dev,
13760                                        "Invalid num_vls %u, using %u VLs\n",
13761                                     num_vls, HFI1_MAX_VLS_SUPPORTED);
13762                         num_vls = HFI1_MAX_VLS_SUPPORTED;
13763                 }
13764                 ppd->vls_supported = num_vls;
13765                 ppd->vls_operational = ppd->vls_supported;
13766                 /* Set the default MTU. */
13767                 for (vl = 0; vl < num_vls; vl++)
13768                         dd->vld[vl].mtu = hfi1_max_mtu;
13769                 dd->vld[15].mtu = MAX_MAD_PACKET;
13770                 /*
13771                  * Set the initial values to reasonable default, will be set
13772                  * for real when link is up.
13773                  */
13774                 ppd->lstate = IB_PORT_DOWN;
13775                 ppd->overrun_threshold = 0x4;
13776                 ppd->phy_error_threshold = 0xf;
13777                 ppd->port_crc_mode_enabled = link_crc_mask;
13778                 /* initialize supported LTP CRC mode */
13779                 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
13780                 /* initialize enabled LTP CRC mode */
13781                 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
13782                 /* start in offline */
13783                 ppd->host_link_state = HLS_DN_OFFLINE;
13784                 init_vl_arb_caches(ppd);
13785         }
13786
13787         dd->link_default = HLS_DN_POLL;
13788
13789         /*
13790          * Do remaining PCIe setup and save PCIe values in dd.
13791          * Any error printing is already done by the init code.
13792          * On return, we have the chip mapped.
13793          */
13794         ret = hfi1_pcie_ddinit(dd, pdev, ent);
13795         if (ret < 0)
13796                 goto bail_free;
13797
13798         /* verify that reads actually work, save revision for reset check */
13799         dd->revision = read_csr(dd, CCE_REVISION);
13800         if (dd->revision == ~(u64)0) {
13801                 dd_dev_err(dd, "cannot read chip CSRs\n");
13802                 ret = -EINVAL;
13803                 goto bail_cleanup;
13804         }
13805         dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
13806                         & CCE_REVISION_CHIP_REV_MAJOR_MASK;
13807         dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
13808                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
13809
13810         /* obtain the hardware ID - NOT related to unit, which is a
13811            software enumeration */
13812         reg = read_csr(dd, CCE_REVISION2);
13813         dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
13814                                         & CCE_REVISION2_HFI_ID_MASK;
13815         /* the variable size will remove unwanted bits */
13816         dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
13817         dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
13818         dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
13819                 dd->icode < ARRAY_SIZE(inames) ? inames[dd->icode] : "unknown",
13820                 (int)dd->irev);
13821
13822         /* speeds the hardware can support */
13823         dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
13824         /* speeds allowed to run at */
13825         dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
13826         /* give a reasonable active value, will be set on link up */
13827         dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
13828
13829         dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
13830         dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
13831         dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
13832         dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
13833         dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
13834         /* fix up link widths for emulation _p */
13835         ppd = dd->pport;
13836         if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
13837                 ppd->link_width_supported =
13838                         ppd->link_width_enabled =
13839                         ppd->link_width_downgrade_supported =
13840                         ppd->link_width_downgrade_enabled =
13841                                 OPA_LINK_WIDTH_1X;
13842         }
13843         /* ensure num_vls isn't larger than the number of sdma engines */
13844         if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
13845                 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
13846                            num_vls, dd->chip_sdma_engines);
13847                 num_vls = dd->chip_sdma_engines;
13848                 ppd->vls_supported = dd->chip_sdma_engines;
13849         }
13850
13851         /*
13852          * Convert the ns parameter to the 64 * cclocks used in the CSR.
13853          * Limit the max if larger than the field holds.  If timeout is
13854          * non-zero, then the calculated field will be at least 1.
13855          *
13856          * Must be after icode is set up - the cclock rate depends
13857          * on knowing the hardware being used.
13858          */
13859         dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
13860         if (dd->rcv_intr_timeout_csr >
13861                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
13862                 dd->rcv_intr_timeout_csr =
13863                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
13864         else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
13865                 dd->rcv_intr_timeout_csr = 1;
13866
13867         /* needs to be done before we look for the peer device */
13868         read_guid(dd);
13869
13870         /* should this device init the ASIC block? */
13871         asic_should_init(dd);
13872
13873         /* obtain chip sizes, reset chip CSRs */
13874         init_chip(dd);
13875
13876         /* read in the PCIe link speed information */
13877         ret = pcie_speeds(dd);
13878         if (ret)
13879                 goto bail_cleanup;
13880
13881         /* read in firmware */
13882         ret = hfi1_firmware_init(dd);
13883         if (ret)
13884                 goto bail_cleanup;
13885
13886         /*
13887          * In general, the PCIe Gen3 transition must occur after the
13888          * chip has been idled (so it won't initiate any PCIe transactions
13889          * e.g. an interrupt) and before the driver changes any registers
13890          * (the transition will reset the registers).
13891          *
13892          * In particular, place this call after:
13893          * - init_chip()     - the chip will not initiate any PCIe transactions
13894          * - pcie_speeds()   - reads the current link speed
13895          * - hfi1_firmware_init() - the needed firmware is ready to be
13896          *                          downloaded
13897          */
13898         ret = do_pcie_gen3_transition(dd);
13899         if (ret)
13900                 goto bail_cleanup;
13901
13902         /* start setting dd values and adjusting CSRs */
13903         init_early_variables(dd);
13904
13905         parse_platform_config(dd);
13906
13907         ret = obtain_boardname(dd);
13908         if (ret)
13909                 goto bail_cleanup;
13910
13911         snprintf(dd->boardversion, BOARD_VERS_MAX,
13912                  "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
13913                  HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
13914                  (u32)dd->majrev,
13915                  (u32)dd->minrev,
13916                  (dd->revision >> CCE_REVISION_SW_SHIFT)
13917                     & CCE_REVISION_SW_MASK);
13918
13919         ret = set_up_context_variables(dd);
13920         if (ret)
13921                 goto bail_cleanup;
13922
13923         /* set initial RXE CSRs */
13924         init_rxe(dd);
13925         /* set initial TXE CSRs */
13926         init_txe(dd);
13927         /* set initial non-RXE, non-TXE CSRs */
13928         init_other(dd);
13929         /* set up KDETH QP prefix in both RX and TX CSRs */
13930         init_kdeth_qp(dd);
13931
13932         /* send contexts must be set up before receive contexts */
13933         ret = init_send_contexts(dd);
13934         if (ret)
13935                 goto bail_cleanup;
13936
13937         ret = hfi1_create_ctxts(dd);
13938         if (ret)
13939                 goto bail_cleanup;
13940
13941         dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
13942         /*
13943          * rcd[0] is guaranteed to be valid by this point. Also, all
13944          * contexts are using the same value, as per the module parameter.
13945          */
13946         dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
13947
13948         ret = init_pervl_scs(dd);
13949         if (ret)
13950                 goto bail_cleanup;
13951
13952         /* sdma init */
13953         for (i = 0; i < dd->num_pports; ++i) {
13954                 ret = sdma_init(dd, i);
13955                 if (ret)
13956                         goto bail_cleanup;
13957         }
13958
13959         /* use contexts created by hfi1_create_ctxts */
13960         ret = set_up_interrupts(dd);
13961         if (ret)
13962                 goto bail_cleanup;
13963
13964         /* set up LCB access - must be after set_up_interrupts() */
13965         init_lcb_access(dd);
13966
13967         snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
13968                  dd->base_guid & 0xFFFFFF);
13969
13970         dd->oui1 = dd->base_guid >> 56 & 0xFF;
13971         dd->oui2 = dd->base_guid >> 48 & 0xFF;
13972         dd->oui3 = dd->base_guid >> 40 & 0xFF;
13973
13974         ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
13975         if (ret)
13976                 goto bail_clear_intr;
13977         check_fabric_firmware_versions(dd);
13978
13979         thermal_init(dd);
13980
13981         ret = init_cntrs(dd);
13982         if (ret)
13983                 goto bail_clear_intr;
13984
13985         ret = init_rcverr(dd);
13986         if (ret)
13987                 goto bail_free_cntrs;
13988
13989         ret = eprom_init(dd);
13990         if (ret)
13991                 goto bail_free_rcverr;
13992
13993         goto bail;
13994
13995 bail_free_rcverr:
13996         free_rcverr(dd);
13997 bail_free_cntrs:
13998         free_cntrs(dd);
13999 bail_clear_intr:
14000         clean_up_interrupts(dd);
14001 bail_cleanup:
14002         hfi1_pcie_ddcleanup(dd);
14003 bail_free:
14004         hfi1_free_devdata(dd);
14005         dd = ERR_PTR(ret);
14006 bail:
14007         return dd;
14008 }
14009
14010 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14011                         u32 dw_len)
14012 {
14013         u32 delta_cycles;
14014         u32 current_egress_rate = ppd->current_egress_rate;
14015         /* rates here are in units of 10^6 bits/sec */
14016
14017         if (desired_egress_rate == -1)
14018                 return 0; /* shouldn't happen */
14019
14020         if (desired_egress_rate >= current_egress_rate)
14021                 return 0; /* we cannot make it go faster, only slower */
14022
14023         delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14024                         egress_cycles(dw_len * 4, current_egress_rate);
14025
14026         return (u16)delta_cycles;
14027 }
14028
14029
14030 /**
14031  * create_pbc - build a pbc for transmission
14032  * @flags: special case flags or-ed in built pbc
14033  * @srate_mbs: static rate in Mbit/s
14034  * @vl: vl
14035  * @dw_len: dword length (header words + data words + pbc words)
14036  *
14037  * Create a PBC with the given flags, rate, VL, and length.
14038  *
14039  * NOTE: The PBC created will not insert any HCRC - all callers but one are
14040  * for verbs, which does not use this PSM feature.  The lone other caller
14041  * is for the diagnostic interface which calls this if the user does not
14042  * supply their own PBC.
14043  */
14044 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14045                u32 dw_len)
14046 {
14047         u64 pbc, delay = 0;
14048
14049         if (unlikely(srate_mbs))
14050                 delay = delay_cycles(ppd, srate_mbs, dw_len);
14051
14052         pbc = flags
14053                 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14054                 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14055                 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14056                 | (dw_len & PBC_LENGTH_DWS_MASK)
14057                         << PBC_LENGTH_DWS_SHIFT;
14058
14059         return pbc;
14060 }
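/*
 * Usage sketch (illustration only): a caller that does not need static
 * rate control can build a PBC for VL 0 and a 32 dword packet with
 *
 *      pbc = create_pbc(ppd, 0, 0, 0, 32);
 *
 * With srate_mbs == 0 no egress delay is inserted, and HCRC insertion
 * stays disabled (PBC_IHCRC_NONE) as described above.
 */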
14061
14062 #define SBUS_THERMAL    0x4f
14063 #define SBUS_THERM_MONITOR_MODE 0x1
14064
14065 #define THERM_FAILURE(dev, ret, reason) \
14066         dd_dev_err((dd),                                                \
14067                    "Thermal sensor initialization failed: %s (%d)\n",   \
14068                    (reason), (ret))
14069
14070 /*
14071  * Initialize the Avago Thermal sensor.
14072  *
14073  * After initialization, enable polling of thermal sensor through
14074  * SBus interface. For this to work, the SBus Master firmware has
14075  * to be loaded, because the HW polling logic uses SBus interrupts,
14076  * which are not supported with the default firmware. Otherwise, no
14077  * data will be returned through
14078  * the ASIC_STS_THERM CSR.
14079  */
14080 static int thermal_init(struct hfi1_devdata *dd)
14081 {
14082         int ret = 0;
14083
14084         if (dd->icode != ICODE_RTL_SILICON ||
14085             !(dd->flags & HFI1_DO_INIT_ASIC))
14086                 return ret;
14087
14088         acquire_hw_mutex(dd);
14089         dd_dev_info(dd, "Initializing thermal sensor\n");
14090         /* Disable polling of thermal readings */
14091         write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14092         msleep(100);
14093         /* Thermal Sensor Initialization */
14094         /*    Step 1: Reset the Thermal SBus Receiver */
14095         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14096                                 RESET_SBUS_RECEIVER, 0);
14097         if (ret) {
14098                 THERM_FAILURE(dd, ret, "Bus Reset");
14099                 goto done;
14100         }
14101         /*    Step 2: Set Reset bit in Thermal block */
14102         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14103                                 WRITE_SBUS_RECEIVER, 0x1);
14104         if (ret) {
14105                 THERM_FAILURE(dd, ret, "Therm Block Reset");
14106                 goto done;
14107         }
14108         /*    Step 3: Write clock divider value (100MHz -> 2MHz) */
14109         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14110                                 WRITE_SBUS_RECEIVER, 0x32);
14111         if (ret) {
14112                 THERM_FAILURE(dd, ret, "Write Clock Div");
14113                 goto done;
14114         }
14115         /*    Step 4: Select temperature mode */
14116         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14117                                 WRITE_SBUS_RECEIVER,
14118                                 SBUS_THERM_MONITOR_MODE);
14119         if (ret) {
14120                 THERM_FAILURE(dd, ret, "Write Mode Sel");
14121                 goto done;
14122         }
14123         /*    Step 5: De-assert block reset and start conversion */
14124         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14125                                 WRITE_SBUS_RECEIVER, 0x2);
14126         if (ret) {
14127                 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14128                 goto done;
14129         }
14130         /*    Step 5.1: Wait for first conversion (21.5ms per spec) */
14131         msleep(22);
14132
14133         /* Enable polling of thermal readings */
14134         write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
14135 done:
14136         release_hw_mutex(dd);
14137         return ret;
14138 }
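/*
 * Worked note for step 3 above (illustration only): the clock divider
 * value 0x32 is decimal 50, so the 100 MHz input is divided down to
 * 100 MHz / 50 = 2 MHz, matching the comment on that step.
 */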
14139
14140 static void handle_temp_err(struct hfi1_devdata *dd)
14141 {
14142         struct hfi1_pportdata *ppd = &dd->pport[0];
14143         /*
14144          * Thermal Critical Interrupt
14145          * Put the device into forced freeze mode, take link down to
14146          * offline, and put DC into reset.
14147          */
14148         dd_dev_emerg(dd,
14149                      "Critical temperature reached! Forcing device into freeze mode!\n");
14150         dd->flags |= HFI1_FORCED_FREEZE;
14151         start_freeze_handling(ppd, FREEZE_SELF|FREEZE_ABORT);
14152         /*
14153          * Shut DC down as much and as quickly as possible.
14154          *
14155          * Step 1: Take the link down to OFFLINE. This will cause the
14156          *         8051 to put the Serdes in reset. However, we don't want to
14157          *         go through the entire link state machine since we want to
14158          *         shutdown ASAP. Furthermore, this is not a graceful shutdown
14159          *         but rather an attempt to save the chip.
14160          *         Code below is almost the same as quiet_serdes() but avoids
14161          *         all the extra work and the sleeps.
14162          */
14163         ppd->driver_link_ready = 0;
14164         ppd->link_enabled = 0;
14165         set_physical_link_state(dd, PLS_OFFLINE |
14166                                 (OPA_LINKDOWN_REASON_SMA_DISABLED << 8));
14167         /*
14168          * Step 2: Shutdown LCB and 8051
14169          *         After shutdown, do not restore DC_CFG_RESET value.
14170          */
14171         dc_shutdown(dd);
14172 }