/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "hfi.h"

#define NUM_IB_PORTS 1
uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
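/*
 * Worked example of the arithmetic above as a hypothetical helper (the
 * driver hard-codes the result; "12.5 GB/s" is 12.5 bytes per ns):
 */
static inline uint pkt_time_ns(uint payload_bytes)
{
    /* (payload + 64 header bytes) / 12.5 bytes-per-ns, in integer math */
    return ((payload_bytes + 64) * 10) / 125;   /* 10 * 1024 -> 824 ns */
}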
uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */
struct flag_table {
    u64 flag;   /* the flag */
    char *str;  /* description string */
    u16 extra;  /* extra information */
};
/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
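/*
 * Minimal sketch (not the driver's actual reporting helper) of how a
 * flag_table is consumed: scan the table and name each set status bit.
 */
static inline void report_flags_sketch(struct hfi1_devdata *dd, u64 reg,
                                       const struct flag_table *table,
                                       size_t len)
{
    size_t i;

    for (i = 0; i < len; i++)
        if (reg & table[i].flag)
            dd_dev_err(dd, "%s\n", table[i].str);
}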
/* Send Error Consequences */
#define SEC_WRITE_DROPPED 0x1
#define SEC_PACKET_DROPPED 0x2
#define SEC_SC_HALTED 0x4 /* per-context only */
#define SEC_SPC_FREEZE 0x8 /* per-HFI only */

#define MIN_KERNEL_KCTXTS 2
#define FIRST_KERNEL_KCTXT 1
#define NUM_MAP_REGS 32

/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)

#define IB_PACKET_TYPE 2ull
#define QW_SHIFT 6ull
#define QPN_WIDTH 7ull
/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull
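/*
 * Illustrative software version of the LRH.BTH match rule the hardware
 * applies: quadword 0, bits [49:48] must equal 2 (an IB packet).  The
 * *_MATCH_OFFSET encoding packs the quadword index and bit offset into
 * one value for the CSR; this hypothetical helper just shows the meaning.
 */
static inline bool qw0_is_ib_packet(const u64 *hdr_qws)
{
    return ((hdr_qws[LRH_BTH_QW] >> LRH_BTH_BIT_OFFSET) &
            LRH_BTH_MASK) == LRH_BTH_VALUE;
}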
/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
    num, \
    sc0, sc0val, \
    sc1, sc1val, \
    sc2, sc2val, \
    sc3, sc3val, \
    sc4, sc4val, \
    sc5, sc5val, \
    sc6, sc6val, \
    sc7, sc7val) \
(   ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
    ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
    ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
    ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
    ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
    ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
    ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
    ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
)
#define DC_SC_VL_VAL( \
    range, \
    e0, e0val, \
    e1, e1val, \
    e2, e2val, \
    e3, e3val, \
    e4, e4val, \
    e5, e5val, \
    e6, e6val, \
    e7, e7val, \
    e8, e8val, \
    e9, e9val, \
    e10, e10val, \
    e11, e11val, \
    e12, e12val, \
    e13, e13val, \
    e14, e14val, \
    e15, e15val) \
(   ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
    ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
    ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
    ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
    ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
    ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
    ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
    ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
    ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
    ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
    ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
    ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
    ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
    ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
    ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
    ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT)   \
)
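/*
 * Illustrative use of SC2VL_VAL (the CSR name SEND_SC2VLT0 is assumed
 * from the SEND_SC2VLT##num token pasting above): program an identity
 * SC0..SC7 -> VL0..VL7 mapping into the first SC2VL table register.
 */
static inline void sc2vl_identity_example(struct hfi1_devdata *dd)
{
    write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(0,
            0, 0, 1, 1, 2, 2, 3, 3,
            4, 4, 5, 5, 6, 6, 7, 7));
}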
/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
        | CCE_STATUS_RXE_FROZE_SMASK \
        | CCE_STATUS_TXE_FROZE_SMASK \
        | CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
        | CCE_STATUS_TXE_PAUSED_SMASK \
        | CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
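/*
 * Illustrative check built on the masks above: have all CceStatus
 * sub-blocks reported frozen?  (The driver's freeze handling polls
 * CCE_STATUS in a similar way.)
 */
static inline bool cce_all_frozen(u64 cce_status)
{
    return (cce_status & ALL_FROZE) == ALL_FROZE;
}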
static struct flag_table cce_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
        CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
        CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
        CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
        CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
        CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
        CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
        CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
        CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
        CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
        CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
        CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
        CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
        CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
        CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
        CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
        CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
        CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
        CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
        CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
        CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
        CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
        CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
        CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
        CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/ FLAG_ENTRY0("LATriggered",
        CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
        CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
        CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
        CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
        CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
        CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
        CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
        CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
        CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK)
};
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};
/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
        SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
        SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/ FLAG_ENTRY("PioCsrParity",
        SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/ FLAG_ENTRY("PioPccFifoParity",
        SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/ FLAG_ENTRY("PioPecFifoParity",
        SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
        SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
        SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/ FLAG_ENTRY("PioSmPktResetParity",
        SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
        SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
        SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/ FLAG_ENTRY("PioInitSmIn",
        SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/ FLAG_ENTRY("PioWriteDataParity",
        SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/ FLAG_ENTRY("PioStateMachine",
        SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
        SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
        SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
        SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/ FLAG_ENTRY("PioVlfSopParity",
        SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/ FLAG_ENTRY("PioVlFifoParity",
        SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
        SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/ FLAG_ENTRY("PioPpmcSopLen",
        SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
        SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
        SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
        SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
        SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
};
/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
    (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
        SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
        SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
        SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
        SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
};
/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
    (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
    | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
    | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
        SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
        SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
        SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
        SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
        SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
        SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
        SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
        SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
        SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
        SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
        SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
        SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
        SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
        SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
        SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
        SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
        SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
        SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
        SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
        SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
        SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
        SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
        SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
        SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
        SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
        SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
        SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
        SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
        SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
        SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
        SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};
/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};
/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
    (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
    | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
    | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
    | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
    | SEES(TX_LAUNCH_CSR_PARITY) \
    | SEES(TX_SBRD_CTL_CSR_PARITY) \
    | SEES(TX_CONFIG_PARITY) \
    | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
    | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
    | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
    | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
    | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
    | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
    | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
    | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
    | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
    | SEES(TX_CREDIT_RETURN_PARITY))
/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};
/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/ FLAG_ENTRY("InconsistentSop",
        SEC_PACKET_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/ FLAG_ENTRY("DisallowedPacket",
        SEC_PACKET_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
        SEC_WRITE_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/ FLAG_ENTRY("WriteOverflow",
        SEC_WRITE_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
        SEC_WRITE_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
};
/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
        RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
        RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
        RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
        RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
        RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
        RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
        RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
        RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
        RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
        RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
        RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
        RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};
/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
    (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
    | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
    | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
    | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
#define RXE_FREEZE_ABORT_MASK \
    (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
    RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
    RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
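/*
 * Illustrative predicate (not the driver's own helper): these RXE DMA
 * errors are uncorrectable, so a freeze that includes any of them is
 * treated as an abort rather than a normal freeze/unfreeze cycle.
 */
static inline bool rxe_freeze_is_abort(u64 rxe_err_status)
{
    return !!(rxe_err_status & RXE_FREEZE_ABORT_MASK);
}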
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
    FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
    FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
    FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
    FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
    FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
    FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
    FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
    FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
    FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
    FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
    FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
    FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
    FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
    FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
    FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
    FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
    FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
    FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
    FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
    FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
    FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
    FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
    FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
    FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
    FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
    FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
    FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
    FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
    FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
    FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
    FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
    FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
    FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
    FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
    FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
    FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
    FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
    FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
    FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
    FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
    FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
    FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
    FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
    FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
    FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
    FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
        LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
        LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
        LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
        LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
        LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
        LCBE(REDUNDANT_FLIT_PARITY_ERR))
};
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
    FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
    FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
    FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
    FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
    FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
    FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
    FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
    FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
    FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
        D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
    FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};
/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
    FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
    FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
    FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
    FLAG_ENTRY0("Serdes internal loopback failure",
        FAILED_SERDES_INTERNAL_LOOPBACK),
    FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
    FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
    FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
    FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
    FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
    FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
    FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
    FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT)
};
/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
    FLAG_ENTRY0("Host request done", 0x0001),
    FLAG_ENTRY0("BC SMA message", 0x0002),
    FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
    FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
    FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
    FLAG_ENTRY0("External device config request", 0x0020),
    FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
    FLAG_ENTRY0("LinkUp achieved", 0x0080),
    FLAG_ENTRY0("Link going down", 0x0100),
};
static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
                               u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
                                  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
                                      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
                                     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
                                  u16 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
                            u8 *tx_polarity_inversion,
                            u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
                                unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
                           unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
                           unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
                                          u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
                           u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
                                  int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void handle_temp_err(struct hfi1_devdata *);
static void dc_shutdown(struct hfi1_devdata *);
static void dc_start(struct hfi1_devdata *);
/*
 * Error interrupt table entry. This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
    u32 status;     /* status CSR offset */
    u32 clear;      /* clear CSR offset */
    u32 mask;       /* mask CSR offset */
    void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
    const char *desc;
};
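/*
 * Minimal sketch (assumed shape only; the driver's real routine also
 * tallies error counters) of the common "clear down" flow driven by an
 * err_reg_info entry: read the second tier status register, clear the
 * bits that were seen, then hand them to the per-block handler.
 */
static void clear_down_sketch(struct hfi1_devdata *dd, u32 source,
                              const struct err_reg_info *eri)
{
    u64 reg = read_csr(dd, eri->status);

    write_csr(dd, eri->clear, reg);     /* acknowledge what we saw */
    if (eri->handler)
        eri->handler(dd, source, reg);
}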
#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
/*
 * Helpers for building HFI and DC error interrupt table entries. Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
    { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
        handler, desc }
#define DC_EE1(reg, handler, desc) \
    { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
    { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
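/*
 * For reference, DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err") expands to:
 *
 *	{ DCC_ERR_FLG, DCC_ERR_FLG_CLR, DCC_ERR_FLG_EN,
 *	  handle_dcc_err, "DCC Err" }
 *
 * i.e. the status/clear/enable CSR offsets plus handler and description.
 */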
/*
 * Table of the "misc" grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
/* 3*/ { 0, 0, 0, NULL }, /* reserved */
/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
    /* the rest are reserved */
};
/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4
/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
    EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
    /* rest are reserved */
};
/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register cannot be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be derived.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7
/*
 * Table of the DC grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
    /* the rest are reserved */
};
/*
 * Device/port counter table entry.
 */
struct cntr_entry {
    char *name;     /* counter name */
    u32 csr;        /* csr to read for name (if applicable) */
    int offset;     /* offset into dd or ppd to store the counter's value */
    u8 flags;
    /* accessor for stat element, context either dd or ppd */
    u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
                   int mode, u64 data);
};
#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
    { name, csr, offset, flags, accessor }
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
      (counter * 8 + RCV_COUNTER_ARRAY32), \
      0, flags | CNTR_32BIT, \
      port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
      (counter * 8 + RCV_COUNTER_ARRAY32), \
      0, flags | CNTR_32BIT, \
      dev_access_u32_csr)

#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
      (counter * 8 + RCV_COUNTER_ARRAY64), \
      0, flags, \
      port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
      (counter * 8 + RCV_COUNTER_ARRAY64), \
      0, flags, \
      dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
      (RCV_HDR_OVFL_CNT + ctx*0x100), \
      0, CNTR_NORMAL, port_access_u64_csr)

#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
      (counter * 8 + SEND_COUNTER_ARRAY32), \
      0, flags | CNTR_32BIT, \
      port_access_u32_csr)

#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
      (counter * 8 + SEND_COUNTER_ARRAY64), \
      0, flags, \
      port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
      counter * 8 + SEND_COUNTER_ARRAY64, \
      0, flags, \
      dev_access_u64_csr)

#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
      (counter * 8 + CCE_COUNTER_ARRAY32), \
      0, flags | CNTR_32BIT, \
      dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
      (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
      0, flags | CNTR_32BIT, \
      dev_access_u32_csr)

#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
      counter, \
      0, \
      flags, \
      dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
      counter, \
      0, \
      flags, \
      dc_access_lcb_cntr)

#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
      0, \
      0, \
      CNTR_SYNTH, \
      access_ibp_##cntr)
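/*
 * Hypothetical helper (not in the driver) showing how a counter table
 * entry built by the macros above is read through its accessor:
 */
static inline u64 read_cntr_sketch(const struct cntr_entry *entry,
                                   struct hfi1_devdata *dd, int vl)
{
    return entry->rw_cntr(entry, dd, vl, CNTR_MODE_R, 0);
}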
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
    u64 val;

    if (dd->flags & HFI1_PRESENT) {
        val = readq((void __iomem *)dd->kregbase + offset);
        return val;
    }
    return -1;
}

void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
    if (dd->flags & HFI1_PRESENT)
        writeq(value, (void __iomem *)dd->kregbase + offset);
}
void __iomem *get_csr_addr(
    struct hfi1_devdata *dd,
    u32 offset)
{
    return (void __iomem *)dd->kregbase + offset;
}
static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
                                 int mode, u64 value)
{
    u64 ret;

    if (mode == CNTR_MODE_R) {
        ret = read_csr(dd, csr);
    } else if (mode == CNTR_MODE_W) {
        write_csr(dd, csr, value);
        ret = value;
    } else {
        dd_dev_err(dd, "Invalid cntr register access mode");
        return 0;
    }

    hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
    return ret;
}
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
                              void *context, int vl, int mode, u64 data)
{
    struct hfi1_devdata *dd = context;

    if (vl != CNTR_INVALID_VL)
        return 0;
    return read_write_csr(dd, entry->csr, mode, data);
}
static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
                              int vl, int mode, u64 data)
{
    struct hfi1_devdata *dd = context;
    u64 val;
    u64 csr = entry->csr;

    if (entry->flags & CNTR_VL) {
        if (vl == CNTR_INVALID_VL)
            return 0;
        csr += 8 * vl;
    } else {
        if (vl != CNTR_INVALID_VL)
            return 0;
    }

    val = read_write_csr(dd, csr, mode, data);
    return val;
}
static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
                              int vl, int mode, u64 data)
{
    struct hfi1_devdata *dd = context;
    u32 csr = entry->csr;
    int ret = 0;

    if (vl != CNTR_INVALID_VL)
        return 0;
    if (mode == CNTR_MODE_R)
        ret = read_lcb_csr(dd, csr, &data);
    else if (mode == CNTR_MODE_W)
        ret = write_lcb_csr(dd, csr, data);

    if (ret) {
        dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
        return 0;
    }

    hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
    return data;
}
static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
                               int vl, int mode, u64 data)
{
    struct hfi1_pportdata *ppd = context;

    if (vl != CNTR_INVALID_VL)
        return 0;
    return read_write_csr(ppd->dd, entry->csr, mode, data);
}
static u64 port_access_u64_csr(const struct cntr_entry *entry,
                               void *context, int vl, int mode, u64 data)
{
    struct hfi1_pportdata *ppd = context;
    u64 val;
    u64 csr = entry->csr;

    if (entry->flags & CNTR_VL) {
        if (vl == CNTR_INVALID_VL)
            return 0;
        csr += 8 * vl;
    } else {
        if (vl != CNTR_INVALID_VL)
            return 0;
    }

    val = read_write_csr(ppd->dd, csr, mode, data);
    return val;
}
/* Software defined */
static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
                                u64 data)
{
    u64 ret;

    if (mode == CNTR_MODE_R) {
        ret = *cntr;
    } else if (mode == CNTR_MODE_W) {
        *cntr = data;
        ret = data;
    } else {
        dd_dev_err(dd, "Invalid cntr sw access mode");
        return 0;
    }

    hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);

    return ret;
}
static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
                                 int vl, int mode, u64 data)
{
    struct hfi1_pportdata *ppd = context;

    if (vl != CNTR_INVALID_VL)
        return 0;
    return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
}

static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
                                 int vl, int mode, u64 data)
{
    struct hfi1_pportdata *ppd = context;

    if (vl != CNTR_INVALID_VL)
        return 0;
    return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
}

static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
                                       void *context, int vl, int mode,
                                       u64 data)
{
    struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;

    if (vl != CNTR_INVALID_VL)
        return 0;
    return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
}

static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
                                   void *context, int vl, int mode, u64 data)
{
    struct hfi1_pportdata *ppd = context;

    if (vl != CNTR_INVALID_VL)
        return 0;

    return read_write_sw(ppd->dd, &ppd->port_xmit_discards, mode, data);
}

static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
                                       void *context, int vl, int mode, u64 data)
{
    struct hfi1_pportdata *ppd = context;

    if (vl != CNTR_INVALID_VL)
        return 0;

    return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
                         mode, data);
}

static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
                                      void *context, int vl, int mode, u64 data)
{
    struct hfi1_pportdata *ppd = context;

    if (vl != CNTR_INVALID_VL)
        return 0;

    return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
                         mode, data);
}
u64 get_all_cpu_total(u64 __percpu *cntr)
{
    int cpu;
    u64 counter = 0;

    for_each_possible_cpu(cpu)
        counter += *per_cpu_ptr(cntr, cpu);
    return counter;
}
static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
                          u64 __percpu *cntr,
                          int vl, int mode, u64 data)
{
    u64 ret = 0;

    if (vl != CNTR_INVALID_VL)
        return 0;

    if (mode == CNTR_MODE_R) {
        ret = get_all_cpu_total(cntr) - *z_val;
    } else if (mode == CNTR_MODE_W) {
        /* A write can only zero the counter */
        if (data == 0)
            *z_val = get_all_cpu_total(cntr);
        else
            dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
    } else {
        dd_dev_err(dd, "Invalid cntr sw cpu access mode");
        return 0;
    }

    return ret;
}
static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
			      mode, data);
}

static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
			      mode, data);
}

static u64 access_sw_pio_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_piowait;
}

static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_txwait;
}

static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_kmem_wait;
}

static u64 access_sw_send_schedule(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->verbs_dev.n_send_schedule;
}
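
/*
 * The accessors above and below are not called directly; they are bound
 * into this file's counter tables, which pair a counter name with the
 * accessor that resolves it. A sketch of such a binding, assuming the
 * CNTR_ELEM()-style table entries used elsewhere in this file:
 *
 *	[C_SW_CPU_INTR] = CNTR_ELEM("Int", 0, 0, CNTR_NORMAL,
 *				    access_sw_cpu_intr),
 */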
/* Software counters for the error status bits within MISC_ERR_STATUS */
static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[12];
}

static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[11];
}

static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[10];
}

static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[9];
}

static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[8];
}

static u64 access_misc_efuse_read_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[7];
}

static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[6];
}

static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[5];
}

static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[4];
}

static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[3];
}

static u64 access_misc_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[2];
}

static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[1];
}

static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[0];
}
/*
 * Software counter for the aggregate of
 * individual CceErrStatus counters
 */
static u64 access_sw_cce_err_status_aggregated_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_cce_err_status_aggregate;
}
/*
 * Software counters corresponding to each of the
 * error status bits within CceErrStatus
 */
static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[40];
}

static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[39];
}

static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[38];
}

static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[37];
}

static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[36];
}

static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[35];
}

static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[34];
}

static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[33];
}

static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[32];
}

static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[31];
}

static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[30];
}

static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[29];
}

static u64 access_pcic_transmit_back_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[28];
}

static u64 access_pcic_transmit_front_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[27];
}

static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[26];
}

static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[25];
}

static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[24];
}

static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[23];
}

static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[22];
}

static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[21];
}

static u64 access_pcic_n_post_dat_q_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[20];
}

static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[19];
}

static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[18];
}

static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[17];
}

static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[16];
}

static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[15];
}

static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[14];
}

static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[13];
}

static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[12];
}

static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[11];
}

static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[10];
}

static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[9];
}

static u64 access_cce_cli2_async_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[8];
}

static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[7];
}

static u64 access_cce_cli0_async_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[6];
}

static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[5];
}

static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[4];
}

static u64 access_cce_trgt_async_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[3];
}

static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[2];
}

static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[1];
}

static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within RcvErrStatus
 */
static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[63];
}

static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[62];
}

static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[61];
}

static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[60];
}

static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[59];
}

static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[58];
}

static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[57];
}

static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[56];
}

static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[55];
}

static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[54];
}

static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[53];
}

static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[52];
}

static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[51];
}

static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[50];
}

static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[49];
}

static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[48];
}

static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[47];
}

static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[46];
}

static u64 access_rx_hq_intr_csr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[45];
}

static u64 access_rx_lookup_csr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[44];
}

static u64 access_rx_lookup_rcv_array_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[43];
}

static u64 access_rx_lookup_rcv_array_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[42];
}

static u64 access_rx_lookup_des_part2_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[41];
}

static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[40];
}

static u64 access_rx_lookup_des_part1_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[39];
}

static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[38];
}

static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[37];
}

static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[36];
}

static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[35];
}

static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[34];
}

static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[33];
}

static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[32];
}

static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[31];
}

static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[30];
}

static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[29];
}

static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[28];
}

static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[27];
}

static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[26];
}

static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[25];
}

static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[24];
}

static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[23];
}

static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[22];
}

static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[21];
}

static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[20];
}

static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[19];
}

static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[18];
}

static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[17];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[16];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[15];
}

static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[14];
}

static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[13];
}

static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[12];
}

static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[11];
}

static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[10];
}

static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[9];
}

static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[8];
}

static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[7];
}

static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[6];
}

static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[5];
}

static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[4];
}

static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[3];
}

static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[2];
}

static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[1];
}

static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendPioErrStatus
 */
static u64 access_pio_pec_sop_head_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[35];
}

static u64 access_pio_pcc_sop_head_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[34];
}

static u64 access_pio_last_returned_cnt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[33];
}

static u64 access_pio_current_free_cnt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[32];
}

static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[31];
}

static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[30];
}

static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[29];
}

static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[28];
}

static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[27];
}

static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[26];
}

static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[25];
}

static u64 access_pio_block_qw_count_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[24];
}

static u64 access_pio_write_qw_valid_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[23];
}

static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[22];
}

static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[21];
}

static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[20];
}

static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[19];
}

static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[18];
}

static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[17];
}

static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[16];
}

static u64 access_pio_credit_ret_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[15];
}

static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[14];
}

static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[13];
}

static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[12];
}

static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[11];
}

static u64 access_pio_sm_pkt_reset_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[10];
}

static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[9];
}

static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[8];
}

static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[7];
}

static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[6];
}

static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[5];
}

static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[4];
}

static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[3];
}

static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[2];
}

static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[1];
}

static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaErrStatus
 */
static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[3];
}

static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[2];
}

static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[1];
}

static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendEgressErrStatus
 */
static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[63];
}

static u64 access_tx_read_sdma_memory_csr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[62];
}

static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[61];
}

static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[60];
}

static u64 access_tx_read_sdma_memory_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[59];
}

static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[58];
}

static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[57];
}

static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[56];
}

static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[55];
}

static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[54];
}

static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[53];
}

static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[52];
}

static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[51];
}

static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[50];
}

static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[49];
}

static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[48];
}

static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[47];
}

static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[46];
}

static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[45];
}

static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[44];
}

static u64 access_tx_read_sdma_memory_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[43];
}

static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[42];
}

static u64 access_tx_credit_return_partiy_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[41];
}

static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[40];
}

static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[39];
}

static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[38];
}

static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[37];
}

static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[36];
}

static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[35];
}

static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[34];
}

static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[33];
}

static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[32];
}

static u64 access_tx_sdma15_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[31];
}

static u64 access_tx_sdma14_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[30];
}

static u64 access_tx_sdma13_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[29];
}

static u64 access_tx_sdma12_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[28];
}

static u64 access_tx_sdma11_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[27];
}

static u64 access_tx_sdma10_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[26];
}

static u64 access_tx_sdma9_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[25];
}

static u64 access_tx_sdma8_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[24];
}

static u64 access_tx_sdma7_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[23];
}

static u64 access_tx_sdma6_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[22];
}

static u64 access_tx_sdma5_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[21];
}

static u64 access_tx_sdma4_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[20];
}

static u64 access_tx_sdma3_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[19];
}

static u64 access_tx_sdma2_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[18];
}

static u64 access_tx_sdma1_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[17];
}

static u64 access_tx_sdma0_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[16];
}

static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[15];
}

static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[14];
}

static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[13];
}

static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[12];
}

static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[11];
}

static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[10];
}

static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[9];
}

static u64 access_tx_sdma_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[8];
}

static u64 access_tx_pio_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[7];
}

static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[6];
}

static u64 access_tx_incorrect_link_state_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[5];
}

static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[4];
}

static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[3];
}

static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[2];
}

static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[1];
}

static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendErrStatus
 */
static u64 access_send_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[2];
}

static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[1];
}

static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendCtxtErrStatus
 */
static u64 access_pio_write_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[4];
}

static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[3];
}

static u64 access_pio_write_crosses_boundary_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[2];
}

static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[1];
}

static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaEngErrStatus
 */
static u64 access_sdma_header_request_fifo_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[23];
}

static u64 access_sdma_header_storage_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[22];
}

static u64 access_sdma_packet_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[21];
}

static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[20];
}

static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[19];
}

static u64 access_sdma_header_request_fifo_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[18];
}

static u64 access_sdma_header_storage_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[17];
}

static u64 access_sdma_packet_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[16];
}

static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[15];
}

static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[14];
}

static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[13];
}

static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[12];
}

static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[11];
}

static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[10];
}

static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[9];
}

static u64 access_sdma_packet_desc_overflow_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[8];
}

static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[7];
}

static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[6];
}

static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[5];
}

static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3844 return dd->sw_send_dma_eng_err_status_cnt[4];
3847 static u64 access_sdma_tail_out_of_bounds_err_cnt(
3848 const struct cntr_entry *entry,
3849 void *context, int vl, int mode, u64 data)
3851 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3853 return dd->sw_send_dma_eng_err_status_cnt[3];
3856 static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3857 void *context, int vl, int mode,
3860 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3862 return dd->sw_send_dma_eng_err_status_cnt[2];
3865 static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3866 void *context, int vl, int mode,
3869 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3871 return dd->sw_send_dma_eng_err_status_cnt[1];
3874 static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3875 void *context, int vl, int mode,
3878 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3880 return dd->sw_send_dma_eng_err_status_cnt[0];
#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,	      \
			      void *context, int vl, int mode, u64 data)      \
{									      \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
	return read_write_cpu(ppd->dd, &ppd->ibport_data.z_ ##cntr,	      \
			      ppd->ibport_data.cntr, vl, mode, data);	      \
}

def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);
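/*
 * For illustration only: def_access_sw_cpu(rc_acks) above expands to
 * roughly the following accessor (a sketch, not generated code):
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *				void *context, int vl, int mode, u64 data)
 *	{
 *		struct hfi1_pportdata *ppd =
 *				(struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd, &ppd->ibport_data.z_rc_acks,
 *				      ppd->ibport_data.rc_acks, vl, mode,
 *				      data);
 *	}
 *
 * Depending on mode, read_write_cpu() either totals the per-CPU values
 * or records a new zero baseline; see its definition for the details.
 */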
#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry,		      \
			     void *context, int vl, int mode, u64 data)	      \
{									      \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
									      \
	if (vl != CNTR_INVALID_VL)					      \
		return 0;						      \
									      \
	return read_write_sw(ppd->dd, &ppd->ibport_data.n_ ##cntr,	      \
			     mode, data);				      \
}

def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);
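/*
 * Unlike the per-CPU counters above, these verbs-layer (ibport) counters
 * are plain software counters with no per-VL breakdown, which is why the
 * generated accessors return 0 for any VL-specific query.
 */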
static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
			RCV_TID_FLOW_GEN_MISMATCH_CNT, CNTR_NORMAL),
[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
			CNTR_NORMAL),
[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
			CNTR_NORMAL),
[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
			CNTR_NORMAL),
[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
			CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
			CNTR_SYNTH),
[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
			CNTR_SYNTH),
[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
			CNTR_SYNTH),
[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
			DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
			DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT, CNTR_SYNTH),
[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
			DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
			CNTR_SYNTH),
[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_TOTAL_CRC] =
	DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
			 CNTR_SYNTH),
[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
			CNTR_SYNTH),
[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
			CNTR_SYNTH),
[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
			CNTR_SYNTH),
[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
			CNTR_SYNTH),
[C_DC_CRC_MULT_LN] =
	DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
			 CNTR_SYNTH),
[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
			CNTR_SYNTH),
[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
			CNTR_SYNTH),
[C_DC_SEQ_CRC_CNT] =
	DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_ONLY_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS1_CNT] =
	DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS2_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
			 CNTR_SYNTH),
[C_DC_REINIT_FROM_PEER_CNT] =
	DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
			 CNTR_SYNTH),
[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
			CNTR_SYNTH),
[C_DC_MISC_FLG_CNT] =
	DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_GOOD_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
[C_DC_PRF_ACCEPTED_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_RX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_TX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_CLK_CNTR] =
	DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
	DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
	DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
			 CNTR_SYNTH),
[C_DC_PG_STS_TX_SBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
[C_DC_PG_STS_TX_MBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
			 CNTR_SYNTH),
[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_intr),
[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_rcv_limit),
[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
			    access_sw_vtx_wait),
[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
			    access_sw_pio_wait),
[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
			    access_sw_kmem_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
			    access_sw_send_schedule),
/* MISC_ERR_STATUS */
[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
			CNTR_NORMAL, access_misc_pll_lock_fail_err_cnt),
[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
			CNTR_NORMAL, access_misc_mbist_fail_err_cnt),
[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
			CNTR_NORMAL, access_misc_invalid_eep_cmd_err_cnt),
[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
			CNTR_NORMAL, access_misc_efuse_done_parity_err_cnt),
[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
			CNTR_NORMAL, access_misc_efuse_write_err_cnt),
[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
			0, CNTR_NORMAL, access_misc_efuse_read_bad_addr_err_cnt),
[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
			CNTR_NORMAL, access_misc_efuse_csr_parity_err_cnt),
[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
			CNTR_NORMAL, access_misc_fw_auth_failed_err_cnt),
[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
			CNTR_NORMAL, access_misc_key_mismatch_err_cnt),
[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
			CNTR_NORMAL, access_misc_sbus_write_failed_err_cnt),
[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
			CNTR_NORMAL, access_misc_csr_write_bad_addr_err_cnt),
[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
			CNTR_NORMAL, access_misc_csr_read_bad_addr_err_cnt),
[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
			CNTR_NORMAL, access_misc_csr_parity_err_cnt),
/* CceErrStatus */
[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
			CNTR_NORMAL, access_sw_cce_err_status_aggregated_cnt),
[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
			CNTR_NORMAL, access_cce_msix_csr_parity_err_cnt),
[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
			CNTR_NORMAL, access_cce_int_map_unc_err_cnt),
[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
			CNTR_NORMAL, access_cce_int_map_cor_err_cnt),
[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
			CNTR_NORMAL, access_cce_msix_table_unc_err_cnt),
[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
			CNTR_NORMAL, access_cce_msix_table_cor_err_cnt),
[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
			0, CNTR_NORMAL,
			access_cce_rxdma_conv_fifo_parity_err_cnt),
[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
			0, CNTR_NORMAL,
			access_cce_rcpl_async_fifo_parity_err_cnt),
[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
			CNTR_NORMAL, access_cce_seg_write_bad_addr_err_cnt),
[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
			CNTR_NORMAL, access_cce_seg_read_bad_addr_err_cnt),
[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
			CNTR_NORMAL, access_la_triggered_cnt),
[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
			CNTR_NORMAL, access_cce_trgt_cpl_timeout_err_cnt),
[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
			CNTR_NORMAL, access_pcic_receive_parity_err_cnt),
[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
			CNTR_NORMAL, access_pcic_transmit_back_parity_err_cnt),
[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
			0, CNTR_NORMAL,
			access_pcic_transmit_front_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
			CNTR_NORMAL, access_pcic_cpl_dat_q_unc_err_cnt),
[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
			CNTR_NORMAL, access_pcic_cpl_hd_q_unc_err_cnt),
[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
			CNTR_NORMAL, access_pcic_post_dat_q_unc_err_cnt),
[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
			CNTR_NORMAL, access_pcic_post_hd_q_unc_err_cnt),
[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
			CNTR_NORMAL, access_pcic_retry_sot_mem_unc_err_cnt),
[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
			CNTR_NORMAL, access_pcic_retry_mem_unc_err),
[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
			CNTR_NORMAL, access_pcic_n_post_dat_q_parity_err_cnt),
[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
			CNTR_NORMAL, access_pcic_n_post_h_q_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
			CNTR_NORMAL, access_pcic_cpl_dat_q_cor_err_cnt),
[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
			CNTR_NORMAL, access_pcic_cpl_hd_q_cor_err_cnt),
[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
			CNTR_NORMAL, access_pcic_post_dat_q_cor_err_cnt),
[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
			CNTR_NORMAL, access_pcic_post_hd_q_cor_err_cnt),
[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
			CNTR_NORMAL, access_pcic_retry_sot_mem_cor_err_cnt),
[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
			CNTR_NORMAL, access_pcic_retry_mem_cor_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
			"CceCli1AsyncFifoDbgParityError", 0, 0, CNTR_NORMAL,
			access_cce_cli1_async_fifo_dbg_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
			"CceCli1AsyncFifoRxdmaParityError", 0, 0, CNTR_NORMAL,
			access_cce_cli1_async_fifo_rxdma_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
			"CceCli1AsyncFifoSdmaHdParityErr", 0, 0, CNTR_NORMAL,
			access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
			"CceCli1AsyncFifoPioCrdtParityErr", 0, 0, CNTR_NORMAL,
			access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
			0, CNTR_NORMAL,
			access_cce_cli2_async_fifo_parity_err_cnt),
[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
			CNTR_NORMAL, access_cce_csr_cfg_bus_parity_err_cnt),
[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
			0, CNTR_NORMAL,
			access_cce_cli0_async_fifo_parity_err_cnt),
[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
			CNTR_NORMAL, access_cce_rspd_data_parity_err_cnt),
[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
			CNTR_NORMAL, access_cce_trgt_access_err_cnt),
[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
			0, CNTR_NORMAL,
			access_cce_trgt_async_fifo_parity_err_cnt),
[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
			CNTR_NORMAL, access_cce_csr_write_bad_addr_err_cnt),
[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
			CNTR_NORMAL, access_cce_csr_read_bad_addr_err_cnt),
[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
			CNTR_NORMAL, access_ccs_csr_parity_err_cnt),
/* RcvErrStatus */
[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
			CNTR_NORMAL, access_rx_csr_parity_err_cnt),
[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
			CNTR_NORMAL, access_rx_csr_write_bad_addr_err_cnt),
[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
			CNTR_NORMAL, access_rx_csr_read_bad_addr_err_cnt),
[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_csr_unc_err_cnt),
[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_dq_fsm_encoding_err_cnt),
[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_eq_fsm_encoding_err_cnt),
[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_csr_parity_err_cnt),
[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_data_cor_err_cnt),
[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_data_unc_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_data_fifo_rd_cor_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_data_fifo_rd_unc_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_hdr_fifo_rd_cor_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_hdr_fifo_rd_unc_err_cnt),
[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_desc_part2_cor_err_cnt),
[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_desc_part2_unc_err_cnt),
[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_desc_part1_cor_err_cnt),
[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_desc_part1_unc_err_cnt),
[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
			CNTR_NORMAL, access_rx_hq_intr_fsm_err_cnt),
[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
			CNTR_NORMAL, access_rx_hq_intr_csr_parity_err_cnt),
[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
			CNTR_NORMAL, access_rx_lookup_csr_parity_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
			CNTR_NORMAL, access_rx_lookup_rcv_array_cor_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
			CNTR_NORMAL, access_rx_lookup_rcv_array_unc_err_cnt),
[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
			0, CNTR_NORMAL,
			access_rx_lookup_des_part2_parity_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
			0, CNTR_NORMAL,
			access_rx_lookup_des_part1_unc_cor_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
			CNTR_NORMAL, access_rx_lookup_des_part1_unc_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_next_free_buf_cor_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_next_free_buf_unc_err_cnt),
[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
			"RxRbufFlInitWrAddrParityErr", 0, 0, CNTR_NORMAL,
			access_rbuf_fl_init_wr_addr_parity_err_cnt),
[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
			0, CNTR_NORMAL,
			access_rx_rbuf_fl_initdone_parity_err_cnt),
[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
			0, CNTR_NORMAL,
			access_rx_rbuf_fl_write_addr_parity_err_cnt),
[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_fl_rd_addr_parity_err_cnt),
[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_empty_err_cnt),
[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_full_err_cnt),
[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
			CNTR_NORMAL, access_rbuf_bad_lookup_err_cnt),
[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
			CNTR_NORMAL, access_rbuf_ctx_id_parity_err_cnt),
[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
			CNTR_NORMAL, access_rbuf_csr_qeopdw_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
			"RxRbufCsrQNumOfPktParityErr", 0, 0, CNTR_NORMAL,
			access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
			"RxRbufCsrQTlPtrParityErr", 0, 0, CNTR_NORMAL,
			access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
			0, CNTR_NORMAL,
			access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
			0, CNTR_NORMAL,
			access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
			0, 0, CNTR_NORMAL,
			access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
			0, CNTR_NORMAL,
			access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
			"RxRbufCsrQHeadBufNumParityErr", 0, 0, CNTR_NORMAL,
			access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
			0, CNTR_NORMAL,
			access_rx_rbuf_block_list_read_cor_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
			0, CNTR_NORMAL,
			access_rx_rbuf_block_list_read_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_lookup_des_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_lookup_des_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
			"RxRbufLookupDesRegUncCorErr", 0, 0, CNTR_NORMAL,
			access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_lookup_des_reg_unc_err_cnt),
[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_free_list_cor_err_cnt),
[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_free_list_unc_err_cnt),
[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
			CNTR_NORMAL, access_rx_rcv_fsm_encoding_err_cnt),
[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_flag_cor_err_cnt),
[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_flag_unc_err_cnt),
[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
			CNTR_NORMAL, access_rx_dc_sop_eop_parity_err_cnt),
[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
			CNTR_NORMAL, access_rx_rcv_csr_parity_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
			CNTR_NORMAL, access_rx_rcv_qp_map_table_cor_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
			CNTR_NORMAL, access_rx_rcv_qp_map_table_unc_err_cnt),
[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
			CNTR_NORMAL, access_rx_rcv_data_cor_err_cnt),
[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
			CNTR_NORMAL, access_rx_rcv_data_unc_err_cnt),
[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
			CNTR_NORMAL, access_rx_rcv_hdr_cor_err_cnt),
[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
			CNTR_NORMAL, access_rx_rcv_hdr_unc_err_cnt),
[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
			CNTR_NORMAL, access_rx_dc_intf_parity_err_cnt),
[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_csr_cor_err_cnt),
/* SendPioErrStatus */
[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
			CNTR_NORMAL, access_pio_pec_sop_head_parity_err_cnt),
[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
			CNTR_NORMAL, access_pio_pcc_sop_head_parity_err_cnt),
[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
			0, 0, CNTR_NORMAL,
			access_pio_last_returned_cnt_parity_err_cnt),
[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
			0, CNTR_NORMAL,
			access_pio_current_free_cnt_parity_err_cnt),
[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
			CNTR_NORMAL, access_pio_reserved_31_err_cnt),
[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
			CNTR_NORMAL, access_pio_reserved_30_err_cnt),
[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
			CNTR_NORMAL, access_pio_ppmc_sop_len_err_cnt),
[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
			CNTR_NORMAL, access_pio_ppmc_bqc_mem_parity_err_cnt),
[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
			CNTR_NORMAL, access_pio_vl_fifo_parity_err_cnt),
[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
			CNTR_NORMAL, access_pio_vlf_sop_parity_err_cnt),
[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
			CNTR_NORMAL, access_pio_vlf_v1_len_parity_err_cnt),
[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
			CNTR_NORMAL, access_pio_block_qw_count_parity_err_cnt),
[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
			CNTR_NORMAL, access_pio_write_qw_valid_parity_err_cnt),
[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
			CNTR_NORMAL, access_pio_state_machine_err_cnt),
[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
			CNTR_NORMAL, access_pio_write_data_parity_err_cnt),
[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
			CNTR_NORMAL, access_pio_host_addr_mem_cor_err_cnt),
[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
			CNTR_NORMAL, access_pio_host_addr_mem_unc_err_cnt),
[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
			CNTR_NORMAL, access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
			CNTR_NORMAL, access_pio_init_sm_in_err_cnt),
[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
			CNTR_NORMAL, access_pio_ppmc_pbl_fifo_err_cnt),
[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
			0, CNTR_NORMAL,
			access_pio_credit_ret_fifo_parity_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
			CNTR_NORMAL, access_pio_v1_len_mem_bank1_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
			CNTR_NORMAL, access_pio_v1_len_mem_bank0_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
			CNTR_NORMAL, access_pio_v1_len_mem_bank1_unc_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
			CNTR_NORMAL, access_pio_v1_len_mem_bank0_unc_err_cnt),
[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
			CNTR_NORMAL, access_pio_sm_pkt_reset_parity_err_cnt),
[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
			CNTR_NORMAL, access_pio_pkt_evict_fifo_parity_err_cnt),
[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
			"PioSbrdctrlCrrelFifoParityErr", 0, 0, CNTR_NORMAL,
			access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
			CNTR_NORMAL, access_pio_sbrdctl_crrel_parity_err_cnt),
[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
			CNTR_NORMAL, access_pio_pec_fifo_parity_err_cnt),
[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
			CNTR_NORMAL, access_pio_pcc_fifo_parity_err_cnt),
[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
			CNTR_NORMAL, access_pio_sb_mem_fifo1_err_cnt),
[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
			CNTR_NORMAL, access_pio_sb_mem_fifo0_err_cnt),
[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
			CNTR_NORMAL, access_pio_csr_parity_err_cnt),
[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
			CNTR_NORMAL, access_pio_write_addr_parity_err_cnt),
[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
			CNTR_NORMAL, access_pio_write_bad_ctxt_err_cnt),
/* SendDmaErrStatus */
[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
			0, CNTR_NORMAL,
			access_sdma_pcie_req_tracking_cor_err_cnt),
[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
			0, CNTR_NORMAL,
			access_sdma_pcie_req_tracking_unc_err_cnt),
[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
			CNTR_NORMAL, access_sdma_csr_parity_err_cnt),
[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
			CNTR_NORMAL, access_sdma_rpy_tag_err_cnt),
/* SendEgressErrStatus */
[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
			CNTR_NORMAL,
			access_tx_read_pio_memory_csr_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
			0, CNTR_NORMAL,
			access_tx_read_sdma_memory_csr_err_cnt),
[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
			CNTR_NORMAL, access_tx_egress_fifo_cor_err_cnt),
[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
			CNTR_NORMAL, access_tx_read_pio_memory_cor_err_cnt),
[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
			CNTR_NORMAL, access_tx_read_sdma_memory_cor_err_cnt),
[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
			CNTR_NORMAL, access_tx_sb_hdr_cor_err_cnt),
[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
			CNTR_NORMAL, access_tx_credit_overrun_err_cnt),
[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo8_cor_err_cnt),
[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo7_cor_err_cnt),
[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo6_cor_err_cnt),
[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo5_cor_err_cnt),
[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo4_cor_err_cnt),
[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo3_cor_err_cnt),
[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo2_cor_err_cnt),
[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo1_cor_err_cnt),
[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo0_cor_err_cnt),
[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
			CNTR_NORMAL, access_tx_credit_return_vl_err_cnt),
[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
			CNTR_NORMAL, access_tx_hcrc_insertion_err_cnt),
[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
			CNTR_NORMAL, access_tx_egress_fifo_unc_err_cnt),
[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
			CNTR_NORMAL, access_tx_read_pio_memory_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
			CNTR_NORMAL, access_tx_read_sdma_memory_unc_err_cnt),
[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
			CNTR_NORMAL, access_tx_sb_hdr_unc_err_cnt),
[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
			CNTR_NORMAL, access_tx_credit_return_partiy_err_cnt),
[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo8_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo7_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo6_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo5_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo4_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo3_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo2_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo1_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo0_unc_or_parity_err_cnt),
[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma15_disallowed_packet_err_cnt),
[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma14_disallowed_packet_err_cnt),
[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma13_disallowed_packet_err_cnt),
[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma12_disallowed_packet_err_cnt),
[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma11_disallowed_packet_err_cnt),
[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma10_disallowed_packet_err_cnt),
[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma9_disallowed_packet_err_cnt),
[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma8_disallowed_packet_err_cnt),
[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma7_disallowed_packet_err_cnt),
[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma6_disallowed_packet_err_cnt),
[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma5_disallowed_packet_err_cnt),
[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma4_disallowed_packet_err_cnt),
[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma3_disallowed_packet_err_cnt),
[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma2_disallowed_packet_err_cnt),
[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma1_disallowed_packet_err_cnt),
[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma0_disallowed_packet_err_cnt),
[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
			CNTR_NORMAL, access_tx_config_parity_err_cnt),
[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
			CNTR_NORMAL, access_tx_sbrd_ctl_csr_parity_err_cnt),
[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_csr_parity_err_cnt),
[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
			CNTR_NORMAL, access_tx_illegal_vl_err_cnt),
[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
			"TxSbrdCtlStateMachineParityErr", 0, 0, CNTR_NORMAL,
			access_tx_sbrd_ctl_state_machine_parity_err_cnt),
[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
			CNTR_NORMAL, access_egress_reserved_10_err_cnt),
[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
			CNTR_NORMAL, access_egress_reserved_9_err_cnt),
[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma_launch_intf_parity_err_cnt),
[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
			CNTR_NORMAL, access_tx_pio_launch_intf_parity_err_cnt),
[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
			CNTR_NORMAL, access_egress_reserved_6_err_cnt),
[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
			CNTR_NORMAL, access_tx_incorrect_link_state_err_cnt),
[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
			CNTR_NORMAL, access_tx_linkdown_err_cnt),
[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
			"EgressFifoUnderrunOrParityErr", 0, 0, CNTR_NORMAL,
			access_tx_egress_fifi_underrun_or_parity_err_cnt),
[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
			CNTR_NORMAL, access_egress_reserved_2_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
			CNTR_NORMAL, access_tx_pkt_integrity_mem_unc_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
			CNTR_NORMAL, access_tx_pkt_integrity_mem_cor_err_cnt),
/* SendErrStatus */
[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
			CNTR_NORMAL, access_send_csr_write_bad_addr_err_cnt),
[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
			CNTR_NORMAL, access_send_csr_read_bad_addr_err_cnt),
[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
			CNTR_NORMAL, access_send_csr_parity_cnt),
/* SendCtxtErrStatus */
[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
			CNTR_NORMAL, access_pio_write_out_of_bounds_err_cnt),
[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
			CNTR_NORMAL, access_pio_write_overflow_err_cnt),
[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
			0, 0, CNTR_NORMAL,
			access_pio_write_crosses_boundary_err_cnt),
[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
			CNTR_NORMAL, access_pio_disallowed_packet_err_cnt),
[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
			CNTR_NORMAL, access_pio_inconsistent_sop_err_cnt),
/* SendDmaEngErrStatus */
[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
			0, 0, CNTR_NORMAL,
			access_sdma_header_request_fifo_cor_err_cnt),
[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
			CNTR_NORMAL, access_sdma_header_storage_cor_err_cnt),
[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
			CNTR_NORMAL, access_sdma_packet_tracking_cor_err_cnt),
[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
			CNTR_NORMAL, access_sdma_assembly_cor_err_cnt),
[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
			CNTR_NORMAL, access_sdma_desc_table_cor_err_cnt),
[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
			0, 0, CNTR_NORMAL,
			access_sdma_header_request_fifo_unc_err_cnt),
[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
			CNTR_NORMAL, access_sdma_header_storage_unc_err_cnt),
[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
			CNTR_NORMAL, access_sdma_packet_tracking_unc_err_cnt),
[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
			CNTR_NORMAL, access_sdma_assembly_unc_err_cnt),
[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
			CNTR_NORMAL, access_sdma_desc_table_unc_err_cnt),
[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
			CNTR_NORMAL, access_sdma_timeout_err_cnt),
[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
			CNTR_NORMAL, access_sdma_header_length_err_cnt),
[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
			CNTR_NORMAL, access_sdma_header_address_err_cnt),
[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
			CNTR_NORMAL, access_sdma_header_select_err_cnt),
[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
			CNTR_NORMAL, access_sdma_reserved_9_err_cnt),
[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
			CNTR_NORMAL, access_sdma_packet_desc_overflow_err_cnt),
[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
			CNTR_NORMAL, access_sdma_length_mismatch_err_cnt),
[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
			CNTR_NORMAL, access_sdma_halt_err_cnt),
[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
			CNTR_NORMAL, access_sdma_mem_read_err_cnt),
[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
			CNTR_NORMAL, access_sdma_first_desc_err_cnt),
[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
			CNTR_NORMAL, access_sdma_tail_out_of_bounds_err_cnt),
[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
			CNTR_NORMAL, access_sdma_too_long_err_cnt),
[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
			CNTR_NORMAL, access_sdma_gen_mismatch_err_cnt),
[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
			CNTR_NORMAL, access_sdma_wrong_dw_err_cnt),
};
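/*
 * A sketch of how a dev_cntrs[] entry is consumed: the array index is the
 * counter id, and the entry supplies either a CSR offset or an access
 * callback.  For example, the receive-overflow counter declared above is
 * read elsewhere in this file as:
 *
 *	u32 ovfl = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
 *
 * where CNTR_INVALID_VL selects the non-per-VL variant of the counter.
 */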
static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
			CNTR_NORMAL),
[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
			CNTR_NORMAL),
[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
			CNTR_NORMAL),
[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
			CNTR_NORMAL),
[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
			CNTR_NORMAL),
[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
			access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
			CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
			access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
			access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
			access_rcv_constraint_errs),
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_qacks),
[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_delayed_comp),
[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
};
/* ========================================================================= */

/* return true if this is chip revision A */
int is_ax(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xf0) == 0;
}

/* return true if this is chip revision B */
int is_bx(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xF0) == 0x10;
}
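/*
 * A sketch of the minor-revision encoding assumed by the two helpers
 * above: the upper nibble of the minor revision selects the major step,
 * so a value of 0x0y decodes as an A-step part (is_ax) and 0x1y as a
 * B-step part (is_bx); the lower nibble is the step within that revision.
 */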
/*
 * Append string s to buffer buf.  Arguments curp and len are the current
 * position and remaining length, respectively.
 *
 * return 0 on success, 1 on out of room
 */
static int append_str(char *buf, char **curp, int *lenp, const char *s)
{
	char *p = *curp;
	int len = *lenp;
	int result = 0; /* success */
	char c;

	/* add a comma, if not at the start of the buffer */
	if (p != buf) {
		if (len == 0) {
			result = 1; /* out of room */
			goto done;
		}
		*p++ = ',';
		len--;
	}

	/* copy the string */
	while ((c = *s++) != 0) {
		if (len == 0) {
			result = 1; /* out of room */
			goto done;
		}
		*p++ = c;
		len--;
	}

done:
	/* write return values */
	*curp = p;
	*lenp = len;

	return result;
}
/*
 * Using the given flag table, print a comma separated string into
 * the buffer.  End in '*' if the buffer is too short.
 */
static char *flag_string(char *buf, int buf_len, u64 flags,
			 struct flag_table *table, int table_size)
{
	char extra[32];
	char *p = buf;
	int len = buf_len;
	int no_room = 0;
	int i;

	/* make sure there is at least 2 so we can form "*" */
	if (len < 2)
		return "";

	len--;	/* leave room for a nul */
	for (i = 0; i < table_size; i++) {
		if (flags & table[i].flag) {
			no_room = append_str(buf, &p, &len, table[i].str);
			if (no_room)
				break;
			flags &= ~table[i].flag;
		}
	}

	/* any undocumented bits left? */
	if (!no_room && flags) {
		snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
		no_room = append_str(buf, &p, &len, extra);
	}

	/* add * if ran out of room */
	if (no_room) {
		/* may need to back up to add space for a '*' */
		if (len == 0)
			--p;
		*p++ = '*';
	}

	/* add final nul - space already allocated above */
	*p = 0;
	return buf;
}
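/*
 * Example (illustrative values only): with a table mapping bit 0 to
 * "CceCsrParityErr" and bit 1 to "CceCsrReadBadAddrErr", a flags value of
 * 0x3 yields "CceCsrParityErr,CceCsrReadBadAddrErr".  Any set bit with no
 * table entry is reported as "bits 0x...", and a too-small buffer ends in
 * '*'.
 */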
/* first 8 CCE error interrupt source names */
static const char * const cce_misc_names[] = {
	"CceErrInt",		/* 0 */
	"RxeErrInt",		/* 1 */
	"MiscErrInt",		/* 2 */
	"Reserved3",		/* 3 */
	"PioErrInt",		/* 4 */
	"SDmaErrInt",		/* 5 */
	"EgressErrInt",		/* 6 */
	"TxeErrInt"		/* 7 */
};

/*
 * Return the miscellaneous error interrupt name.
 */
static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
{
	if (source < ARRAY_SIZE(cce_misc_names))
		strncpy(buf, cce_misc_names[source], bsize);
	else
		snprintf(buf, bsize, "Reserved%u",
			 source + IS_GENERAL_ERR_START);

	return buf;
}
/*
 * Return the SDMA engine error interrupt name.
 */
static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SDmaEngErrInt%u", source);
	return buf;
}

/*
 * Return the send context error interrupt name.
 */
static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SendCtxtErrInt%u", source);
	return buf;
}
static const char * const various_names[] = {
	"PbcInt",
	"GpioAssertInt",
	"Qsfp1Int",
	"Qsfp2Int",
	"TCritInt"
};

/*
 * Return the various interrupt name.
 */
static char *is_various_name(char *buf, size_t bsize, unsigned int source)
{
	if (source < ARRAY_SIZE(various_names))
		strncpy(buf, various_names[source], bsize);
	else
		snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
	return buf;
}
/*
 * Return the DC interrupt name.
 */
static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
{
	static const char * const dc_int_names[] = {
		"common",
		"lcb",
		"8051",
		"lbm"	/* local block merge */
	};

	if (source < ARRAY_SIZE(dc_int_names))
		snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
	else
		snprintf(buf, bsize, "DCInt%u", source);
	return buf;
}
static const char * const sdma_int_names[] = {
	"SDmaInt",
	"SdmaIdleInt",
	"SdmaProgressInt",
};

/*
 * Return the SDMA engine interrupt name.
 */
static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
{
	/* what interrupt */
	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
	/* which engine */
	unsigned int which = source % TXE_NUM_SDMA_ENGINES;

	if (likely(what < 3))
		snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
	else
		snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
	return buf;
}
/*
 * Return the receive available interrupt name.
 */
static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "RcvAvailInt%u", source);
	return buf;
}

/*
 * Return the receive urgent interrupt name.
 */
static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "RcvUrgentInt%u", source);
	return buf;
}

/*
 * Return the send credit interrupt name.
 */
static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SendCreditInt%u", source);
	return buf;
}

/*
 * Return the reserved interrupt name.
 */
static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
	return buf;
}
static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   cce_err_status_flags,
			   ARRAY_SIZE(cce_err_status_flags));
}

static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   rxe_err_status_flags,
			   ARRAY_SIZE(rxe_err_status_flags));
}

static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, misc_err_status_flags,
			   ARRAY_SIZE(misc_err_status_flags));
}

static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   pio_err_status_flags,
			   ARRAY_SIZE(pio_err_status_flags));
}

static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   sdma_err_status_flags,
			   ARRAY_SIZE(sdma_err_status_flags));
}

static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   egress_err_status_flags,
			   ARRAY_SIZE(egress_err_status_flags));
}

static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   egress_err_info_flags,
			   ARRAY_SIZE(egress_err_info_flags));
}

static char *send_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   send_err_status_flags,
			   ARRAY_SIZE(send_err_status_flags));
}
5305 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5311	 * For most of these errors, there is nothing that can be done except
5312 * report or record it.
5314 dd_dev_info(dd, "CCE Error: %s\n",
5315 cce_err_status_string(buf, sizeof(buf), reg));
5317 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5318 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5319 /* this error requires a manual drop into SPC freeze mode */
5321 start_freeze_handling(dd->pport, FREEZE_SELF);
5324 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5325 if (reg & (1ull << i)) {
5326 incr_cntr64(&dd->cce_err_status_cnt[i]);
5327 /* maintain a counter over all cce_err_status errors */
5328 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5334 * Check counters for receive errors that do not have an interrupt
5335 * associated with them.
5337 #define RCVERR_CHECK_TIME 10
5338 static void update_rcverr_timer(unsigned long opaque)
5340 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5341 struct hfi1_pportdata *ppd = dd->pport;
5342 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5344 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5345 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5346 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5347 set_link_down_reason(ppd,
5348 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5349 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5350 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5352 dd->rcv_ovfl_cnt = (u32) cur_ovfl_cnt;
5354 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5357 static int init_rcverr(struct hfi1_devdata *dd)
5359 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5360 /* Assume the hardware counter has been reset */
5361 dd->rcv_ovfl_cnt = 0;
5362 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5365 static void free_rcverr(struct hfi1_devdata *dd)
5367 if (dd->rcverr_timer.data)
5368 del_timer_sync(&dd->rcverr_timer);
5369 dd->rcverr_timer.data = 0;
5372 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5377 dd_dev_info(dd, "Receive Error: %s\n",
5378 rxe_err_status_string(buf, sizeof(buf), reg));
5380 if (reg & ALL_RXE_FREEZE_ERR) {
5384 * Freeze mode recovery is disabled for the errors
5385 * in RXE_FREEZE_ABORT_MASK
5387 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5388 flags = FREEZE_ABORT;
5390 start_freeze_handling(dd->pport, flags);
5393 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5394 if (reg & (1ull << i))
5395 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5399 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5404 dd_dev_info(dd, "Misc Error: %s",
5405 misc_err_status_string(buf, sizeof(buf), reg));
5406 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5407 if (reg & (1ull << i))
5408 incr_cntr64(&dd->misc_err_status_cnt[i]);
5412 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5417 dd_dev_info(dd, "PIO Error: %s\n",
5418 pio_err_status_string(buf, sizeof(buf), reg));
5420 if (reg & ALL_PIO_FREEZE_ERR)
5421 start_freeze_handling(dd->pport, 0);
5423 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5424 if (reg & (1ull << i))
5425 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5429 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5434 dd_dev_info(dd, "SDMA Error: %s\n",
5435 sdma_err_status_string(buf, sizeof(buf), reg));
5437 if (reg & ALL_SDMA_FREEZE_ERR)
5438 start_freeze_handling(dd->pport, 0);
5440 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5441 if (reg & (1ull << i))
5442 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5446 static void count_port_inactive(struct hfi1_devdata *dd)
5448 struct hfi1_pportdata *ppd = dd->pport;
5450 if (ppd->port_xmit_discards < ~(u64)0)
5451 ppd->port_xmit_discards++;
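/*
 * Saturating-counter sketch (illustration only): the discard counter is
 * clamped at the maximum u64 value instead of wrapping, so a long-lived
 * port never reports a small count after an overflow. The helper name
 * is hypothetical.
 */
static void __maybe_unused example_saturating_incr(u64 *cntr)
{
	if (*cntr < ~(u64)0)	/* stop at all-ones rather than wrap */
		(*cntr)++;
}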
5455 * We have had a "disallowed packet" error during egress. Determine the
5456 * integrity check which failed, and update relevant error counter, etc.
5458 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5459 * bit of state per integrity check, and so we can miss the reason for an
5460 * egress error if more than one packet fails the same integrity check
5461 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5463 static void handle_send_egress_err_info(struct hfi1_devdata *dd)
5465 struct hfi1_pportdata *ppd = dd->pport;
5466 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5467 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5470 /* clear down all observed info as quickly as possible after read */
5471 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5474 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5475 info, egress_err_info_string(buf, sizeof(buf), info), src);
5477 /* Eventually add other counters for each bit */
5479 if (info & SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK) {
5480 if (ppd->port_xmit_discards < ~(u64)0)
5481 ppd->port_xmit_discards++;
5486 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5487 * register. Does it represent a 'port inactive' error?
5489 static inline int port_inactive_err(u64 posn)
5491 return (posn >= SEES(TX_LINKDOWN) &&
5492 posn <= SEES(TX_INCORRECT_LINK_STATE));
5496 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5497 * register. Does it represent a 'disallowed packet' error?
5499 static inline int disallowed_pkt_err(u64 posn)
5501 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5502 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5505 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5507 u64 reg_copy = reg, handled = 0;
5511 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5512 start_freeze_handling(dd->pport, 0);
5513 if (is_ax(dd) && (reg &
5514 SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK)
5515 && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5516 start_freeze_handling(dd->pport, 0);
5519 int posn = fls64(reg_copy);
5521 * fls64() returns a 1-based offset, but we generally
5522 * want 0-based offsets.
5524 int shift = posn - 1;
5526 if (port_inactive_err(shift)) {
5527 count_port_inactive(dd);
5528 handled |= (1ULL << shift);
5529 } else if (disallowed_pkt_err(shift)) {
5530 handle_send_egress_err_info(dd);
5531 handled |= (1ULL << shift);
5533	clear_bit(shift, (unsigned long *)&reg_copy);
5539 dd_dev_info(dd, "Egress Error: %s\n",
5540 egress_err_status_string(buf, sizeof(buf), reg));
5542 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5543 if (reg & (1ull << i))
5544 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
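/*
 * Bit-walk sketch (illustration only) of the loop above: fls64()
 * returns the 1-based position of the most significant set bit (0 when
 * none are set), so subtracting 1 gives the 0-based position, and
 * clearing that bit advances the walk from high bits to low.
 */
static void __maybe_unused example_walk_set_bits(u64 bits)
{
	while (bits) {
		int shift = fls64(bits) - 1;	/* 0-based bit position */

		/* ... dispatch on 'shift' here ... */
		bits &= ~(1ULL << shift);
	}
}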
5548 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5553 dd_dev_info(dd, "Send Error: %s\n",
5554 send_err_status_string(buf, sizeof(buf), reg));
5556 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5557 if (reg & (1ull << i))
5558 incr_cntr64(&dd->send_err_status_cnt[i]);
5563 * The maximum number of times the error clear down will loop before
5564 * blocking a repeating error. This value is arbitrary.
5566 #define MAX_CLEAR_COUNT 20
5569 * Clear and handle an error register. All error interrupts are funneled
5570 * through here to have a central location to correctly handle single-
5571 * or multi-shot errors.
5573 * For non per-context registers, call this routine with a context value
5574 * of 0 so the per-context offset is zero.
5576 * If the handler loops too many times, assume that something is wrong
5577 * and can't be fixed, so mask the error bits.
5579 static void interrupt_clear_down(struct hfi1_devdata *dd,
5581 const struct err_reg_info *eri)
5586 /* read in a loop until no more errors are seen */
5589 reg = read_kctxt_csr(dd, context, eri->status);
5592 write_kctxt_csr(dd, context, eri->clear, reg);
5593 if (likely(eri->handler))
5594 eri->handler(dd, context, reg);
5596 if (count > MAX_CLEAR_COUNT) {
5599 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5602 * Read-modify-write so any other masked bits
5605 mask = read_kctxt_csr(dd, context, eri->mask);
5607 write_kctxt_csr(dd, context, eri->mask, mask);
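/*
 * Read-modify-write sketch (illustration only): to mask a repeating
 * error source, clear only the offending bits in the enable mask and
 * leave every other enable bit alone. Names are hypothetical.
 */
static u64 __maybe_unused example_mask_offenders(u64 mask_csr, u64 bad_bits)
{
	return mask_csr & ~bad_bits;	/* drop only the repeating bits */
}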
5614 * CCE block "misc" interrupt. Source is < 16.
5616 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5618 const struct err_reg_info *eri = &misc_errs[source];
5621 interrupt_clear_down(dd, 0, eri);
5623 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5628 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5630 return flag_string(buf, buf_len, flags,
5631 sc_err_status_flags, ARRAY_SIZE(sc_err_status_flags));
5635 * Send context error interrupt. Source (hw_context) is < 160.
5637 * All send context errors cause the send context to halt. The normal
5638 * clear-down mechanism cannot be used because we cannot clear the
5639 * error bits until several other long-running items are done first.
5640 * This is OK because with the context halted, nothing else is going
5641 * to happen on it anyway.
5643 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5644 unsigned int hw_context)
5646 struct send_context_info *sci;
5647 struct send_context *sc;
5653 sw_index = dd->hw_to_sw[hw_context];
5654 if (sw_index >= dd->num_send_contexts) {
5656 "out of range sw index %u for send context %u\n",
5657 sw_index, hw_context);
5660 sci = &dd->send_contexts[sw_index];
5663 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5664 sw_index, hw_context);
5668 /* tell the software that a halt has begun */
5669 sc_stop(sc, SCF_HALTED);
5671 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5673 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5674 send_context_err_status_string(flags, sizeof(flags), status));
5676 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5677 handle_send_egress_err_info(dd);
5680 * Automatically restart halted kernel contexts out of interrupt
5681 * context. User contexts must ask the driver to restart the context.
5683 if (sc->type != SC_USER)
5684 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5687 * Update the counters for the corresponding status bits.
5688 * Note that these particular counters are aggregated over all
5691 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5692 if (status & (1ull << i))
5693 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5697 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5698 unsigned int source, u64 status)
5700 struct sdma_engine *sde;
5703 sde = &dd->per_sdma[source];
5704 #ifdef CONFIG_SDMA_VERBOSITY
5705 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5706 slashstrip(__FILE__), __LINE__, __func__);
5707 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5708 sde->this_idx, source, (unsigned long long)status);
5710 sdma_engine_error(sde, status);
5713 * Update the counters for the corresponding status bits.
5714 * Note that these particular counters are aggregated over
5715 * all 16 DMA engines.
5717 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5718 if (status & (1ull << i))
5719 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5724 * CCE block SDMA error interrupt. Source is < 16.
5726 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5728 #ifdef CONFIG_SDMA_VERBOSITY
5729 struct sdma_engine *sde = &dd->per_sdma[source];
5731 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5732 slashstrip(__FILE__), __LINE__, __func__);
5733 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5735 sdma_dumpstate(sde);
5737 interrupt_clear_down(dd, source, &sdma_eng_err);
5741 * CCE block "various" interrupt. Source is < 8.
5743 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5745 const struct err_reg_info *eri = &various_err[source];
5748 * TCritInt cannot go through interrupt_clear_down()
5749 * because it is not a second tier interrupt. The handler
5750 * should be called directly.
5752 if (source == TCRIT_INT_SOURCE)
5753 handle_temp_err(dd);
5754 else if (eri->handler)
5755 interrupt_clear_down(dd, 0, eri);
5758 "%s: Unimplemented/reserved interrupt %d\n",
5762 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5764 /* source is always zero */
5765 struct hfi1_pportdata *ppd = dd->pport;
5766 unsigned long flags;
5767 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5769 if (reg & QSFP_HFI0_MODPRST_N) {
5771 dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n",
5774 if (!qsfp_mod_present(ppd)) {
5775 ppd->driver_link_ready = 0;
5777 * Cable removed, reset all our information about the
5778 * cache and cable capabilities
5781 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5783 * We don't set cache_refresh_required here as we expect
5784 * an interrupt when a cable is inserted
5786 ppd->qsfp_info.cache_valid = 0;
5787 ppd->qsfp_info.qsfp_interrupt_functional = 0;
5788 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5795 if (ppd->host_link_state == HLS_DN_POLL) {
5797 * The link is still in POLL. This means
5798 * that the normal link down processing
5799 * will not happen. We have to do it here
5800 * before turning the DC off.
5802 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5805 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5806 ppd->qsfp_info.cache_valid = 0;
5807 ppd->qsfp_info.cache_refresh_required = 1;
5808 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5811 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
5820 if (reg & QSFP_HFI0_INT_N) {
5822 dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n",
5824 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5825 ppd->qsfp_info.check_interrupt_flags = 1;
5826 ppd->qsfp_info.qsfp_interrupt_functional = 1;
5827 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
5830 /* Schedule the QSFP work only if there is a cable attached. */
5831 if (qsfp_mod_present(ppd))
5832 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
5835 static int request_host_lcb_access(struct hfi1_devdata *dd)
5839 ret = do_8051_command(dd, HCMD_MISC,
5840 (u64)HCMD_MISC_REQUEST_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
5842 if (ret != HCMD_SUCCESS) {
5843 dd_dev_err(dd, "%s: command failed with error %d\n",
5846 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
5849 static int request_8051_lcb_access(struct hfi1_devdata *dd)
5853 ret = do_8051_command(dd, HCMD_MISC,
5854 (u64)HCMD_MISC_GRANT_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
5856 if (ret != HCMD_SUCCESS) {
5857 dd_dev_err(dd, "%s: command failed with error %d\n",
5860 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
5864 * Set the LCB selector - allow host access. The DCC selector always
5865 * points to the host.
5867 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
5869 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
5870 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK
5871 | DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
5875 * Clear the LCB selector - allow 8051 access. The DCC selector always
5876 * points to the host.
5878 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
5880 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
5881 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
5885 * Acquire LCB access from the 8051. If the host already has access,
5886 * just increment a counter. Otherwise, inform the 8051 that the
5887 * host is taking access.
5891 * -EBUSY if the 8051 has control and cannot be disturbed
5892 * -errno if unable to acquire access from the 8051
5894 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
5896 struct hfi1_pportdata *ppd = dd->pport;
5900 * Use the host link state lock so the operation of this routine
5901 * { link state check, selector change, count increment } can occur
5902 * as a unit against a link state change. Otherwise there is a
5903 * race between the state change and the count increment.
5906 mutex_lock(&ppd->hls_lock);
5908 while (!mutex_trylock(&ppd->hls_lock))
5912 /* this access is valid only when the link is up */
5913 if ((ppd->host_link_state & HLS_UP) == 0) {
5914 dd_dev_info(dd, "%s: link state %s not up\n",
5915 __func__, link_state_name(ppd->host_link_state));
5920 if (dd->lcb_access_count == 0) {
5921 ret = request_host_lcb_access(dd);
5924 "%s: unable to acquire LCB access, err %d\n",
5928 set_host_lcb_access(dd);
5930 dd->lcb_access_count++;
5932 mutex_unlock(&ppd->hls_lock);
5937 * Release LCB access by decrementing the use count. If the count is moving
5938 * from 1 to 0, inform 8051 that it has control back.
5942 * -errno if unable to release access to the 8051
5944 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
5949 * Use the host link state lock because the acquire needed it.
5950 * Here, we only need to keep { selector change, count decrement }
5954 mutex_lock(&dd->pport->hls_lock);
5956 while (!mutex_trylock(&dd->pport->hls_lock))
5960 if (dd->lcb_access_count == 0) {
5961 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
5966 if (dd->lcb_access_count == 1) {
5967 set_8051_lcb_access(dd);
5968 ret = request_8051_lcb_access(dd);
5971 "%s: unable to release LCB access, err %d\n",
5973 /* restore host access if the grant didn't work */
5974 set_host_lcb_access(dd);
5978 dd->lcb_access_count--;
5980 mutex_unlock(&dd->pport->hls_lock);
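/*
 * Counted-access sketch (illustrative, simplified from the two routines
 * above): only the 0->1 transition asks the 8051 for LCB access; nested
 * users just bump the count under the same lock that serializes link
 * state changes. The link-state check, error reporting, and sleep_ok
 * path are omitted here.
 */
static int __maybe_unused example_acquire_lcb(struct hfi1_devdata *dd)
{
	int ret = 0;

	mutex_lock(&dd->pport->hls_lock);
	if (dd->lcb_access_count == 0) {
		ret = request_host_lcb_access(dd);	/* first user */
		if (ret == 0)
			set_host_lcb_access(dd);
	}
	if (ret == 0)
		dd->lcb_access_count++;
	mutex_unlock(&dd->pport->hls_lock);
	return ret;
}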
5985 * Initialize LCB access variables and state. Called during driver load,
5986 * after most of the initialization is finished.
5988 * The DC default is LCB access on for the host. The driver defaults to
5989 * leaving access to the 8051. Assign access now - this constrains the call
5990 * to this routine to be after all LCB set-up is done. In particular, after
5991	 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
5993 static void init_lcb_access(struct hfi1_devdata *dd)
5995 dd->lcb_access_count = 0;
5999 * Write a response back to a 8051 request.
6001 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6003 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6004 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK
6005 | (u64)return_code << DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT
6006 | (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6010 * Handle requests from the 8051.
6012 static void handle_8051_request(struct hfi1_devdata *dd)
6018 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6019 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6020 return; /* no request */
6022 /* zero out COMPLETED so the response is seen */
6023 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6025 /* extract request details */
6026 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6027 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6028 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6029 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6032 case HREQ_LOAD_CONFIG:
6033 case HREQ_SAVE_CONFIG:
6034 case HREQ_READ_CONFIG:
6035 case HREQ_SET_TX_EQ_ABS:
6036 case HREQ_SET_TX_EQ_REL:
6038 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6040 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6043 case HREQ_CONFIG_DONE:
6044 hreq_response(dd, HREQ_SUCCESS, 0);
6047 case HREQ_INTERFACE_TEST:
6048 hreq_response(dd, HREQ_SUCCESS, data);
6052 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6053 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
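/*
 * Field-extraction sketch (illustration only): pulling a shifted,
 * masked field out of a 64-bit CSR value, as done for the request type
 * and data above. The shift and mask constants are stand-ins, not real
 * register definitions.
 */
#define EXAMPLE_REQ_SHIFT 16
#define EXAMPLE_REQ_MASK  0xffull

static u64 __maybe_unused example_extract_field(u64 reg)
{
	return (reg >> EXAMPLE_REQ_SHIFT) & EXAMPLE_REQ_MASK;
}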
6058 static void write_global_credit(struct hfi1_devdata *dd,
6059 u8 vau, u16 total, u16 shared)
6061 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6063 << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
6065 << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
6066 | ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6070 * Set up initial VL15 credits of the remote. Assumes the rest of
6071	 * the CM credit registers are zero from a previous global or credit reset.
6073 void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6075 /* leave shared count at zero for both global and VL15 */
6076 write_global_credit(dd, vau, vl15buf, 0);
6078 /* We may need some credits for another VL when sending packets
6079 * with the snoop interface. Dividing it down the middle for VL15
6080 * and VL0 should suffice.
6082 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6083 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6084 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6085 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6086 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6088 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6089 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6094 * Zero all credit details from the previous connection and
6095 * reset the CM manager's internal counters.
6097 void reset_link_credits(struct hfi1_devdata *dd)
6101 /* remove all previous VL credit limits */
6102 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6103 write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
6104 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6105 write_global_credit(dd, 0, 0, 0);
6106 /* reset the CM block */
6107 pio_send_control(dd, PSC_CM_RESET);
6110 /* convert a vCU to a CU */
6111 static u32 vcu_to_cu(u8 vcu)
6116 /* convert a CU to a vCU */
6117 static u8 cu_to_vcu(u32 cu)
6122 /* convert a vAU to an AU */
6123 static u32 vau_to_au(u8 vau)
6125 return 8 * (1 << vau);
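/*
 * Worked examples for the conversion above: AU = 8 * 2^vAU, so
 * vAU 0 -> 8 bytes, vAU 1 -> 16 bytes, vAU 3 -> 64 bytes.
 */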
6128 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6130 ppd->sm_trap_qp = 0x0;
6135 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6137 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6141 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6142 write_csr(dd, DC_LCB_CFG_RUN, 0);
6143 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6144 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6145 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6146 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6147 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6148 reg = read_csr(dd, DCC_CFG_RESET);
6149 write_csr(dd, DCC_CFG_RESET,
6151 | (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT)
6152 | (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6153 (void) read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6155 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6156 write_csr(dd, DCC_CFG_RESET, reg);
6157 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
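/*
 * Reset-pulse sketch (illustration only), following the sequence above:
 * assert the reset bits, read the CSR back to flush the posted write,
 * hold for the required time, then restore the original value. The
 * helper and its 1us hold time are assumptions of the example.
 */
static void __maybe_unused example_reset_pulse(struct hfi1_devdata *dd,
					       u32 csr, u64 bits)
{
	u64 orig = read_csr(dd, csr);

	write_csr(dd, csr, orig | bits);
	(void)read_csr(dd, csr);	/* flush the posted write */
	udelay(1);			/* hold time */
	write_csr(dd, csr, orig);
}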
6162 * This routine should be called after the link has been transitioned to
6163 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6166 * The expectation is that the caller of this routine would have taken
6167 * care of properly transitioning the link into the correct state.
6169 static void dc_shutdown(struct hfi1_devdata *dd)
6171 unsigned long flags;
6173 spin_lock_irqsave(&dd->dc8051_lock, flags);
6174 if (dd->dc_shutdown) {
6175 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6178 dd->dc_shutdown = 1;
6179 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6180 /* Shutdown the LCB */
6181 lcb_shutdown(dd, 1);
6182	/* Going to OFFLINE would have caused the 8051 to put the
6183 * SerDes into reset already. Just need to shut down the 8051,
6185 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6188 /* Calling this after the DC has been brought out of reset should not
6190 static void dc_start(struct hfi1_devdata *dd)
6192 unsigned long flags;
6195 spin_lock_irqsave(&dd->dc8051_lock, flags);
6196 if (!dd->dc_shutdown)
6198 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6199 /* Take the 8051 out of reset */
6200 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6201 /* Wait until 8051 is ready */
6202 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6204 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6207 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6208 write_csr(dd, DCC_CFG_RESET, 0x10);
6209 /* lcb_shutdown() with abort=1 does not restore these */
6210 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6211 spin_lock_irqsave(&dd->dc8051_lock, flags);
6212 dd->dc_shutdown = 0;
6214 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6218 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6220 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6222 u64 rx_radr, tx_radr;
6225 if (dd->icode != ICODE_FPGA_EMULATION)
6229 * These LCB defaults on emulator _s are good, nothing to do here:
6230 * LCB_CFG_TX_FIFOS_RADR
6231 * LCB_CFG_RX_FIFOS_RADR
6233 * LCB_CFG_IGNORE_LOST_RCLK
6235 if (is_emulator_s(dd))
6237 /* else this is _p */
6239 version = emulator_rev(dd);
6241 version = 0x2d; /* all B0 use 0x2d or higher settings */
6243 if (version <= 0x12) {
6244 /* release 0x12 and below */
6247 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6248 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6249 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6252 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6253 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6254 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6256 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6257 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6259 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6260 } else if (version <= 0x18) {
6261 /* release 0x13 up to 0x18 */
6262 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6264 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6265 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6266 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6267 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6268 } else if (version == 0x19) {
6270 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6272 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6273 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6274 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6275 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6276 } else if (version == 0x1a) {
6278 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6280 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6281 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6282 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6283 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6284 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6286 /* release 0x1b and higher */
6287 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6289 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6290 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6291 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6292 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6295 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6296 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6297 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6298 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6299 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6303 * Handle a SMA idle message
6305 * This is a work-queue function outside of the interrupt.
6307 void handle_sma_message(struct work_struct *work)
6309 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6311 struct hfi1_devdata *dd = ppd->dd;
6315 /* msg is bytes 1-4 of the 40-bit idle message - the command code
6317 ret = read_idle_sma(dd, &msg);
6320 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6322 * React to the SMA message. Byte[1] (0 for us) is the command.
6324 switch (msg & 0xff) {
6327 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6330 * Only expected in INIT or ARMED, discard otherwise.
6332 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6333 ppd->neighbor_normal = 1;
6335 case SMA_IDLE_ACTIVE:
6337 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6340 * Can activate the node. Discard otherwise.
6342 if (ppd->host_link_state == HLS_UP_ARMED
6343 && ppd->is_active_optimize_enabled) {
6344 ppd->neighbor_normal = 1;
6345 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6349 "%s: received Active SMA idle message, couldn't set link to Active\n",
6355 "%s: received unexpected SMA idle message 0x%llx\n",
6361 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6364 unsigned long flags;
6366 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6367 rcvctrl = read_csr(dd, RCV_CTRL);
6370 write_csr(dd, RCV_CTRL, rcvctrl);
6371 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6374 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6376 adjust_rcvctrl(dd, add, 0);
6379 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6381 adjust_rcvctrl(dd, 0, clear);
6385 * Called from all interrupt handlers to start handling an SPC freeze.
6387 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6389 struct hfi1_devdata *dd = ppd->dd;
6390 struct send_context *sc;
6393 if (flags & FREEZE_SELF)
6394 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6396 /* enter frozen mode */
6397 dd->flags |= HFI1_FROZEN;
6399 /* notify all SDMA engines that they are going into a freeze */
6400 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6402 /* do halt pre-handling on all enabled send contexts */
6403 for (i = 0; i < dd->num_send_contexts; i++) {
6404 sc = dd->send_contexts[i].sc;
6405 if (sc && (sc->flags & SCF_ENABLED))
6406 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6409	/* Send contexts are frozen. Notify user space */
6410 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6412 if (flags & FREEZE_ABORT) {
6414 "Aborted freeze recovery. Please REBOOT system\n");
6417 /* queue non-interrupt handler */
6418 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6422 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6423 * depending on the "freeze" parameter.
6425	 * No need to return an error if it times out; our only option
6426 * is to proceed anyway.
6428 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6430 unsigned long timeout;
6433 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6435 reg = read_csr(dd, CCE_STATUS);
6437 /* waiting until all indicators are set */
6438 if ((reg & ALL_FROZE) == ALL_FROZE)
6439 return; /* all done */
6441 /* waiting until all indicators are clear */
6442 if ((reg & ALL_FROZE) == 0)
6443 return; /* all done */
6446 if (time_after(jiffies, timeout)) {
6448 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6451 freeze ? ALL_FROZE : 0ull);
6454 usleep_range(80, 120);
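/*
 * Poll-with-deadline sketch (illustration only) of the loop above: read
 * the status, test the terminal condition first, and only then check
 * the deadline, so a match on the final read is not lost to the
 * timeout. The 100ms deadline is an assumption of the example; the
 * driver uses FREEZE_STATUS_TIMEOUT.
 */
static int __maybe_unused example_poll_frozen(struct hfi1_devdata *dd,
					      u64 want)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	while (1) {
		u64 reg = read_csr(dd, CCE_STATUS) & ALL_FROZE;

		if (reg == want)	/* terminal condition */
			return 0;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(80, 120);
	}
}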
6459 * Do all freeze handling for the RXE block.
6461 static void rxe_freeze(struct hfi1_devdata *dd)
6466 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6468 /* disable all receive contexts */
6469 for (i = 0; i < dd->num_rcv_contexts; i++)
6470 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6474 * Unfreeze handling for the RXE block - kernel contexts only.
6475 * This will also enable the port. User contexts will do unfreeze
6476 * handling on a per-context basis as they call into the driver.
6479 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6483 /* enable all kernel contexts */
6484 for (i = 0; i < dd->n_krcv_queues; i++)
6485 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, i);
6488 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6492 * Non-interrupt SPC freeze handling.
6494 * This is a work-queue function outside of the triggering interrupt.
6496 void handle_freeze(struct work_struct *work)
6498 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6500 struct hfi1_devdata *dd = ppd->dd;
6502 /* wait for freeze indicators on all affected blocks */
6503 wait_for_freeze_status(dd, 1);
6505 /* SPC is now frozen */
6507 /* do send PIO freeze steps */
6510 /* do send DMA freeze steps */
6513 /* do send egress freeze steps - nothing to do */
6515 /* do receive freeze steps */
6519 * Unfreeze the hardware - clear the freeze, wait for each
6520 * block's frozen bit to clear, then clear the frozen flag.
6522 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6523 wait_for_freeze_status(dd, 0);
6526 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6527 wait_for_freeze_status(dd, 1);
6528 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6529 wait_for_freeze_status(dd, 0);
6532 /* do send PIO unfreeze steps for kernel contexts */
6533 pio_kernel_unfreeze(dd);
6535 /* do send DMA unfreeze steps */
6538 /* do send egress unfreeze steps - nothing to do */
6540 /* do receive unfreeze steps for kernel contexts */
6541 rxe_kernel_unfreeze(dd);
6544 * The unfreeze procedure touches global device registers when
6545 * it disables and re-enables RXE. Mark the device unfrozen
6546 * after all that is done so other parts of the driver waiting
6547 * for the device to unfreeze don't do things out of order.
6549 * The above implies that the meaning of HFI1_FROZEN flag is
6550 * "Device has gone into freeze mode and freeze mode handling
6551 * is still in progress."
6553 * The flag will be removed when freeze mode processing has
6556 dd->flags &= ~HFI1_FROZEN;
6557 wake_up(&dd->event_queue);
6559 /* no longer frozen */
6563 * Handle a link up interrupt from the 8051.
6565 * This is a work-queue function outside of the interrupt.
6567 void handle_link_up(struct work_struct *work)
6569 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6571 set_link_state(ppd, HLS_UP_INIT);
6573 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6574 read_ltp_rtt(ppd->dd);
6576 * OPA specifies that certain counters are cleared on a transition
6577 * to link up, so do that.
6579 clear_linkup_counters(ppd->dd);
6581 * And (re)set link up default values.
6583 set_linkup_defaults(ppd);
6585 /* enforce link speed enabled */
6586 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6587 /* oops - current speed is not enabled, bounce */
6589 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6590 ppd->link_speed_active, ppd->link_speed_enabled);
6591 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6592 OPA_LINKDOWN_REASON_SPEED_POLICY);
6593 set_link_state(ppd, HLS_DN_OFFLINE);
6598 /* Several pieces of LNI information were cached for SMA in ppd.
6599 * Reset these on link down */
6600 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6602 ppd->neighbor_guid = 0;
6603 ppd->neighbor_port_number = 0;
6604 ppd->neighbor_type = 0;
6605 ppd->neighbor_fm_security = 0;
6609 * Handle a link down interrupt from the 8051.
6611 * This is a work-queue function outside of the interrupt.
6613 void handle_link_down(struct work_struct *work)
6615 u8 lcl_reason, neigh_reason = 0;
6616 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6619 /* go offline first, then deal with reasons */
6620 set_link_state(ppd, HLS_DN_OFFLINE);
6623 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6626 * If no reason, assume peer-initiated but missed
6627 * LinkGoingDown idle flits.
6629 if (neigh_reason == 0)
6630 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6632 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6634 reset_neighbor_info(ppd);
6636 /* disable the port */
6637 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6639 /* If there is no cable attached, turn the DC off. Otherwise,
6640	 * start the link bring-up. */
6641 if (!qsfp_mod_present(ppd))
6642 dc_shutdown(ppd->dd);
6647 void handle_link_bounce(struct work_struct *work)
6649 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6653 * Only do something if the link is currently up.
6655 if (ppd->host_link_state & HLS_UP) {
6656 set_link_state(ppd, HLS_DN_OFFLINE);
6659 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
6660 __func__, link_state_name(ppd->host_link_state));
6665 * Mask conversion: Capability exchange to Port LTP. The capability
6666 * exchange has an implicit 16b CRC that is mandatory.
6668 static int cap_to_port_ltp(int cap)
6670 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
6672 if (cap & CAP_CRC_14B)
6673 port_ltp |= PORT_LTP_CRC_MODE_14;
6674 if (cap & CAP_CRC_48B)
6675 port_ltp |= PORT_LTP_CRC_MODE_48;
6676 if (cap & CAP_CRC_12B_16B_PER_LANE)
6677 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
6683 * Convert an OPA Port LTP mask to capability mask
6685 int port_ltp_to_cap(int port_ltp)
6689 if (port_ltp & PORT_LTP_CRC_MODE_14)
6690 cap_mask |= CAP_CRC_14B;
6691 if (port_ltp & PORT_LTP_CRC_MODE_48)
6692 cap_mask |= CAP_CRC_48B;
6693 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
6694 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
6700 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
6702 static int lcb_to_port_ltp(int lcb_crc)
6706 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
6707 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
6708 else if (lcb_crc == LCB_CRC_48B)
6709 port_ltp = PORT_LTP_CRC_MODE_48;
6710 else if (lcb_crc == LCB_CRC_14B)
6711 port_ltp = PORT_LTP_CRC_MODE_14;
6713 port_ltp = PORT_LTP_CRC_MODE_16;
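/*
 * Worked example for the conversions above: a capability mask of
 * (CAP_CRC_14B | CAP_CRC_48B) maps to
 * (PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14 | PORT_LTP_CRC_MODE_48),
 * since the mandatory 16b mode is always included; port_ltp_to_cap()
 * then recovers exactly the two optional capability bits.
 */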
6719 * Our neighbor has indicated that we are allowed to act as a fabric
6720 * manager, so place the full management partition key in the second
6721 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
6722 * that we should already have the limited management partition key in
6723 * array element 1, and also that the port is not yet up when
6724 * add_full_mgmt_pkey() is invoked.
6726 static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
6728 struct hfi1_devdata *dd = ppd->dd;
6730	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
6731 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
6732 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
6733 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
6734 ppd->pkeys[2] = FULL_MGMT_P_KEY;
6735 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
6739 * Convert the given link width to the OPA link width bitmask.
6741 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
6746 * Simulator and quick linkup do not set the width.
6747 * Just set it to 4x without complaint.
6749 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
6750 return OPA_LINK_WIDTH_4X;
6751 return 0; /* no lanes up */
6752 case 1: return OPA_LINK_WIDTH_1X;
6753 case 2: return OPA_LINK_WIDTH_2X;
6754 case 3: return OPA_LINK_WIDTH_3X;
6756 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
6759 case 4: return OPA_LINK_WIDTH_4X;
6764 * Do a population count on the bottom nibble.
6766 static const u8 bit_counts[16] = {
6767 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
6769 static inline u8 nibble_to_count(u8 nibble)
6771 return bit_counts[nibble & 0xf];
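/*
 * Worked example: nibble 0xb = 1011b has three bits set, so
 * bit_counts[0xb] == 3 and nibble_to_count(0xb) returns 3.
 */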
6775 * Read the active lane information from the 8051 registers and return
6778 * Active lane information is found in these 8051 registers:
6782 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
6788 u8 tx_polarity_inversion;
6789 u8 rx_polarity_inversion;
6792 /* read the active lanes */
6793 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
6794 &rx_polarity_inversion, &max_rate);
6795 read_local_lni(dd, &enable_lane_rx);
6797 /* convert to counts */
6798 tx = nibble_to_count(enable_lane_tx);
6799 rx = nibble_to_count(enable_lane_rx);
6802 * Set link_speed_active here, overriding what was set in
6803 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
6804 * set the max_rate field in handle_verify_cap until v0.19.
6806 if ((dd->icode == ICODE_RTL_SILICON)
6807 && (dd->dc8051_ver < dc8051_ver(0, 19))) {
6808 /* max_rate: 0 = 12.5G, 1 = 25G */
6811 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
6815 "%s: unexpected max rate %d, using 25Gb\n",
6816 __func__, (int)max_rate);
6819 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
6825 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
6826 enable_lane_tx, tx, enable_lane_rx, rx);
6827 *tx_width = link_width_to_bits(dd, tx);
6828 *rx_width = link_width_to_bits(dd, rx);
6832 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
6833 * Valid after the end of VerifyCap and during LinkUp. Does not change
6834 * after link up. I.e. look elsewhere for downgrade information.
6837 * + bits [7:4] contain the number of active transmitters
6838 * + bits [3:0] contain the number of active receivers
6839 * These are numbers 1 through 4 and can be different values if the
6840 * link is asymmetric.
6842 * verify_cap_local_fm_link_width[0] retains its original value.
6844 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
6848 u8 misc_bits, local_flags;
6849 u16 active_tx, active_rx;
6851 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
6853 rx = (widths >> 8) & 0xf;
6855 *tx_width = link_width_to_bits(dd, tx);
6856 *rx_width = link_width_to_bits(dd, rx);
6858 /* print the active widths */
6859 get_link_widths(dd, &active_tx, &active_rx);
6863 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
6864 * hardware information when the link first comes up.
6866 * The link width is not available until after VerifyCap.AllFramesReceived
6867 * (the trigger for handle_verify_cap), so this is outside that routine
6868 * and should be called when the 8051 signals linkup.
6870 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
6872 u16 tx_width, rx_width;
6874 /* get end-of-LNI link widths */
6875 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
6877 /* use tx_width as the link is supposed to be symmetric on link up */
6878 ppd->link_width_active = tx_width;
6879 /* link width downgrade active (LWD.A) starts out matching LW.A */
6880 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
6881 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
6882 /* per OPA spec, on link up LWD.E resets to LWD.S */
6883 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
6884	/* cache the active egress rate (units [10^6 bits/sec]) */
6885 ppd->current_egress_rate = active_egress_rate(ppd);
6889 * Handle a verify capabilities interrupt from the 8051.
6891 * This is a work-queue function outside of the interrupt.
6893 void handle_verify_cap(struct work_struct *work)
6895 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6897 struct hfi1_devdata *dd = ppd->dd;
6899 u8 power_management;
6909 u16 active_tx, active_rx;
6910 u8 partner_supported_crc;
6914 set_link_state(ppd, HLS_VERIFY_CAP);
6916 lcb_shutdown(dd, 0);
6917 adjust_lcb_for_fpga_serdes(dd);
6920 * These are now valid:
6921 * remote VerifyCap fields in the general LNI config
6922 * CSR DC8051_STS_REMOTE_GUID
6923 * CSR DC8051_STS_REMOTE_NODE_TYPE
6924 * CSR DC8051_STS_REMOTE_FM_SECURITY
6925 * CSR DC8051_STS_REMOTE_PORT_NO
6928 read_vc_remote_phy(dd, &power_management, &continious);
6929 read_vc_remote_fabric(
6935 &partner_supported_crc);
6936 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
6937 read_remote_device_id(dd, &device_id, &device_rev);
6939 * And the 'MgmtAllowed' information, which is exchanged during
6940	 * LNI, is also available at this point.
6942 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
6943 /* print the active widths */
6944 get_link_widths(dd, &active_tx, &active_rx);
6946 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
6947 (int)power_management, (int)continious);
6949 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
6954 (int)partner_supported_crc);
6955 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
6956 (u32)remote_tx_rate, (u32)link_widths);
6957 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
6958 (u32)device_id, (u32)device_rev);
6960 * The peer vAU value just read is the peer receiver value. HFI does
6961 * not support a transmit vAU of 0 (AU == 8). We advertised that
6962 * with Z=1 in the fabric capabilities sent to the peer. The peer
6963 * will see our Z=1, and, if it advertised a vAU of 0, will move its
6964 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
6965 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
6966 * subject to the Z value exception.
6970 set_up_vl15(dd, vau, vl15buf);
6972 /* set up the LCB CRC mode */
6973 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
6975 /* order is important: use the lowest bit in common */
6976 if (crc_mask & CAP_CRC_14B)
6977 crc_val = LCB_CRC_14B;
6978 else if (crc_mask & CAP_CRC_48B)
6979 crc_val = LCB_CRC_48B;
6980 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
6981 crc_val = LCB_CRC_12B_16B_PER_LANE;
6983 crc_val = LCB_CRC_16B;
6985 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
6986 write_csr(dd, DC_LCB_CFG_CRC_MODE,
6987 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
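/*
 * Worked example for the negotiation above: if we enable 14b and 48b
 * while the peer supports 48b and per-lane, crc_mask has only
 * CAP_CRC_48B set and the link runs LCB_CRC_48B; with no optional mode
 * in common, the mandatory 16b CRC is used.
 */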
6989 /* set (14b only) or clear sideband credit */
6990 reg = read_csr(dd, SEND_CM_CTRL);
6991 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
6992 write_csr(dd, SEND_CM_CTRL,
6993 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
6995 write_csr(dd, SEND_CM_CTRL,
6996 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
6999 ppd->link_speed_active = 0; /* invalid value */
7000 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7001 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7002 switch (remote_tx_rate) {
7004 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7007 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7011 /* actual rate is highest bit of the ANDed rates */
7012 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7015 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7017 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7019 if (ppd->link_speed_active == 0) {
7020 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7021 __func__, (int)remote_tx_rate);
7022 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7026 * Cache the values of the supported, enabled, and active
7027 * LTP CRC modes to return in 'portinfo' queries. But the bit
7028 * flags that are returned in the portinfo query differ from
7029 * what's in the link_crc_mask, crc_sizes, and crc_val
7030 * variables. Convert these here.
7032 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7033 /* supported crc modes */
7034 ppd->port_ltp_crc_mode |=
7035 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7036 /* enabled crc modes */
7037 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7038 /* active crc mode */
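/*
 * Layout sketch: port_ltp_crc_mode packs three 4-bit LTP mode masks as
 * bits [11:8] supported, [7:4] enabled, [3:0] active. For example,
 * supported 0xf, enabled 0x3, active 0x1 packs to 0xf31.
 */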
7040 /* set up the remote credit return table */
7041 assign_remote_cm_au_table(dd, vcu);
7044 * The LCB is reset on entry to handle_verify_cap(), so this must
7045 * be applied on every link up.
7047 * Adjust LCB error kill enable to kill the link if
7048 * these RBUF errors are seen:
7049 * REPLAY_BUF_MBE_SMASK
7050 * FLIT_INPUT_BUF_MBE_SMASK
7052 if (is_ax(dd)) { /* fixed in B0 */
7053 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7054 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7055 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7056 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7059 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7060 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7062 /* give 8051 access to the LCB CSRs */
7063 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7064 set_8051_lcb_access(dd);
7066 ppd->neighbor_guid =
7067 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7068 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7069 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7070 ppd->neighbor_type =
7071 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7072 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7073 ppd->neighbor_fm_security =
7074 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7075 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7077 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7078 ppd->neighbor_guid, ppd->neighbor_type,
7079 ppd->mgmt_allowed, ppd->neighbor_fm_security);
7080 if (ppd->mgmt_allowed)
7081 add_full_mgmt_pkey(ppd);
7083 /* tell the 8051 to go to LinkUp */
7084 set_link_state(ppd, HLS_GOING_UP);
7088 * Apply the link width downgrade enabled policy against the current active
7091 * Called when the enabled policy changes or the active link widths change.
7093 void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7100 /* use the hls lock to avoid a race with actual link up */
7103 mutex_lock(&ppd->hls_lock);
7104 /* only apply if the link is up */
7105 if (!(ppd->host_link_state & HLS_UP)) {
7106	/* still going up... wait and retry */
7107 if (ppd->host_link_state & HLS_GOING_UP) {
7108 if (++tries < 1000) {
7109 mutex_unlock(&ppd->hls_lock);
7110 usleep_range(100, 120); /* arbitrary */
7114 "%s: giving up waiting for link state change\n",
7120 lwde = ppd->link_width_downgrade_enabled;
7122 if (refresh_widths) {
7123 get_link_widths(ppd->dd, &tx, &rx);
7124 ppd->link_width_downgrade_tx_active = tx;
7125 ppd->link_width_downgrade_rx_active = rx;
7129 /* downgrade is disabled */
7131 /* bounce if not at starting active width */
7132 if ((ppd->link_width_active !=
7133 ppd->link_width_downgrade_tx_active)
7134 || (ppd->link_width_active !=
7135 ppd->link_width_downgrade_rx_active)) {
7137 "Link downgrade is disabled and link has downgraded, downing link\n");
7139 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7140 ppd->link_width_active,
7141 ppd->link_width_downgrade_tx_active,
7142 ppd->link_width_downgrade_rx_active);
7145 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0
7146 || (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7147 /* Tx or Rx is outside the enabled policy */
7149 "Link is outside of downgrade allowed, downing link\n");
7151 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7153 ppd->link_width_downgrade_tx_active,
7154 ppd->link_width_downgrade_rx_active);
7159 mutex_unlock(&ppd->hls_lock);
7162 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7163 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7164 set_link_state(ppd, HLS_DN_OFFLINE);
7170 * Handle a link downgrade interrupt from the 8051.
7172 * This is a work-queue function outside of the interrupt.
7174 void handle_link_downgrade(struct work_struct *work)
7176 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7177 link_downgrade_work);
7179 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7180 apply_link_downgrade_policy(ppd, 1);
7183 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7185 return flag_string(buf, buf_len, flags, dcc_err_flags,
7186 ARRAY_SIZE(dcc_err_flags));
7189 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7191 return flag_string(buf, buf_len, flags, lcb_err_flags,
7192 ARRAY_SIZE(lcb_err_flags));
7195 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7197 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7198 ARRAY_SIZE(dc8051_err_flags));
7201 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7203 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7204 ARRAY_SIZE(dc8051_info_err_flags));
7207 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7209 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7210 ARRAY_SIZE(dc8051_info_host_msg_flags));
7213 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7215 struct hfi1_pportdata *ppd = dd->pport;
7216 u64 info, err, host_msg;
7217 int queue_link_down = 0;
7220 /* look at the flags */
7221 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7222 /* 8051 information set by firmware */
7223 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7224 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7225 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7226 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7228 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7229 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7232 * Handle error flags.
7234 if (err & FAILED_LNI) {
7236 * LNI error indications are cleared by the 8051
7237 * only when starting polling. Only pay attention
7238 * to them when in the states that occur during
7241 if (ppd->host_link_state
7242 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7243 queue_link_down = 1;
7244 dd_dev_info(dd, "Link error: %s\n",
7245 dc8051_info_err_string(buf,
7249 err &= ~(u64)FAILED_LNI;
7251	/* unknown frames can happen during LNI, just count */
7252 if (err & UNKNOWN_FRAME) {
7253 ppd->unknown_frame_count++;
7254 err &= ~(u64)UNKNOWN_FRAME;
7257 /* report remaining errors, but do not do anything */
7258 dd_dev_err(dd, "8051 info error: %s\n",
7259 dc8051_info_err_string(buf, sizeof(buf), err));
7263 * Handle host message flags.
7265 if (host_msg & HOST_REQ_DONE) {
7267 * Presently, the driver does a busy wait for
7268 * host requests to complete. This is only an
7269 * informational message.
7270 * NOTE: The 8051 clears the host message
7271 * information *on the next 8051 command*.
7272 * Therefore, when linkup is achieved,
7273 * this flag will still be set.
7275 host_msg &= ~(u64)HOST_REQ_DONE;
7277 if (host_msg & BC_SMA_MSG) {
7278 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7279 host_msg &= ~(u64)BC_SMA_MSG;
7281 if (host_msg & LINKUP_ACHIEVED) {
7282 dd_dev_info(dd, "8051: Link up\n");
7283 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7284 host_msg &= ~(u64)LINKUP_ACHIEVED;
7286 if (host_msg & EXT_DEVICE_CFG_REQ) {
7287 handle_8051_request(dd);
7288 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7290 if (host_msg & VERIFY_CAP_FRAME) {
7291 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7292 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7294 if (host_msg & LINK_GOING_DOWN) {
7295 const char *extra = "";
7296 /* no downgrade action needed if going down */
7297 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7298 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7299 extra = " (ignoring downgrade)";
7301 dd_dev_info(dd, "8051: Link down%s\n", extra);
7302 queue_link_down = 1;
7303 host_msg &= ~(u64)LINK_GOING_DOWN;
7305 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7306 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7307 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7310 /* report remaining messages, but do not do anything */
7311 dd_dev_info(dd, "8051 info host message: %s\n",
7312 dc8051_info_host_msg_string(buf, sizeof(buf),
7316 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7318 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7320 * Lost the 8051 heartbeat. If this happens, we
7321 * receive constant interrupts about it. Disable
7322 * the interrupt after the first.
7324 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7325 write_csr(dd, DC_DC8051_ERR_EN,
7326 read_csr(dd, DC_DC8051_ERR_EN)
7327 & ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7329 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7332 /* report the error, but do not do anything */
7333 dd_dev_err(dd, "8051 error: %s\n",
7334 dc8051_err_string(buf, sizeof(buf), reg));
7337 if (queue_link_down) {
7338 /* if the link is already going down or disabled, do not
7340 if ((ppd->host_link_state
7341 & (HLS_GOING_OFFLINE|HLS_LINK_COOLDOWN))
7342 || ppd->link_enabled == 0) {
7343 dd_dev_info(dd, "%s: not queuing link down\n",
7346 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7351 static const char * const fm_config_txt[] = {
7353 "BadHeadDist: Distance violation between two head flits",
7355 "BadTailDist: Distance violation between two tail flits",
7357 "BadCtrlDist: Distance violation between two credit control flits",
7359 "BadCrdAck: Credits return for unsupported VL",
7361 "UnsupportedVLMarker: Received VL Marker",
7363 "BadPreempt: Exceeded the preemption nesting level",
7365 "BadControlFlit: Received unsupported control flit",
7368 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7371 static const char * const port_rcv_txt[] = {
7373 "BadPktLen: Illegal PktLen",
7375 "PktLenTooLong: Packet longer than PktLen",
7377 "PktLenTooShort: Packet shorter than PktLen",
7379 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7381 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7383 "BadL2: Illegal L2 opcode",
7385 "BadSC: Unsupported SC",
7387 "BadRC: Illegal RC",
7389 "PreemptError: Preempting with same VL",
7391 "PreemptVL15: Preempting a VL15 packet",
7394 #define OPA_LDR_FMCONFIG_OFFSET 16
7395 #define OPA_LDR_PORTRCV_OFFSET 0
7396 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7398 u64 info, hdr0, hdr1;
7401 struct hfi1_pportdata *ppd = dd->pport;
7405 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7406 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7407 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7408 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7409 /* set status bit */
7410 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7412 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7415 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7417 /* this counter saturates at (2^32) - 1 */
7418 if (ppd->link_downed < (u32)UINT_MAX)
7419 ppd->link_downed++;
7420 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7423 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7424 u8 reason_valid = 1;
7426 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7427 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7428 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7429 /* set status bit */
7430 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7440 extra = fm_config_txt[info];
7443 extra = fm_config_txt[info];
7444 if (ppd->port_error_action &
7445 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7448 * lcl_reason cannot be derived from info
7449 * for this error
7450 */
7451 lcl_reason =
7452 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7457 snprintf(buf, sizeof(buf), "reserved%lld", info);
7462 if (reason_valid && !do_bounce) {
7463 do_bounce = ppd->port_error_action &
7464 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7465 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7468 /* just report this */
7469 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7470 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7473 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7474 u8 reason_valid = 1;
7476 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7477 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7478 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7479 if (!(dd->err_info_rcvport.status_and_code &
7480 OPA_EI_STATUS_SMASK)) {
7481 dd->err_info_rcvport.status_and_code =
7482 info & OPA_EI_CODE_SMASK;
7483 /* set status bit */
7484 dd->err_info_rcvport.status_and_code |=
7485 OPA_EI_STATUS_SMASK;
7486 /* save first 2 flits in the packet that caused
7487 * the error */
7488 dd->err_info_rcvport.packet_flit1 = hdr0;
7489 dd->err_info_rcvport.packet_flit2 = hdr1;
7502 extra = port_rcv_txt[info];
7506 snprintf(buf, sizeof(buf), "reserved%lld", info);
7511 if (reason_valid && !do_bounce) {
7512 do_bounce = ppd->port_error_action &
7513 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7514 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7517 /* just report this */
7518 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7519 dd_dev_info(dd, "           hdr0 0x%llx, hdr1 0x%llx\n",
7520 hdr0, hdr1);
7522 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7525 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7526 /* informative only */
7527 dd_dev_info(dd, "8051 access to LCB blocked\n");
7528 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7530 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7531 /* informative only */
7532 dd_dev_info(dd, "host access to LCB blocked\n");
7533 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7536 /* report any remaining errors */
7538 dd_dev_info(dd, "DCC Error: %s\n",
7539 dcc_err_string(buf, sizeof(buf), reg));
7541 if (lcl_reason == 0)
7542 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7545 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7546 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7547 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
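/*
 * Editorial note on the bounce decision above: PortErrorAction is a
 * per-error-code bitmask. For example, an fmconfig error with info == 2
 * tests bit OPA_LDR_FMCONFIG_OFFSET + 2 == 18 of ppd->port_error_action,
 * while a port_rcv error with info == 2 tests bit
 * OPA_LDR_PORTRCV_OFFSET + 2 == 2.
 */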
7551 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7555 dd_dev_info(dd, "LCB Error: %s\n",
7556 lcb_err_string(buf, sizeof(buf), reg));
7560 * CCE block DC interrupt. Source is < 8.
7562 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7564 const struct err_reg_info *eri = &dc_errs[source];
7567 interrupt_clear_down(dd, 0, eri);
7568 } else if (source == 3 /* dc_lbm_int */) {
7570 * This indicates that a parity error has occurred on the
7571 * address/control lines presented to the LBM. The error
7572 * is a single pulse, there is no associated error flag,
7573 * and it is non-maskable. This is because if a parity
7574 * error occurs on the request the request is dropped.
7575 * This should never occur, but it is nice to know if it
7578 dd_dev_err(dd, "Parity error in DC LBM block\n");
7580 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7585 * TX block send credit interrupt. Source is < 160.
7587 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7589 sc_group_release_update(dd, source);
7593 * TX block SDMA interrupt. Source is < 48.
7595 * SDMA interrupts are grouped by type:
7597 * 0 - N-1 = SDma
7598 * N - 2N-1 = SDmaProgress
7599 * 2N - 3N-1 = SDmaIdle
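 *
 * A worked example (editorial, assuming TXE_NUM_SDMA_ENGINES == 16, so
 * N == 16, consistent with "Source is < 48" above): source 37 decodes to
 * what = 37 / 16 = 2 (SDmaIdle) and which = 37 % 16 = engine 5.
 */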
7601 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7603 /* what interrupt */
7604 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7606 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7608 #ifdef CONFIG_SDMA_VERBOSITY
7609 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7610 slashstrip(__FILE__), __LINE__, __func__);
7611 sdma_dumpstate(&dd->per_sdma[which]);
7614 if (likely(what < 3 && which < dd->num_sdma)) {
7615 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7617 /* should not happen */
7618 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7623 * RX block receive available interrupt. Source is < 160.
7625 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7627 struct hfi1_ctxtdata *rcd;
7630 if (likely(source < dd->num_rcv_contexts)) {
7631 rcd = dd->rcd[source];
7632 if (rcd) {
7633 if (source < dd->first_user_ctxt)
7634 rcd->do_interrupt(rcd, 0);
7635 else
7636 handle_user_interrupt(rcd);
7639 /* received an interrupt, but no rcd */
7640 err_detail = "dataless";
7642 /* received an interrupt, but are not using that context */
7643 err_detail = "out of range";
7645 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
7646 err_detail, source);
7650 * RX block receive urgent interrupt. Source is < 160.
7652 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
7654 struct hfi1_ctxtdata *rcd;
7657 if (likely(source < dd->num_rcv_contexts)) {
7658 rcd = dd->rcd[source];
7659 if (rcd) {
7660 /* only pay attention to user urgent interrupts */
7661 if (source >= dd->first_user_ctxt)
7662 handle_user_interrupt(rcd);
7665 /* received an interrupt, but no rcd */
7666 err_detail = "dataless";
7668 /* received an interrupt, but are not using that context */
7669 err_detail = "out of range";
7671 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
7672 err_detail, source);
7676 * Reserved range interrupt. Should not be called in normal operation.
7678 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
7682 dd_dev_err(dd, "unexpected %s interrupt\n",
7683 is_reserved_name(name, sizeof(name), source));
7686 static const struct is_table is_table[] = {
7687 /* start                 end
7688                          name func           interrupt func */
7689 { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
7690 is_misc_err_name, is_misc_err_int },
7691 { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
7692 is_sdma_eng_err_name, is_sdma_eng_err_int },
7693 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
7694 is_sendctxt_err_name, is_sendctxt_err_int },
7695 { IS_SDMA_START, IS_SDMA_END,
7696 is_sdma_eng_name, is_sdma_eng_int },
7697 { IS_VARIOUS_START, IS_VARIOUS_END,
7698 is_various_name, is_various_int },
7699 { IS_DC_START, IS_DC_END,
7700 is_dc_name, is_dc_int },
7701 { IS_RCVAVAIL_START, IS_RCVAVAIL_END,
7702 is_rcv_avail_name, is_rcv_avail_int },
7703 { IS_RCVURGENT_START, IS_RCVURGENT_END,
7704 is_rcv_urgent_name, is_rcv_urgent_int },
7705 { IS_SENDCREDIT_START, IS_SENDCREDIT_END,
7706 is_send_credit_name, is_send_credit_int},
7707 { IS_RESERVED_START, IS_RESERVED_END,
7708 is_reserved_name, is_reserved_int},
7712 * Interrupt source interrupt - called when the given source has an interrupt.
7713 * Source is a bit index into an array of 64-bit integers.
7715 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
7717 const struct is_table *entry;
7719 /* avoids a double compare by walking the table in-order */
7720 for (entry = &is_table[0]; entry->is_name; entry++) {
7721 if (source < entry->end) {
7722 trace_hfi1_interrupt(dd, entry, source);
7723 entry->is_int(dd, source - entry->start);
7727 /* fell off the end */
7728 dd_dev_err(dd, "invalid interrupt source %u\n", source);
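/*
 * Editorial example of the walk above: entries are sorted by range, so a
 * source inside [IS_SDMA_START, IS_SDMA_END) fails "source < entry->end"
 * for every earlier range and is dispatched as
 * is_sdma_eng_int(dd, source - IS_SDMA_START).
 */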
7732 * General interrupt handler. This is able to correctly handle
7733 * all interrupts in case INTx is used.
7735 static irqreturn_t general_interrupt(int irq, void *data)
7737 struct hfi1_devdata *dd = data;
7738 u64 regs[CCE_NUM_INT_CSRS];
7742 this_cpu_inc(*dd->int_counter);
7744 /* phase 1: scan and clear all handled interrupts */
7745 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
7746 if (dd->gi_mask[i] == 0) {
7747 regs[i] = 0; /* used later */
7750 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
7751 dd->gi_mask[i];
7752 /* only clear if anything is set */
7753 if (regs[i])
7754 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
7757 /* phase 2: call the appropriate handler */
7758 for_each_set_bit(bit, (unsigned long *)&regs[0],
7759 CCE_NUM_INT_CSRS*64) {
7760 is_interrupt(dd, bit);
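/*
 * Editorial note: regs[] is scanned as one contiguous bitmap of
 * CCE_NUM_INT_CSRS * 64 sources, so bit 130, for example, corresponds to
 * CCE_INT_STATUS CSR 130 / 64 == 2, bit 130 % 64 == 2 within it.
 */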
7766 static irqreturn_t sdma_interrupt(int irq, void *data)
7768 struct sdma_engine *sde = data;
7769 struct hfi1_devdata *dd = sde->dd;
7772 #ifdef CONFIG_SDMA_VERBOSITY
7773 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
7774 slashstrip(__FILE__), __LINE__, __func__);
7775 sdma_dumpstate(sde);
7778 this_cpu_inc(*dd->int_counter);
7780 /* This read_csr is really bad in the hot path */
7781 status = read_csr(dd,
7782 CCE_INT_STATUS + (8*(IS_SDMA_START/64)))
7783 & sde->imask;
7784 if (likely(status)) {
7785 /* clear the interrupt(s) */
7786 write_csr(dd,
7787 CCE_INT_CLEAR + (8*(IS_SDMA_START/64)),
7788 status);
7790 /* handle the interrupt(s) */
7791 sdma_engine_interrupt(sde, status);
7793 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
7794 sde->this_idx);
7800 * Clear the receive interrupt, forcing the write and making sure
7801 * we have data from the chip, pushing everything in front of it
7804 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
7806 struct hfi1_devdata *dd = rcd->dd;
7807 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
7809 mmiowb(); /* make sure everything before is written */
7810 write_csr(dd, addr, rcd->imask);
7811 /* force the above write on the chip and get a value back */
7812 (void)read_csr(dd, addr);
7815 /* force the receive interrupt */
7816 static inline void force_recv_intr(struct hfi1_ctxtdata *rcd)
7818 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
7821 /* return non-zero if a packet is present */
7822 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
7824 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
7825 return (rcd->seq_cnt ==
7826 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
7828 /* else is RDMA rtail */
7829 return (rcd->head != get_rcvhdrtail(rcd));
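/*
 * Editorial note: without DMA_RTAIL the driver detects a new packet by
 * comparing its expected RHF sequence count against the one in the next
 * receive header entry; with DMA_RTAIL it compares its software head
 * against the DMA'ed hardware tail instead.
 */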
7833 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
7834 * This routine will try to handle packets immediately (latency), but if
7835 * it finds too many, it will invoke the thread handler (bandwidth). The
7836 * chip receive interrupt is *not* cleared down until this or the thread (if
7837 * invoked) is finished. The intent is to avoid extra interrupts while we
7838 * are processing packets anyway.
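 *
 * Editorial addition: after the packet processor runs dry, the interrupt
 * is cleared and the presence check is repeated; a packet that slipped in
 * between the clear and the recheck causes the interrupt to be forced so
 * it is not stranded (see clear_recv_intr()/force_recv_intr() above).
 */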
7840 static irqreturn_t receive_context_interrupt(int irq, void *data)
7842 struct hfi1_ctxtdata *rcd = data;
7843 struct hfi1_devdata *dd = rcd->dd;
7847 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
7848 this_cpu_inc(*dd->int_counter);
7850 /* receive interrupt remains blocked while processing packets */
7851 disposition = rcd->do_interrupt(rcd, 0);
7854 * Too many packets were seen while processing packets in this
7855 * IRQ handler. Invoke the handler thread. The receive interrupt
7858 if (disposition == RCV_PKT_LIMIT)
7859 return IRQ_WAKE_THREAD;
7862 * The packet processor detected no more packets. Clear the receive
7863 * interrupt and recheck for a packet that may have arrived
7864 * after the previous check and interrupt clear. If a packet arrived,
7865 * force another interrupt.
7867 clear_recv_intr(rcd);
7868 present = check_packet_present(rcd);
7870 force_recv_intr(rcd);
7876 * Receive packet thread handler. This expects to be invoked with the
7877 * receive interrupt still blocked.
7879 static irqreturn_t receive_context_thread(int irq, void *data)
7881 struct hfi1_ctxtdata *rcd = data;
7884 /* receive interrupt is still blocked from the IRQ handler */
7885 (void)rcd->do_interrupt(rcd, 1);
7888 * The packet processor will only return if it detected no more
7889 * packets. Hold IRQs here so we can safely clear the interrupt and
7890 * recheck for a packet that may have arrived after the previous
7891 * check and the interrupt clear. If a packet arrived, force another
7894 local_irq_disable();
7895 clear_recv_intr(rcd);
7896 present = check_packet_present(rcd);
7898 force_recv_intr(rcd);
7904 /* ========================================================================= */
7906 u32 read_physical_state(struct hfi1_devdata *dd)
7910 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
7911 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
7912 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
7915 static u32 read_logical_state(struct hfi1_devdata *dd)
7919 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
7920 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
7921 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
7924 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
7928 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
7929 /* clear current state, set new state */
7930 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
7931 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
7932 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
7936 * Use the 8051 to read an LCB CSR.
7938 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
7943 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
7944 if (acquire_lcb_access(dd, 0) == 0) {
7945 *data = read_csr(dd, addr);
7946 release_lcb_access(dd, 0);
7952 /* register is an index of LCB registers: (offset - base) / 8 */
7953 regno = (addr - DC_LCB_CFG_RUN) >> 3;
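/*
 * Illustrative example (editorial): LCB CSRs are 8 bytes apart, so an
 * addr of DC_LCB_CFG_RUN + 0x18 yields regno == 3.
 */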
7954 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
7955 if (ret != HCMD_SUCCESS)
7961 * Read an LCB CSR. Access may not be in host control, so check.
7962 * Return 0 on success, -EBUSY on failure.
7964 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
7966 struct hfi1_pportdata *ppd = dd->pport;
7968 /* if up, go through the 8051 for the value */
7969 if (ppd->host_link_state & HLS_UP)
7970 return read_lcb_via_8051(dd, addr, data);
7971 /* if going up or down, no access */
7972 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
7974 /* otherwise, host has access */
7975 *data = read_csr(dd, addr);
7980 * Use the 8051 to write an LCB CSR.
7982 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
7987 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
7988 (dd->dc8051_ver < dc8051_ver(0, 20))) {
7989 if (acquire_lcb_access(dd, 0) == 0) {
7990 write_csr(dd, addr, data);
7991 release_lcb_access(dd, 0);
7997 /* register is an index of LCB registers: (offset - base) / 8 */
7998 regno = (addr - DC_LCB_CFG_RUN) >> 3;
7999 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8000 if (ret != HCMD_SUCCESS)
8006 * Write an LCB CSR. Access may not be in host control, so check.
8007 * Return 0 on success, -EBUSY on failure.
8009 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8011 struct hfi1_pportdata *ppd = dd->pport;
8013 /* if up, go through the 8051 for the value */
8014 if (ppd->host_link_state & HLS_UP)
8015 return write_lcb_via_8051(dd, addr, data);
8016 /* if going up or down, no access */
8017 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8019 /* otherwise, host has access */
8020 write_csr(dd, addr, data);
8025 * Returns:
8026 * < 0 = Linux error, not able to get access
8027 * > 0 = 8051 command RETURN_CODE
8029 static int do_8051_command(
8030 struct hfi1_devdata *dd,
8037 unsigned long flags;
8038 unsigned long timeout;
8040 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8043 * Alternative to holding the lock for a long time:
8044 * - keep busy wait - have other users bounce off
8046 spin_lock_irqsave(&dd->dc8051_lock, flags);
8048 /* We can't send any commands to the 8051 if it's in reset */
8049 if (dd->dc_shutdown) {
8050 return_code = -ENODEV;
8055 * If an 8051 host command timed out previously, then the 8051 is
8058 * On first timeout, attempt to reset and restart the entire DC
8059 * block (including 8051). (Is this too big of a hammer?)
8061 * If the 8051 times out a second time, the reset did not bring it
8062 * back to healthy life. In that case, fail any subsequent commands.
8064 if (dd->dc8051_timed_out) {
8065 if (dd->dc8051_timed_out > 1) {
8067 "Previous 8051 host command timed out, skipping command %u\n",
8069 return_code = -ENXIO;
8072 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8075 spin_lock_irqsave(&dd->dc8051_lock, flags);
8079 * If there is no timeout, then the 8051 command interface is
8080 * waiting for a command.
8084 * When writing an LCB CSR, out_data contains the full value
8085 * to be written, while in_data contains the relative LCB
8086 * address in 7:0. Do the work here, rather than the caller,
8087 * of distributing the write data to where it needs to go:
8090 * 39:00 -> in_data[47:8]
8091 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8092 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8094 if (type == HCMD_WRITE_LCB_CSR) {
8095 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8096 reg = ((((*out_data) >> 40) & 0xff) <<
8097 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8098 | ((((*out_data) >> 48) & 0xffff) <<
8099 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8100 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
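/*
 * Worked example (editorial, hypothetical value): for
 * *out_data == 0x1122334455667788 the code above places 0x4455667788
 * into in_data[47:8], 0x33 into RETURN_CODE, and 0x1122 into RSP_DATA.
 */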
8104 * Do two writes: the first to stabilize the type and req_data, the
8105 * second to activate.
8107 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8108 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8109 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8110 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8111 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8112 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8113 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8115 /* wait for completion, alternate: interrupt */
8116 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8118 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8119 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8122 if (time_after(jiffies, timeout)) {
8123 dd->dc8051_timed_out++;
8124 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8127 return_code = -ETIMEDOUT;
8134 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8135 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8136 if (type == HCMD_READ_LCB_CSR) {
8137 /* top 16 bits are in a different register */
8138 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8139 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8140 << (48
8141 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8144 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8145 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8146 dd->dc8051_timed_out = 0;
8148 * Clear command for next user.
8150 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8153 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8158 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8160 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8163 static int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8164 u8 lane_id, u32 config_data)
8169 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8170 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8171 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8172 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8173 if (ret != HCMD_SUCCESS) {
8175 "load 8051 config: field id %d, lane %d, err %d\n",
8176 (int)field_id, (int)lane_id, ret);
8182 * Read the 8051 firmware "registers". Use the RAM directly. Always
8183 * set the result, even on error.
8184 * Return 0 on success, -errno on failure
8186 static int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8193 /* address start depends on the lane_id */
8195 addr = (4 * NUM_GENERAL_FIELDS)
8196 + (lane_id * 4 * NUM_LANE_FIELDS);
8199 addr += field_id * 4;
8201 /* read is in 8-byte chunks, hardware will truncate the address down */
8202 ret = read_8051_data(dd, addr, 8, &big_data);
8205 /* extract the 4 bytes we want */
8206 if (addr & 0x4)
8207 *result = (u32)(big_data >> 32);
8208 else
8209 *result = (u32)big_data;
8212 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8213 __func__, lane_id, field_id);
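/*
 * Illustrative example (editorial, hypothetical values): for lane_id 2
 * and field_id 3, addr = 4 * NUM_GENERAL_FIELDS
 * + 2 * 4 * NUM_LANE_FIELDS + 12. The 8-byte read returns the aligned
 * register pair and the "addr & 0x4" test above picks the upper or
 * lower 4 bytes.
 */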
8219 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8224 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8225 | power_management << POWER_MANAGEMENT_SHIFT;
8226 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8227 GENERAL_CONFIG, frame);
8230 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8231 u16 vl15buf, u8 crc_sizes)
8235 frame = (u32)vau << VAU_SHIFT
8237 | (u32)vcu << VCU_SHIFT
8238 | (u32)vl15buf << VL15BUF_SHIFT
8239 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8240 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8241 GENERAL_CONFIG, frame);
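/*
 * Editorial note: each write_vc_local_*() helper has a matching
 * read_vc_remote_*() counterpart below; the local values are handed to
 * the 8051 for the verify-capability exchange and the peer's values are
 * later read back through the same GENERAL_CONFIG frame mechanism.
 */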
8244 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8245 u8 *flag_bits, u16 *link_widths)
8249 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8251 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8252 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8253 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8256 static int write_vc_local_link_width(struct hfi1_devdata *dd,
8263 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8264 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8265 | (u32)link_widths << LINK_WIDTH_SHIFT;
8266 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8270 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8275 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8276 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8277 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8280 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8285 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8286 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8287 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8288 & REMOTE_DEVICE_REV_MASK;
8291 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8295 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8296 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8297 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8300 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8305 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8306 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8307 & POWER_MANAGEMENT_MASK;
8308 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8309 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8312 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8313 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8317 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8318 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8319 *z = (frame >> Z_SHIFT) & Z_MASK;
8320 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8321 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8322 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8325 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8331 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8333 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8334 & REMOTE_TX_RATE_MASK;
8335 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8338 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8342 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8343 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8346 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8350 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8351 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8354 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8356 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8359 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8361 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8364 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8370 if (dd->pport->host_link_state & HLS_UP) {
8371 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8374 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8375 & LINK_QUALITY_MASK;
8379 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8383 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8384 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8387 static int read_tx_settings(struct hfi1_devdata *dd,
8389 u8 *tx_polarity_inversion,
8390 u8 *rx_polarity_inversion,
8396 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8397 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8398 & ENABLE_LANE_TX_MASK;
8399 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8400 & TX_POLARITY_INVERSION_MASK;
8401 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8402 & RX_POLARITY_INVERSION_MASK;
8403 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8407 static int write_tx_settings(struct hfi1_devdata *dd,
8409 u8 tx_polarity_inversion,
8410 u8 rx_polarity_inversion,
8415 /* no need to mask, all variable sizes match field widths */
8416 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8417 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8418 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8419 | max_rate << MAX_RATE_SHIFT;
8420 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8423 static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8425 u32 frame, version, prod_id;
8429 for (lane = 0; lane < 4; lane++) {
8430 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8434 "Unable to read lane %d firmware details\n",
8438 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8439 & SPICO_ROM_VERSION_MASK;
8440 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8441 & SPICO_ROM_PROD_ID_MASK;
8443 "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8444 lane, version, prod_id);
8449 * Read an idle LCB message.
8451 * Returns 0 on success, -EINVAL on error
8453 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8457 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG,
8459 if (ret != HCMD_SUCCESS) {
8460 dd_dev_err(dd, "read idle message: type %d, err %d\n",
8461 (u32)(type >> IDLE_MSG_TYPE_SHIFT), ret);
8462 return -EINVAL;
8463 }
8464 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8465 /* return only the payload as we already know the type */
8466 *data_out >>= IDLE_PAYLOAD_SHIFT;
8471 * Read an idle SMA message. To be done in response to a notification from
8474 * Returns 0 on success, -EINVAL on error
8476 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8478 return read_idle_message(dd,
8479 (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, data);
8483 * Send an idle LCB message.
8485 * Returns 0 on success, -EINVAL on error
8487 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8491 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8492 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8493 if (ret != HCMD_SUCCESS) {
8494 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8495 data, ret);
8496 return -EINVAL;
8497 }
8502 * Send an idle SMA message.
8504 * Returns 0 on success, -EINVAL on error
8506 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8510 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT)
8511 | ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8512 return send_idle_message(dd, data);
8516 * Initialize the LCB then do a quick link up. This may or may not be
8519 * return 0 on success, -errno on error
8521 static int do_quick_linkup(struct hfi1_devdata *dd)
8524 unsigned long timeout;
8527 lcb_shutdown(dd, 0);
8530 /* LCB_CFG_LOOPBACK.VAL = 2 */
8531 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8532 write_csr(dd, DC_LCB_CFG_LOOPBACK,
8533 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8534 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8537 /* start the LCBs */
8538 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8539 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8541 /* simulator only loopback steps */
8542 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8543 /* LCB_CFG_RUN.EN = 1 */
8544 write_csr(dd, DC_LCB_CFG_RUN,
8545 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8547 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8548 timeout = jiffies + msecs_to_jiffies(10);
8551 DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8554 if (time_after(jiffies, timeout)) {
8556 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8562 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8563 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8568 * When doing quick linkup and not in loopback, both
8569 * sides must be done with LCB set-up before either
8570 * starts the quick linkup. Put a delay here so that
8571 * both sides can be started and have a chance to be
8572 * done with LCB set up before resuming.
8575 "Pausing for peer to be finished with LCB set up\n");
8578 "Continuing with quick linkup\n");
8581 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8582 set_8051_lcb_access(dd);
8585 * State "quick" LinkUp request sets the physical link state to
8586 * LinkUp without a verify capability sequence.
8587 * This state is in simulator v37 and later.
8589 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8590 if (ret != HCMD_SUCCESS) {
8592 "%s: set physical link state to quick LinkUp failed with return %d\n",
8595 set_host_lcb_access(dd);
8596 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8603 return 0; /* success */
8607 * Set the SerDes to internal loopback mode.
8608 * Returns 0 on success, -errno on error.
8610 static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8614 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8615 if (ret == HCMD_SUCCESS)
8616 return 0;
8617 dd_dev_err(dd,
8618 "Set physical link state to SerDes Loopback failed with return %d\n",
8619 ret);
8626 * Do all special steps to set up loopback.
8628 static int init_loopback(struct hfi1_devdata *dd)
8630 dd_dev_info(dd, "Entering loopback mode\n");
8632 /* all loopbacks should disable self GUID check */
8633 write_csr(dd, DC_DC8051_CFG_MODE,
8634 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
8637 * The simulator has only one loopback option - LCB. Switch
8638 * to that option, which includes quick link up.
8640 * Accept all valid loopback values.
8642 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
8643 && (loopback == LOOPBACK_SERDES
8644 || loopback == LOOPBACK_LCB
8645 || loopback == LOOPBACK_CABLE)) {
8646 loopback = LOOPBACK_LCB;
8651 /* handle serdes loopback */
8652 if (loopback == LOOPBACK_SERDES) {
8653 /* internal serdes loopback needs quick linkup on RTL */
8654 if (dd->icode == ICODE_RTL_SILICON)
8656 return set_serdes_loopback_mode(dd);
8659 /* LCB loopback - handled at poll time */
8660 if (loopback == LOOPBACK_LCB) {
8661 quick_linkup = 1; /* LCB is always quick linkup */
8663 /* not supported in emulation due to emulation RTL changes */
8664 if (dd->icode == ICODE_FPGA_EMULATION) {
8666 "LCB loopback not supported in emulation\n");
8672 /* external cable loopback requires no extra steps */
8673 if (loopback == LOOPBACK_CABLE)
8676 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
8681 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
8682 * used in the Verify Capability link width attribute.
8684 static u16 opa_to_vc_link_widths(u16 opa_widths)
8689 static const struct link_bits {
8692 } opa_link_xlate[] = {
8693 { OPA_LINK_WIDTH_1X, 1 << (1-1) },
8694 { OPA_LINK_WIDTH_2X, 1 << (2-1) },
8695 { OPA_LINK_WIDTH_3X, 1 << (3-1) },
8696 { OPA_LINK_WIDTH_4X, 1 << (4-1) },
8699 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
8700 if (opa_widths & opa_link_xlate[i].from)
8701 result |= opa_link_xlate[i].to;
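/*
 * Worked example (editorial): opa_widths ==
 * (OPA_LINK_WIDTH_2X | OPA_LINK_WIDTH_4X) returns
 * (1 << (2-1)) | (1 << (4-1)) == 0xa.
 */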
8707 * Set link attributes before moving to polling.
8709 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
8711 struct hfi1_devdata *dd = ppd->dd;
8713 u8 tx_polarity_inversion;
8714 u8 rx_polarity_inversion;
8717 /* reset our fabric serdes to clear any lingering problems */
8718 fabric_serdes_reset(dd);
8720 /* set the local tx rate - need to read-modify-write */
8721 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
8722 &rx_polarity_inversion, &ppd->local_tx_rate);
8724 goto set_local_link_attributes_fail;
8726 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
8727 /* set the tx rate to the fastest enabled */
8728 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8729 ppd->local_tx_rate = 1;
8731 ppd->local_tx_rate = 0;
8733 /* set the tx rate to all enabled */
8734 ppd->local_tx_rate = 0;
8735 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8736 ppd->local_tx_rate |= 2;
8737 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
8738 ppd->local_tx_rate |= 1;
8741 enable_lane_tx = 0xF; /* enable all four lanes */
8742 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
8743 rx_polarity_inversion, ppd->local_tx_rate);
8744 if (ret != HCMD_SUCCESS)
8745 goto set_local_link_attributes_fail;
8748 * DC supports continuous updates.
8750 ret = write_vc_local_phy(dd, 0 /* no power management */,
8751 1 /* continuous updates */);
8752 if (ret != HCMD_SUCCESS)
8753 goto set_local_link_attributes_fail;
8755 /* z=1 in the next call: AU of 0 is not supported by the hardware */
8756 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
8757 ppd->port_crc_mode_enabled);
8758 if (ret != HCMD_SUCCESS)
8759 goto set_local_link_attributes_fail;
8761 ret = write_vc_local_link_width(dd, 0, 0,
8762 opa_to_vc_link_widths(ppd->link_width_enabled));
8763 if (ret != HCMD_SUCCESS)
8764 goto set_local_link_attributes_fail;
8766 /* let peer know who we are */
8767 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
8768 if (ret == HCMD_SUCCESS)
8771 set_local_link_attributes_fail:
8773 "Failed to set local link attributes, return 0x%x\n",
8779 * Call this to start the link. Schedule a retry if the cable is not
8780 * present or if unable to start polling. Do not do anything if the
8781 * link is disabled. Returns 0 if link is disabled or moved to polling
8783 int start_link(struct hfi1_pportdata *ppd)
8785 if (!ppd->link_enabled) {
8786 dd_dev_info(ppd->dd,
8787 "%s: stopping link start because link is disabled\n",
8791 if (!ppd->driver_link_ready) {
8792 dd_dev_info(ppd->dd,
8793 "%s: stopping link start because driver is not ready\n",
8798 if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
8799 loopback == LOOPBACK_LCB ||
8800 ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
8801 return set_link_state(ppd, HLS_DN_POLL);
8803 dd_dev_info(ppd->dd,
8804 "%s: stopping link start because no cable is present\n",
8809 static void reset_qsfp(struct hfi1_pportdata *ppd)
8811 struct hfi1_devdata *dd = ppd->dd;
8812 u64 mask, qsfp_mask;
8814 mask = (u64)QSFP_HFI0_RESET_N;
8815 qsfp_mask = read_csr(dd,
8816 dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
8819 dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE,
8822 qsfp_mask = read_csr(dd,
8823 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
8826 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT,
8833 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT,
8837 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
8838 u8 *qsfp_interrupt_status)
8840 struct hfi1_devdata *dd = ppd->dd;
8842 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
8843 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
8845 "%s: QSFP cable on fire\n",
8848 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
8849 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
8851 "%s: QSFP cable temperature too low\n",
8854 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
8855 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
8857 "%s: QSFP supply voltage too high\n",
8860 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
8861 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
8863 "%s: QSFP supply voltage too low\n",
8866 /* Byte 2 is vendor specific */
8868 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
8869 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
8871 "%s: Cable RX channel 1/2 power too high\n",
8874 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
8875 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
8877 "%s: Cable RX channel 1/2 power too low\n",
8880 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
8881 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
8883 "%s: Cable RX channel 3/4 power too high\n",
8886 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
8887 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
8889 "%s: Cable RX channel 3/4 power too low\n",
8892 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
8893 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
8895 "%s: Cable TX channel 1/2 bias too high\n",
8898 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
8899 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
8901 "%s: Cable TX channel 1/2 bias too low\n",
8904 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
8905 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
8907 "%s: Cable TX channel 3/4 bias too high\n",
8910 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
8911 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
8913 "%s: Cable TX channel 3/4 bias too low\n",
8916 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
8917 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
8919 "%s: Cable TX channel 1/2 power too high\n",
8922 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
8923 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
8925 "%s: Cable TX channel 1/2 power too low\n",
8928 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
8929 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
8931 "%s: Cable TX channel 3/4 power too high\n",
8934 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
8935 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
8937 "%s: Cable TX channel 3/4 power too low\n",
8940 /* Bytes 9-10 and 11-12 are reserved */
8941 /* Bytes 13-15 are vendor specific */
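/*
 * Editorial summary of the status bytes decoded above: byte 0 carries
 * temperature alarms/warnings, byte 1 supply voltage, bytes 3-4 RX power
 * (channels 1/2 and 3/4), bytes 5-6 TX bias, bytes 7-8 TX power; byte 2
 * is vendor specific, bytes 9-12 are reserved, and 13-15 are vendor
 * specific.
 */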
8946 static int do_pre_lni_host_behaviors(struct hfi1_pportdata *ppd)
8948 refresh_qsfp_cache(ppd, &ppd->qsfp_info);
8953 static int do_qsfp_intr_fallback(struct hfi1_pportdata *ppd)
8955 struct hfi1_devdata *dd = ppd->dd;
8956 u8 qsfp_interrupt_status = 0;
8958 if (qsfp_read(ppd, dd->hfi1_id, 2, &qsfp_interrupt_status, 1)
8959 != 1) {
8960 dd_dev_info(dd,
8961 "%s: Failed to read status of QSFP module\n",
8962 __func__);
8966 /* We don't care about alarms & warnings with a non-functional INT_N */
8967 if (!(qsfp_interrupt_status & QSFP_DATA_NOT_READY))
8968 do_pre_lni_host_behaviors(ppd);
8973 /* This routine will only be scheduled if the QSFP module is present */
8974 static void qsfp_event(struct work_struct *work)
8976 struct qsfp_data *qd;
8977 struct hfi1_pportdata *ppd;
8978 struct hfi1_devdata *dd;
8980 qd = container_of(work, struct qsfp_data, qsfp_work);
8985 if (!qsfp_mod_present(ppd))
8989 * Turn DC back on after the cable has been
8990 * re-inserted. Up until now, the DC has been in
8991 * reset to save power.
8995 if (qd->cache_refresh_required) {
8999 /* Check for QSFP interrupt after t_init (SFF 8679)
9003 if (!qd->qsfp_interrupt_functional) {
9004 if (do_qsfp_intr_fallback(ppd) < 0)
9005 dd_dev_info(dd, "%s: QSFP fallback failed\n",
9006 __func__);
9007 ppd->driver_link_ready = 1;
9012 if (qd->check_interrupt_flags) {
9013 u8 qsfp_interrupt_status[16] = {0,};
9015 if (qsfp_read(ppd, dd->hfi1_id, 6,
9016 &qsfp_interrupt_status[0], 16) != 16) {
9018 "%s: Failed to read status of QSFP module\n",
9021 unsigned long flags;
9024 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9025 ppd->qsfp_info.check_interrupt_flags = 0;
9026 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9029 if (qsfp_read(ppd, dd->hfi1_id, 2, &data_status, 1)
9032 "%s: Failed to read status of QSFP module\n",
9035 if (!(data_status & QSFP_DATA_NOT_READY)) {
9036 do_pre_lni_host_behaviors(ppd);
9039 handle_qsfp_error_conditions(ppd,
9040 qsfp_interrupt_status);
9045 void init_qsfp(struct hfi1_pportdata *ppd)
9047 struct hfi1_devdata *dd = ppd->dd;
9050 if (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9051 ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9052 ppd->driver_link_ready = 1;
9056 ppd->qsfp_info.ppd = ppd;
9057 INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
9059 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9060 /* Clear current status to avoid spurious interrupts */
9067 /* Handle active low nature of INT_N and MODPRST_N pins */
9068 if (qsfp_mod_present(ppd))
9069 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9071 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9074 /* Allow only INT_N and MODPRST_N to trigger QSFP interrupts */
9075 qsfp_mask |= (u64)QSFP_HFI0_MODPRST_N;
9077 dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9080 if (qsfp_mod_present(ppd)) {
9084 /* Check for QSFP interrupt after t_init (SFF 8679)
9088 if (!ppd->qsfp_info.qsfp_interrupt_functional) {
9089 if (do_qsfp_intr_fallback(ppd) < 0)
9091 "%s: QSFP fallback failed\n",
9093 ppd->driver_link_ready = 1;
9099 * Do a one-time initialize of the LCB block.
9101 static void init_lcb(struct hfi1_devdata *dd)
9103 /* the DC has been reset earlier in the driver load */
9105 /* set LCB for cclk loopback on the port */
9106 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9107 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9108 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9109 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9110 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9111 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9112 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9115 int bringup_serdes(struct hfi1_pportdata *ppd)
9117 struct hfi1_devdata *dd = ppd->dd;
9121 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9122 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9127 guid = dd->base_guid + ppd->port - 1;
9131 /* the link defaults to enabled */
9132 ppd->link_enabled = 1;
9133 /* Set linkinit_reason on power up per OPA spec */
9134 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9136 /* one-time init of the LCB */
9140 ret = init_loopback(dd);
9145 return start_link(ppd);
9148 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9150 struct hfi1_devdata *dd = ppd->dd;
9153 * Shut down the link and keep it down. First clear the driver's
9154 * willingness to bring the link up (driver_link_ready). Then make
9155 * sure the link is not automatically restarted (link_enabled).
9156 * Cancel any pending restart, and finally go offline.
9157 */
9159 ppd->driver_link_ready = 0;
9160 ppd->link_enabled = 0;
9162 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9163 OPA_LINKDOWN_REASON_SMA_DISABLED);
9164 set_link_state(ppd, HLS_DN_OFFLINE);
9166 /* disable the port */
9167 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9170 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9172 struct hfi1_pportdata *ppd;
9175 ppd = (struct hfi1_pportdata *)(dd + 1);
9176 for (i = 0; i < dd->num_pports; i++, ppd++) {
9177 ppd->ibport_data.rc_acks = NULL;
9178 ppd->ibport_data.rc_qacks = NULL;
9179 ppd->ibport_data.rc_acks = alloc_percpu(u64);
9180 ppd->ibport_data.rc_qacks = alloc_percpu(u64);
9181 ppd->ibport_data.rc_delayed_comp = alloc_percpu(u64);
9182 if ((ppd->ibport_data.rc_acks == NULL) ||
9183 (ppd->ibport_data.rc_delayed_comp == NULL) ||
9184 (ppd->ibport_data.rc_qacks == NULL))
9191 static const char * const pt_names[] = {
9197 static const char *pt_name(u32 type)
9199 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9203 * index is the index into the receive array
9205 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9206 u32 type, unsigned long pa, u16 order)
9209 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9210 (dd->kregbase + RCV_ARRAY));
9212 if (!(dd->flags & HFI1_PRESENT))
9215 if (type == PT_INVALID) {
9216 pa = 0;
9217 } else if (type > PT_INVALID) {
9219 "unexpected receive array type %u for index %u, not handled\n",
9224 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9225 pt_name(type), index, pa, (unsigned long)order);
9227 #define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9228 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9229 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9230 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9231 << RCV_ARRAY_RT_ADDR_SHIFT;
9232 writeq(reg, base + (index * 8));
9234 if (type == PT_EAGER)
9236 * Eager entries are written one-by-one so we have to push them
9237 * after we write the entry.
9244 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9246 struct hfi1_devdata *dd = rcd->dd;
9249 /* this could be optimized */
9250 for (i = rcd->eager_base; i < rcd->eager_base +
9251 rcd->egrbufs.alloced; i++)
9252 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9254 for (i = rcd->expected_base;
9255 i < rcd->expected_base + rcd->expected_count; i++)
9256 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
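/*
 * Illustrative sketch for hfi1_put_tid() above, with hypothetical values
 * pa == 0x20000000 and order == 1 (not taken from the original source):
 *
 *	reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
 *	      | (u64)1 << RCV_ARRAY_RT_BUF_SIZE_SHIFT
 *	      | ((0x20000000 >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
 *		<< RCV_ARRAY_RT_ADDR_SHIFT;
 *
 * i.e. the physical address is stored in 4KB units alongside the buffer
 * size order, with the write-enable bit set.
 */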
9259 int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9260 struct hfi1_ctxt_info *kinfo)
9262 kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9263 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9267 struct hfi1_message_header *hfi1_get_msgheader(
9268 struct hfi1_devdata *dd, __le32 *rhf_addr)
9270 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9272 return (struct hfi1_message_header *)
9273 (rhf_addr - dd->rhf_offset + offset);
9276 static const char * const ib_cfg_name_strings[] = {
9277 "HFI1_IB_CFG_LIDLMC",
9278 "HFI1_IB_CFG_LWID_DG_ENB",
9279 "HFI1_IB_CFG_LWID_ENB",
9281 "HFI1_IB_CFG_SPD_ENB",
9283 "HFI1_IB_CFG_RXPOL_ENB",
9284 "HFI1_IB_CFG_LREV_ENB",
9285 "HFI1_IB_CFG_LINKLATENCY",
9286 "HFI1_IB_CFG_HRTBT",
9287 "HFI1_IB_CFG_OP_VLS",
9288 "HFI1_IB_CFG_VL_HIGH_CAP",
9289 "HFI1_IB_CFG_VL_LOW_CAP",
9290 "HFI1_IB_CFG_OVERRUN_THRESH",
9291 "HFI1_IB_CFG_PHYERR_THRESH",
9292 "HFI1_IB_CFG_LINKDEFAULT",
9293 "HFI1_IB_CFG_PKEYS",
9295 "HFI1_IB_CFG_LSTATE",
9296 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9297 "HFI1_IB_CFG_PMA_TICKS",
9301 static const char *ib_cfg_name(int which)
9303 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9304 return "invalid";
9305 return ib_cfg_name_strings[which];
9308 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9310 struct hfi1_devdata *dd = ppd->dd;
9314 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9315 val = ppd->link_width_enabled;
9317 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9318 val = ppd->link_width_active;
9320 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9321 val = ppd->link_speed_enabled;
9323 case HFI1_IB_CFG_SPD: /* current Link speed */
9324 val = ppd->link_speed_active;
9327 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9328 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9329 case HFI1_IB_CFG_LINKLATENCY:
9332 case HFI1_IB_CFG_OP_VLS:
9333 val = ppd->vls_operational;
9335 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9336 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9338 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9339 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9341 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9342 val = ppd->overrun_threshold;
9344 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9345 val = ppd->phy_error_threshold;
9347 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9348 val = dd->link_default;
9351 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9352 case HFI1_IB_CFG_PMA_TICKS:
9355 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9358 "%s: which %s: not implemented\n",
9360 ib_cfg_name(which));
9368 * The largest MAD packet size.
9370 #define MAX_MAD_PACKET 2048
9373 * Return the maximum header bytes that can go on the _wire_
9374 * for this device. This count includes the ICRC which is
9375 * not part of the packet held in memory but it is appended
9376 * by the HW.
9377 * This is dependent on the device's receive header entry size.
9378 * HFI allows this to be set per-receive context, but the
9379 * driver presently enforces a global value.
9381 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9384 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9385 * the Receive Header Entry Size minus the PBC (or RHF) size
9386 * plus one DW for the ICRC appended by HW.
9388 * dd->rcd[0].rcvhdrqentsize is in DW.
9389 * We use rcd[0] as all context will have the same value. Also,
9390 * the first kernel context would have been allocated by now so
9391 * we are guaranteed a valid value.
9393 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
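/*
 * Worked example (editorial, assuming a receive header entry size of
 * 32 DW): (32 - 2 + 1) << 2 == 124 bytes of header counted by
 * LRH.PktLen on the wire.
 */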
9397 * Set Send Length
9398 * @ppd - per port data
9400 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9401 * registers compare against LRH.PktLen, so use the max bytes included
9404 * This routine changes all VL values except VL15, which it maintains at
9407 static void set_send_length(struct hfi1_pportdata *ppd)
9409 struct hfi1_devdata *dd = ppd->dd;
9410 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9411 u32 maxvlmtu = dd->vld[15].mtu;
9412 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9413 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9414 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9417 for (i = 0; i < ppd->vls_supported; i++) {
9418 if (dd->vld[i].mtu > maxvlmtu)
9419 maxvlmtu = dd->vld[i].mtu;
9421 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9422 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9423 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9425 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9426 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9427 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9429 write_csr(dd, SEND_LEN_CHECK0, len1);
9430 write_csr(dd, SEND_LEN_CHECK1, len2);
9431 /* adjust kernel credit return thresholds based on new MTUs */
9432 /* all kernel receive contexts have the same hdrqentsize */
9433 for (i = 0; i < ppd->vls_supported; i++) {
9434 sc_set_cr_threshold(dd->vld[i].sc,
9435 sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu,
9436 dd->rcd[0]->rcvhdrqentsize));
9438 sc_set_cr_threshold(dd->vld[15].sc,
9439 sc_mtu_to_threshold(dd->vld[15].sc, dd->vld[15].mtu,
9440 dd->rcd[0]->rcvhdrqentsize));
9442 /* Adjust maximum MTU for the port in DC */
9443 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9444 (ilog2(maxvlmtu >> 8) + 1);
9445 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9446 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9447 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9448 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9449 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
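/*
 * Illustrative encodings (editorial): maxvlmtu == 10240 uses the
 * dedicated DCC_CFG_PORT_MTU_CAP_10240 value, while e.g. maxvlmtu ==
 * 4096 encodes as ilog2(4096 >> 8) + 1 == 5.
 */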
9452 static void set_lidlmc(struct hfi1_pportdata *ppd)
9456 struct hfi1_devdata *dd = ppd->dd;
9457 u32 mask = ~((1U << ppd->lmc) - 1);
9458 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9460 if (dd->hfi1_snoop.mode_flag)
9461 dd_dev_info(dd, "Set lid/lmc while snooping");
9463 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9464 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9465 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9466 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT)|
9467 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9468 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9469 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9472 * Iterate over all the send contexts and set their SLID check
9474 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9475 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9476 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9477 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9479 for (i = 0; i < dd->chip_send_contexts; i++) {
9480 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9482 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9485 /* Now we have to do the same thing for the sdma engines */
9486 sdma_update_lmc(dd, mask, ppd->lid);
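/*
 * Illustrative example (editorial): with ppd->lmc == 2 the mask above is
 * ~0x3, so the SLID/DLID checks accept any of the four LIDs from
 * ppd->lid through ppd->lid + 3.
 */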
9489 static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9491 unsigned long timeout;
9494 timeout = jiffies + msecs_to_jiffies(msecs);
9496 curr_state = read_physical_state(dd);
9497 if (curr_state == state)
9499 if (time_after(jiffies, timeout)) {
9501 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9505 usleep_range(1950, 2050); /* sleep 2ms-ish */
9512 * Helper for set_link_state(). Do not call except from that routine.
9513 * Expects ppd->hls_mutex to be held.
9515 * @rem_reason value to be sent to the neighbor
9517 * LinkDownReasons only set if transition succeeds.
9519 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9521 struct hfi1_devdata *dd = ppd->dd;
9522 u32 pstate, previous_state;
9523 u32 last_local_state;
9524 u32 last_remote_state;
9529 previous_state = ppd->host_link_state;
9530 ppd->host_link_state = HLS_GOING_OFFLINE;
9531 pstate = read_physical_state(dd);
9532 if (pstate == PLS_OFFLINE) {
9533 do_transition = 0; /* in right state */
9534 do_wait = 0; /* ...no need to wait */
9535 } else if ((pstate & 0xff) == PLS_OFFLINE) {
9536 do_transition = 0; /* in an offline transient state */
9537 do_wait = 1; /* ...wait for it to settle */
9539 do_transition = 1; /* need to move to offline */
9540 do_wait = 1; /* ...will need to wait */
9543 if (do_transition) {
9544 ret = set_physical_link_state(dd,
9545 PLS_OFFLINE | (rem_reason << 8));
9547 if (ret != HCMD_SUCCESS) {
9549 "Failed to transition to Offline link state, return %d\n",
9553 if (ppd->offline_disabled_reason == OPA_LINKDOWN_REASON_NONE)
9554 ppd->offline_disabled_reason =
9555 OPA_LINKDOWN_REASON_TRANSIENT;
9559 /* it can take a while for the link to go down */
9560 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
9565 /* make sure the logical state is also down */
9566 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9569 * Now in charge of LCB - must be after the physical state is
9570 * offline.quiet and before host_link_state is changed.
9572 set_host_lcb_access(dd);
9573 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9574 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9577 * The LNI has a mandatory wait time after the physical state
9578 * moves to Offline.Quiet. The wait time may be different
9579 * depending on how the link went down. The 8051 firmware
9580 * will observe the needed wait time and only move to ready
9581 * when that is completed. The largest of the quiet timeouts
9582 * is 6s, so wait that long and then at least 0.5s more for
9583 * other transitions, and another 0.5s for a buffer.
9585 ret = wait_fm_ready(dd, 7000);
9588 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
9589 /* state is really offline, so make it so */
9590 ppd->host_link_state = HLS_DN_OFFLINE;
9595 * The state is now offline and the 8051 is ready to accept host
9597 * - change our state
9598 * - notify others if we were previously in a linkup state
9600 ppd->host_link_state = HLS_DN_OFFLINE;
9601 if (previous_state & HLS_UP) {
9602 /* went down while link was up */
9603 handle_linkup_change(dd, 0);
9604 } else if (previous_state
9605 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9606 /* went down while attempting link up */
9607 /* byte 1 of last_*_state is the failure reason */
9608 read_last_local_state(dd, &last_local_state);
9609 read_last_remote_state(dd, &last_remote_state);
9611 "LNI failure last states: local 0x%08x, remote 0x%08x\n",
9612 last_local_state, last_remote_state);
9615 /* the active link width (downgrade) is 0 on link down */
9616 ppd->link_width_active = 0;
9617 ppd->link_width_downgrade_tx_active = 0;
9618 ppd->link_width_downgrade_rx_active = 0;
9619 ppd->current_egress_rate = 0;
9623 /* return the link state name */
9624 static const char *link_state_name(u32 state)
9627 int n = ilog2(state);
9628 static const char * const names[] = {
9629 [__HLS_UP_INIT_BP] = "INIT",
9630 [__HLS_UP_ARMED_BP] = "ARMED",
9631 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
9632 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
9633 [__HLS_DN_POLL_BP] = "POLL",
9634 [__HLS_DN_DISABLE_BP] = "DISABLE",
9635 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
9636 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
9637 [__HLS_GOING_UP_BP] = "GOING_UP",
9638 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
9639 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
9642 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
9643 return name ? name : "unknown";
9646 /* return the link state reason name */
9647 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
9649 if (state == HLS_UP_INIT) {
9650 switch (ppd->linkinit_reason) {
9651 case OPA_LINKINIT_REASON_LINKUP:
9653 case OPA_LINKINIT_REASON_FLAPPING:
9654 return "(FLAPPING)";
9655 case OPA_LINKINIT_OUTSIDE_POLICY:
9656 return "(OUTSIDE_POLICY)";
9657 case OPA_LINKINIT_QUARANTINED:
9658 return "(QUARANTINED)";
9659 case OPA_LINKINIT_INSUFIC_CAPABILITY:
9660 return "(INSUFIC_CAPABILITY)";
9669 * driver_physical_state - convert the driver's notion of a port's
9670 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
9671 * Return -1 (converted to a u32) to indicate error.
9673 u32 driver_physical_state(struct hfi1_pportdata *ppd)
9675 switch (ppd->host_link_state) {
9679 return IB_PORTPHYSSTATE_LINKUP;
9681 return IB_PORTPHYSSTATE_POLLING;
9682 case HLS_DN_DISABLE:
9683 return IB_PORTPHYSSTATE_DISABLED;
9684 case HLS_DN_OFFLINE:
9685 return OPA_PORTPHYSSTATE_OFFLINE;
9686 case HLS_VERIFY_CAP:
9687 return IB_PORTPHYSSTATE_POLLING;
9689 return IB_PORTPHYSSTATE_POLLING;
9690 case HLS_GOING_OFFLINE:
9691 return OPA_PORTPHYSSTATE_OFFLINE;
9692 case HLS_LINK_COOLDOWN:
9693 return OPA_PORTPHYSSTATE_OFFLINE;
9694 case HLS_DN_DOWNDEF:
9696 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9697 ppd->host_link_state);
9703 * driver_logical_state - convert the driver's notion of a port's
9704 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
9705 * (converted to a u32) to indicate error.
9707 u32 driver_logical_state(struct hfi1_pportdata *ppd)
9709 if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
9710 return IB_PORT_DOWN;
9712 switch (ppd->host_link_state & HLS_UP) {
9714 return IB_PORT_INIT;
9716 return IB_PORT_ARMED;
9718 return IB_PORT_ACTIVE;
9720 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9721 ppd->host_link_state);
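/*
 * Record the reasons a link went down. Only the first report is
 * latched: if either 'latest' value is already nonzero, the call is a
 * no-op so the earliest cause is preserved.
 */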
9726 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
9727 u8 neigh_reason, u8 rem_reason)
9729 if (ppd->local_link_down_reason.latest == 0 &&
9730 ppd->neigh_link_down_reason.latest == 0) {
9731 ppd->local_link_down_reason.latest = lcl_reason;
9732 ppd->neigh_link_down_reason.latest = neigh_reason;
9733 ppd->remote_link_down_reason = rem_reason;
9738 * Change the physical and/or logical link state.
9740 * Do not call this routine while inside an interrupt. It contains
9741 * calls to routines that can take multiple seconds to finish.
9743 * Returns 0 on success, -errno on failure.
9745 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
9747 struct hfi1_devdata *dd = ppd->dd;
9748 struct ib_event event = {.device = NULL};
9750 int was_up, is_down;
9751 int orig_new_state, poll_bounce;
9753 mutex_lock(&ppd->hls_lock);
9755 orig_new_state = state;
9756 if (state == HLS_DN_DOWNDEF)
9757 state = dd->link_default;
9759 /* interpret poll -> poll as a link bounce */
9760 poll_bounce = ppd->host_link_state == HLS_DN_POLL
9761 && state == HLS_DN_POLL;
9763 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
9764 link_state_name(ppd->host_link_state),
9765 link_state_name(orig_new_state),
9766 poll_bounce ? "(bounce) " : "",
9767 link_state_reason_name(ppd, state));
9769 was_up = !!(ppd->host_link_state & HLS_UP);
9772 * If we're going to a (HLS_*) link state that implies the logical
9773 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
9774 * reset is_sm_config_started to 0.
9776 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
9777 ppd->is_sm_config_started = 0;
9780 * Do nothing if the states match. Let a poll to poll link bounce go through.
9783 if (ppd->host_link_state == state && !poll_bounce)
9788 if (ppd->host_link_state == HLS_DN_POLL && (quick_linkup
9789 || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
9791 * Quick link up jumps from polling to here.
9793 * Whether in normal or loopback mode, the
9794 * simulator jumps from polling to link up.
9798 } else if (ppd->host_link_state != HLS_GOING_UP) {
9802 ppd->host_link_state = HLS_UP_INIT;
9803 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
9805 /* logical state didn't change, stay at going_up */
9806 ppd->host_link_state = HLS_GOING_UP;
9808 "%s: logical state did not change to INIT\n",
9811 /* clear old transient LINKINIT_REASON code */
9812 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
9813 ppd->linkinit_reason =
9814 OPA_LINKINIT_REASON_LINKUP;
9816 /* enable the port */
9817 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9819 handle_linkup_change(dd, 1);
9823 if (ppd->host_link_state != HLS_UP_INIT)
9826 ppd->host_link_state = HLS_UP_ARMED;
9827 set_logical_state(dd, LSTATE_ARMED);
9828 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
9830 /* logical state didn't change, stay at init */
9831 ppd->host_link_state = HLS_UP_INIT;
9833 "%s: logical state did not change to ARMED\n",
9837 * The simulator does not currently implement SMA messages,
9838 * so neighbor_normal is not set. Set it here when we first move to Armed.
9841 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9842 ppd->neighbor_normal = 1;
9845 if (ppd->host_link_state != HLS_UP_ARMED)
9848 ppd->host_link_state = HLS_UP_ACTIVE;
9849 set_logical_state(dd, LSTATE_ACTIVE);
9850 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
9852 /* logical state didn't change, stay at armed */
9853 ppd->host_link_state = HLS_UP_ARMED;
9855 "%s: logical state did not change to ACTIVE\n",
9859 /* tell all engines to go running */
9860 sdma_all_running(dd);
9862 /* Signal the IB layer that the port has gone active */
9863 event.device = &dd->verbs_dev.ibdev;
9864 event.element.port_num = ppd->port;
9865 event.event = IB_EVENT_PORT_ACTIVE;
9869 if ((ppd->host_link_state == HLS_DN_DISABLE ||
9870 ppd->host_link_state == HLS_DN_OFFLINE) &&
9873 /* Hand LED control to the DC */
9874 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
9876 if (ppd->host_link_state != HLS_DN_OFFLINE) {
9877 u8 tmp = ppd->link_enabled;
9879 ret = goto_offline(ppd, ppd->remote_link_down_reason);
9881 ppd->link_enabled = tmp;
9884 ppd->remote_link_down_reason = 0;
9886 if (ppd->driver_link_ready)
9887 ppd->link_enabled = 1;
9890 ret = set_local_link_attributes(ppd);
9894 ppd->port_error_action = 0;
9895 ppd->host_link_state = HLS_DN_POLL;
9898 /* quick linkup does not go into polling */
9899 ret = do_quick_linkup(dd);
9901 ret1 = set_physical_link_state(dd, PLS_POLLING);
9902 if (ret1 != HCMD_SUCCESS) {
9904 "Failed to transition to Polling link state, return 0x%x\n",
9909 ppd->offline_disabled_reason = OPA_LINKDOWN_REASON_NONE;
9911 * If an error occurred above, go back to offline. The
9912 * caller may reschedule another attempt.
9915 goto_offline(ppd, 0);
9917 case HLS_DN_DISABLE:
9918 /* link is disabled */
9919 ppd->link_enabled = 0;
9921 /* allow any state to transition to disabled */
9923 /* must transition to offline first */
9924 if (ppd->host_link_state != HLS_DN_OFFLINE) {
9925 ret = goto_offline(ppd, ppd->remote_link_down_reason);
9928 ppd->remote_link_down_reason = 0;
9931 ret1 = set_physical_link_state(dd, PLS_DISABLED);
9932 if (ret1 != HCMD_SUCCESS) {
9934 "Failed to transition to Disabled link state, return 0x%x\n",
9939 ppd->host_link_state = HLS_DN_DISABLE;
9942 case HLS_DN_OFFLINE:
9943 if (ppd->host_link_state == HLS_DN_DISABLE)
9946 /* allow any state to transition to offline */
9947 ret = goto_offline(ppd, ppd->remote_link_down_reason);
9949 ppd->remote_link_down_reason = 0;
9951 case HLS_VERIFY_CAP:
9952 if (ppd->host_link_state != HLS_DN_POLL)
9954 ppd->host_link_state = HLS_VERIFY_CAP;
9957 if (ppd->host_link_state != HLS_VERIFY_CAP)
9960 ret1 = set_physical_link_state(dd, PLS_LINKUP);
9961 if (ret1 != HCMD_SUCCESS) {
9963 "Failed to transition to link up state, return 0x%x\n",
9968 ppd->host_link_state = HLS_GOING_UP;
9971 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
9972 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
9974 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
9980 is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
9981 HLS_DN_DISABLE | HLS_DN_OFFLINE));
9983 if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
9984 ppd->neigh_link_down_reason.sma == 0) {
9985 ppd->local_link_down_reason.sma =
9986 ppd->local_link_down_reason.latest;
9987 ppd->neigh_link_down_reason.sma =
9988 ppd->neigh_link_down_reason.latest;
9994 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
9995 __func__, link_state_name(ppd->host_link_state),
9996 link_state_name(state));
10000 mutex_unlock(&ppd->hls_lock);
10003 ib_dispatch_event(&event);
10008 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10014 case HFI1_IB_CFG_LIDLMC:
10017 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10019 * The VL Arbitrator high limit is sent in units of 4k
10020 * bytes, while HFI stores it in units of 64 bytes.
10023 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10024 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10025 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10027 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10028 /* HFI only supports POLL as the default link down state */
10029 if (val != HLS_DN_POLL)
10032 case HFI1_IB_CFG_OP_VLS:
10033 if (ppd->vls_operational != val) {
10034 ppd->vls_operational = val;
10038 ret = sdma_map_init(
10046 * For link width, link width downgrade, and speed enable, always AND
10047 * the setting with what is actually supported. This has two benefits.
10048 * First, enabled can't have unsupported values, no matter what the
10049 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10050 * "fill in with your supported value" have all the bits in the
10051 * field set, so simply ANDing with supported has the desired result.
10053 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10054 ppd->link_width_enabled = val & ppd->link_width_supported;
10056 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10057 ppd->link_width_downgrade_enabled =
10058 val & ppd->link_width_downgrade_supported;
10060 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10061 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10063 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10065 * HFI does not follow IB specs, save this value
10066 * so we can report it, if asked.
10068 ppd->overrun_threshold = val;
10070 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10072 * HFI does not follow IB specs, save this value
10073 * so we can report it, if asked.
10075 ppd->phy_error_threshold = val;
10078 case HFI1_IB_CFG_MTU:
10079 set_send_length(ppd);
10082 case HFI1_IB_CFG_PKEYS:
10083 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10084 set_partition_keys(ppd);
10088 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10089 dd_dev_info(ppd->dd,
10090 "%s: which %s, val 0x%x: not implemented\n",
10091 __func__, ib_cfg_name(which), val);
10097 /* begin functions related to vl arbitration table caching */
10098 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10102 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10103 VL_ARB_LOW_PRIO_TABLE_SIZE);
10104 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10105 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10108 * Note that we always return values directly from the
10109 * 'vl_arb_cache' (and do no CSR reads) in response to a
10110 * 'Get(VLArbTable)'. This is obviously correct after a
10111 * 'Set(VLArbTable)', since the cache will then be up to
10112 * date. But it's also correct prior to any 'Set(VLArbTable)'
10113 * since then both the cache and the relevant h/w registers will be in a consistent state.
10117 for (i = 0; i < MAX_PRIO_TABLE; i++)
10118 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10122 * vl_arb_lock_cache
10124 * All other vl_arb_* functions should be called only after locking the cache.
10127 static inline struct vl_arb_cache *
10128 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10130 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10132 spin_lock(&ppd->vl_arb_cache[idx].lock);
10133 return &ppd->vl_arb_cache[idx];
10136 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10138 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10141 static void vl_arb_get_cache(struct vl_arb_cache *cache,
10142 struct ib_vl_weight_elem *vl)
10144 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10147 static void vl_arb_set_cache(struct vl_arb_cache *cache,
10148 struct ib_vl_weight_elem *vl)
10150 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10153 static int vl_arb_match_cache(struct vl_arb_cache *cache,
10154 struct ib_vl_weight_elem *vl)
10156 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10158 /* end functions related to vl arbitration table caching */
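/*
 * Typical use of the cache helpers (see fm_set_table() below): lock
 * the cache, use vl_arb_match_cache() to skip the CSR writes when the
 * new table is identical, otherwise vl_arb_set_cache() the new table
 * and push the weights to hardware with set_vl_weights().
 */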
10160 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10161 u32 size, struct ib_vl_weight_elem *vl)
10163 struct hfi1_devdata *dd = ppd->dd;
10165 unsigned int i, is_up = 0;
10166 int drain, ret = 0;
10168 mutex_lock(&ppd->hls_lock);
10170 if (ppd->host_link_state & HLS_UP)
10173 drain = !is_ax(dd) && is_up;
10177 * Before adjusting VL arbitration weights, empty per-VL
10178 * FIFOs, otherwise a packet whose VL weight is being
10179 * set to 0 could get stuck in a FIFO with no chance to
10182 ret = stop_drain_data_vls(dd);
10187 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10192 for (i = 0; i < size; i++, vl++) {
10194 * NOTE: The low priority shift and mask are used here, but
10195 * they are the same for both the low and high registers.
10197 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10198 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10199 | (((u64)vl->weight
10200 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10201 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10202 write_csr(dd, target + (i * 8), reg);
10204 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10207 open_fill_data_vls(dd); /* reopen all VLs */
10210 mutex_unlock(&ppd->hls_lock);
10216 * Read one credit merge VL register.
10218 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10219 struct vl_limit *vll)
10221 u64 reg = read_csr(dd, csr);
10223 vll->dedicated = cpu_to_be16(
10224 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10225 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10226 vll->shared = cpu_to_be16(
10227 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10228 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10232 * Read the current credit merge limits.
10234 static int get_buffer_control(struct hfi1_devdata *dd,
10235 struct buffer_control *bc, u16 *overall_limit)
10240 /* not all entries are filled in */
10241 memset(bc, 0, sizeof(*bc));
10243 /* OPA and HFI have a 1-1 mapping */
10244 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10245 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8*i), &bc->vl[i]);
10247 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10248 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10250 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10251 bc->overall_shared_limit = cpu_to_be16(
10252 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10253 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10255 *overall_limit = (reg
10256 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10257 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10258 return sizeof(struct buffer_control);
10261 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10266 /* each register contains 16 SC->VLnt mappings, 4 bits each */
10267 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10268 for (i = 0; i < sizeof(u64); i++) {
10269 u8 byte = *(((u8 *)&reg) + i);
10271 dp->vlnt[2 * i] = byte & 0xf;
10272 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10275 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10276 for (i = 0; i < sizeof(u64); i++) {
10277 u8 byte = *(((u8 *)&reg) + i);
10279 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10280 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10282 return sizeof(struct sc2vlnt);
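/*
 * Illustrative example: a table-register byte of 0x21 unpacks with the
 * low nibble first, i.e. vlnt[2 * i] = 0x1 and vlnt[(2 * i) + 1] = 0x2.
 */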
10285 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10286 struct ib_vl_weight_elem *vl)
10290 for (i = 0; i < nelems; i++, vl++) {
10296 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10298 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10300 0, dp->vlnt[0] & 0xf,
10301 1, dp->vlnt[1] & 0xf,
10302 2, dp->vlnt[2] & 0xf,
10303 3, dp->vlnt[3] & 0xf,
10304 4, dp->vlnt[4] & 0xf,
10305 5, dp->vlnt[5] & 0xf,
10306 6, dp->vlnt[6] & 0xf,
10307 7, dp->vlnt[7] & 0xf,
10308 8, dp->vlnt[8] & 0xf,
10309 9, dp->vlnt[9] & 0xf,
10310 10, dp->vlnt[10] & 0xf,
10311 11, dp->vlnt[11] & 0xf,
10312 12, dp->vlnt[12] & 0xf,
10313 13, dp->vlnt[13] & 0xf,
10314 14, dp->vlnt[14] & 0xf,
10315 15, dp->vlnt[15] & 0xf));
10316 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10317 DC_SC_VL_VAL(31_16,
10318 16, dp->vlnt[16] & 0xf,
10319 17, dp->vlnt[17] & 0xf,
10320 18, dp->vlnt[18] & 0xf,
10321 19, dp->vlnt[19] & 0xf,
10322 20, dp->vlnt[20] & 0xf,
10323 21, dp->vlnt[21] & 0xf,
10324 22, dp->vlnt[22] & 0xf,
10325 23, dp->vlnt[23] & 0xf,
10326 24, dp->vlnt[24] & 0xf,
10327 25, dp->vlnt[25] & 0xf,
10328 26, dp->vlnt[26] & 0xf,
10329 27, dp->vlnt[27] & 0xf,
10330 28, dp->vlnt[28] & 0xf,
10331 29, dp->vlnt[29] & 0xf,
10332 30, dp->vlnt[30] & 0xf,
10333 31, dp->vlnt[31] & 0xf));
10336 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10340 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10341 what, (int)limit, idx);
10344 /* change only the shared limit portion of SendCmGlobalCredit */
10345 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10349 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10350 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10351 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10352 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10355 /* change only the total credit limit portion of SendCmGlobalCredit */
10356 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10360 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10361 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10362 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10363 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10366 /* set the given per-VL shared limit */
10367 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10372 if (vl < TXE_NUM_DATA_VL)
10373 addr = SEND_CM_CREDIT_VL + (8 * vl);
10375 addr = SEND_CM_CREDIT_VL15;
10377 reg = read_csr(dd, addr);
10378 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10379 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10380 write_csr(dd, addr, reg);
10383 /* set the given per-VL dedicated limit */
10384 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10389 if (vl < TXE_NUM_DATA_VL)
10390 addr = SEND_CM_CREDIT_VL + (8 * vl);
10392 addr = SEND_CM_CREDIT_VL15;
10394 reg = read_csr(dd, addr);
10395 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10396 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10397 write_csr(dd, addr, reg);
10400 /* spin until the given per-VL status mask bits clear */
10401 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10404 unsigned long timeout;
10407 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10409 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10412 return; /* success */
10413 if (time_after(jiffies, timeout))
10414 break; /* timed out */
10419 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10420 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10422 * If this occurs, it is likely there was a credit loss on the link.
10423 * The only recovery from that is a link bounce.
10426 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
10430 * The number of credits on the VLs may be changed while everything
10431 * is "live", but the following algorithm must be followed due to
10432 * how the hardware is actually implemented. In particular,
10433 * Return_Credit_Status[] is the only correct status check.
10435 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10436 * set Global_Shared_Credit_Limit = 0
10438 * mask0 = all VLs that are changing either dedicated or shared limits
10439 * set Shared_Limit[mask0] = 0
10440 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10441 * if (changing any dedicated limit)
10442 * mask1 = all VLs that are lowering dedicated limits
10443 * lower Dedicated_Limit[mask1]
10444 * spin until Return_Credit_Status[mask1] == 0
10445 * raise Dedicated_Limits
10446 * raise Shared_Limits
10447 * raise Global_Shared_Credit_Limit
10449 * lower = if the new limit is lower, set the limit to the new value
10450 * raise = if the new limit is higher than the current value (which may have
10451 *         been changed earlier in the algorithm), set the limit to the new value
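/*
 * Illustrative walk-through, raising only VL0's dedicated limit: the
 * global total limit is raised up front (the new total is larger);
 * mask0 covers just VL0, so its shared limit is zeroed and the status
 * bits are polled clear; nothing is being lowered, so the lowering
 * pass is a no-op; then VL0's dedicated limit is raised and its shared
 * limit restored. The global shared limit is untouched since it is not
 * being reduced and no shared limit differs from its current value.
 */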
10453 static int set_buffer_control(struct hfi1_devdata *dd,
10454 struct buffer_control *new_bc)
10456 u64 changing_mask, ld_mask, stat_mask;
10458 int i, use_all_mask;
10459 int this_shared_changing;
10461 * A0: add the variable any_shared_limit_changing below and in the
10462 * algorithm above. If removing A0 support, it can be removed.
10464 int any_shared_limit_changing;
10465 struct buffer_control cur_bc;
10466 u8 changing[OPA_MAX_VLS];
10467 u8 lowering_dedicated[OPA_MAX_VLS];
10470 const u64 all_mask =
10471 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10472 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10473 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10474 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10475 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10476 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10477 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10478 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10479 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10481 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10482 #define NUM_USABLE_VLS 16 /* look at VL15 and less */
10485 /* find the new total credits, do sanity check on unused VLs */
10486 for (i = 0; i < OPA_MAX_VLS; i++) {
10488 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10491 nonzero_msg(dd, i, "dedicated",
10492 be16_to_cpu(new_bc->vl[i].dedicated));
10493 nonzero_msg(dd, i, "shared",
10494 be16_to_cpu(new_bc->vl[i].shared));
10495 new_bc->vl[i].dedicated = 0;
10496 new_bc->vl[i].shared = 0;
10498 new_total += be16_to_cpu(new_bc->overall_shared_limit);
10500 /* fetch the current values */
10501 get_buffer_control(dd, &cur_bc, &cur_total);
10504 * Create the masks we will use.
10506 memset(changing, 0, sizeof(changing));
10507 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
10508 /* NOTE: Assumes that the individual VL bits are adjacent and in
10509 increasing order */
10511 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10515 any_shared_limit_changing = 0;
10516 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10519 this_shared_changing = new_bc->vl[i].shared
10520 != cur_bc.vl[i].shared;
10521 if (this_shared_changing)
10522 any_shared_limit_changing = 1;
10523 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated
10524 || this_shared_changing) {
10526 changing_mask |= stat_mask;
10529 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10530 be16_to_cpu(cur_bc.vl[i].dedicated)) {
10531 lowering_dedicated[i] = 1;
10532 ld_mask |= stat_mask;
10536 /* bracket the credit change with a total adjustment */
10537 if (new_total > cur_total)
10538 set_global_limit(dd, new_total);
10541 * Start the credit change algorithm.
10544 if ((be16_to_cpu(new_bc->overall_shared_limit) <
10545 be16_to_cpu(cur_bc.overall_shared_limit)) ||
10546 (is_ax(dd) && any_shared_limit_changing)) {
10547 set_global_shared(dd, 0);
10548 cur_bc.overall_shared_limit = 0;
10552 for (i = 0; i < NUM_USABLE_VLS; i++) {
10557 set_vl_shared(dd, i, 0);
10558 cur_bc.vl[i].shared = 0;
10562 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
10565 if (change_count > 0) {
10566 for (i = 0; i < NUM_USABLE_VLS; i++) {
10570 if (lowering_dedicated[i]) {
10571 set_vl_dedicated(dd, i,
10572 be16_to_cpu(new_bc->vl[i].dedicated));
10573 cur_bc.vl[i].dedicated =
10574 new_bc->vl[i].dedicated;
10578 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10580 /* now raise all dedicated that are going up */
10581 for (i = 0; i < NUM_USABLE_VLS; i++) {
10585 if (be16_to_cpu(new_bc->vl[i].dedicated) >
10586 be16_to_cpu(cur_bc.vl[i].dedicated))
10587 set_vl_dedicated(dd, i,
10588 be16_to_cpu(new_bc->vl[i].dedicated));
10592 /* next raise all shared that are going up */
10593 for (i = 0; i < NUM_USABLE_VLS; i++) {
10597 if (be16_to_cpu(new_bc->vl[i].shared) >
10598 be16_to_cpu(cur_bc.vl[i].shared))
10599 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10602 /* finally raise the global shared */
10603 if (be16_to_cpu(new_bc->overall_shared_limit) >
10604 be16_to_cpu(cur_bc.overall_shared_limit))
10605 set_global_shared(dd,
10606 be16_to_cpu(new_bc->overall_shared_limit));
10608 /* bracket the credit change with a total adjustment */
10609 if (new_total < cur_total)
10610 set_global_limit(dd, new_total);
10615 * Read the given fabric manager table. Return the size of the
10616 * table (in bytes) on success, and a negative error code on failure.
10619 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
10623 struct vl_arb_cache *vlc;
10626 case FM_TBL_VL_HIGH_ARB:
10629 * OPA specifies 128 elements (of 2 bytes each), though
10630 * HFI supports only 16 elements in h/w.
10632 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10633 vl_arb_get_cache(vlc, t);
10634 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10636 case FM_TBL_VL_LOW_ARB:
10639 * OPA specifies 128 elements (of 2 bytes each), though
10640 * HFI supports only 16 elements in h/w.
10642 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10643 vl_arb_get_cache(vlc, t);
10644 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10646 case FM_TBL_BUFFER_CONTROL:
10647 size = get_buffer_control(ppd->dd, t, NULL);
10649 case FM_TBL_SC2VLNT:
10650 size = get_sc2vlnt(ppd->dd, t);
10652 case FM_TBL_VL_PREEMPT_ELEMS:
10654 /* OPA specifies 128 elements, of 2 bytes each */
10655 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
10657 case FM_TBL_VL_PREEMPT_MATRIX:
10660 * OPA specifies that this is the same size as the VL
10661 * arbitration tables (i.e., 256 bytes).
10671 * Write the given fabric manager table.
10673 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
10676 struct vl_arb_cache *vlc;
10679 case FM_TBL_VL_HIGH_ARB:
10680 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10681 if (vl_arb_match_cache(vlc, t)) {
10682 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10685 vl_arb_set_cache(vlc, t);
10686 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10687 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
10688 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
10690 case FM_TBL_VL_LOW_ARB:
10691 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10692 if (vl_arb_match_cache(vlc, t)) {
10693 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10696 vl_arb_set_cache(vlc, t);
10697 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10698 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
10699 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
10701 case FM_TBL_BUFFER_CONTROL:
10702 ret = set_buffer_control(ppd->dd, t);
10704 case FM_TBL_SC2VLNT:
10705 set_sc2vlnt(ppd->dd, t);
10714 * Disable all data VLs.
10716 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
10718 static int disable_data_vls(struct hfi1_devdata *dd)
10723 pio_send_control(dd, PSC_DATA_VL_DISABLE);
10729 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
10730 * Just re-enables all data VLs (the "fill" part happens
10731 * automatically - the name was chosen for symmetry with
10732 * stop_drain_data_vls()).
10734 * Return 0 if successful, non-zero if the VLs cannot be enabled.
10736 int open_fill_data_vls(struct hfi1_devdata *dd)
10741 pio_send_control(dd, PSC_DATA_VL_ENABLE);
10747 * drain_data_vls() - assumes that disable_data_vls() has been called,
10748 * waits for the occupancy of the per-VL FIFOs (for all contexts) and of
10749 * the SDMA engines to drop to 0.
10751 static void drain_data_vls(struct hfi1_devdata *dd)
10755 pause_for_credit_return(dd);
10759 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
10761 * Use open_fill_data_vls() to resume using data VLs. This pair is
10762 * meant to be used like this:
10764 * stop_drain_data_vls(dd);
10765 * // do things with per-VL resources
10766 * open_fill_data_vls(dd);
10768 int stop_drain_data_vls(struct hfi1_devdata *dd)
10772 ret = disable_data_vls(dd);
10774 drain_data_vls(dd);
10780 * Convert a nanosecond time to a cclock count. No matter how slow
10781 * the cclock, a non-zero ns will always have a non-zero result.
10783 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
10787 if (dd->icode == ICODE_FPGA_EMULATION)
10788 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
10789 else /* simulation pretends to be ASIC */
10790 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
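/*
 * The division above truncates: any ns shorter than one cclock period
 * computes to 0 and is clamped to 1 below.
 */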
10791 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
10797 * Convert a cclock count to nanoseconds. No matter how slow
10798 * the cclock, a non-zero cclocks will always have a non-zero result.
10800 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
10804 if (dd->icode == ICODE_FPGA_EMULATION)
10805 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
10806 else /* simulation pretends to be ASIC */
10807 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
10808 if (cclocks && !ns)
10814 * Dynamically adjust the receive interrupt timeout for a context based on
10815 * incoming packet rate.
10817 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
10819 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
10821 struct hfi1_devdata *dd = rcd->dd;
10822 u32 timeout = rcd->rcvavail_timeout;
10825 * This algorithm doubles or halves the timeout depending on whether
10826 * the number of packets received in this interrupt was less than or
10827 * greater than or equal to the interrupt count.
10829 * The calculations below do not allow a steady state to be achieved.
10830 * Only at the endpoints is it possible to have an unchanging timeout.
10833 if (npkts < rcv_intr_count) {
10835 * Not enough packets arrived before the timeout, adjust
10836 * timeout downward.
10838 if (timeout < 2) /* already at minimum? */
10843 * More than enough packets arrived before the timeout, adjust
10846 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
10848 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
10851 rcd->rcvavail_timeout = timeout;
10852 /* timeout cannot be larger than rcv_intr_timeout_csr which has already
10853 been verified to be in range */
10854 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
10855 (u64)timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
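/*
 * Illustrative example: with rcv_intr_count = 16 and a current timeout
 * of 840, an interrupt that saw 8 packets halves the timeout to 420,
 * while one that saw 32 packets doubles it to 1680 (capped at
 * dd->rcv_intr_timeout_csr).
 */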
10858 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
10859 u32 intr_adjust, u32 npkts)
10861 struct hfi1_devdata *dd = rcd->dd;
10863 u32 ctxt = rcd->ctxt;
10866 * Need to write timeout register before updating RcvHdrHead to ensure
10867 * that a new value is used when the HW decides to restart counting.
10870 adjust_rcv_timeout(rcd, npkts);
10872 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
10873 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
10874 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
10877 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
10878 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
10879 << RCV_HDR_HEAD_HEAD_SHIFT);
10880 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
10884 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
10888 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
10889 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
10891 if (rcd->rcvhdrtail_kvaddr)
10892 tail = get_rcvhdrtail(rcd);
10894 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
10896 return head == tail;
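/*
 * Note: when a tail shadow copy exists in host memory (the DMA_RTAIL
 * case), the tail is read from that copy; otherwise it is fetched
 * directly from the chip CSR.
 */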
10900 * Context Control and Receive Array encoding for buffer size:
10909 * 0x8 512 KB (Receive Array only)
10910 * 0x9 1 MB (Receive Array only)
10911 * 0xa 2 MB (Receive Array only)
10913 * 0xb-0xf - reserved (Receive Array only)
10916 * This routine assumes that the value has already been sanity checked.
10918 static u32 encoded_size(u32 size)
10921 case 4*1024: return 0x1;
10922 case 8*1024: return 0x2;
10923 case 16*1024: return 0x3;
10924 case 32*1024: return 0x4;
10925 case 64*1024: return 0x5;
10926 case 128*1024: return 0x6;
10927 case 256*1024: return 0x7;
10928 case 512*1024: return 0x8;
10929 case 1*1024*1024: return 0x9;
10930 case 2*1024*1024: return 0xa;
10932 return 0x1; /* if invalid, go with the minimum size */
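/* Example: encoded_size(64 * 1024) returns 0x5 per the table above. */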
10935 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
10937 struct hfi1_ctxtdata *rcd;
10939 int did_enable = 0;
10941 rcd = dd->rcd[ctxt];
10945 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
10947 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
10948 /* if the context is already enabled, don't do the extra steps */
10949 if ((op & HFI1_RCVCTRL_CTXT_ENB)
10950 && !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
10951 /* reset the tail and hdr addresses, and sequence count */
10952 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
10953 rcd->rcvhdrq_phys);
10954 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
10955 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
10956 rcd->rcvhdrqtailaddr_phys);
10959 /* reset the cached receive header queue head value */
10963 * Zero the receive header queue so we don't get false
10964 * positives when checking the sequence number. The
10965 * sequence numbers could land exactly on the same spot.
10966 * E.g. a rcd restart before the receive header wrapped.
10968 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
10970 /* starting timeout */
10971 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
10973 /* enable the context */
10974 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
10976 /* clean the egr buffer size first */
10977 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
10978 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
10979 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
10980 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
10982 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
10983 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
10986 /* zero RcvEgrIndexHead */
10987 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
10989 /* set eager count and base index */
10990 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
10991 & RCV_EGR_CTRL_EGR_CNT_MASK)
10992 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
10993 (((rcd->eager_base >> RCV_SHIFT)
10994 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
10995 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
10996 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
10999 * Set TID (expected) count and base index.
11000 * rcd->expected_count is set to individual RcvArray entries,
11001 * not pairs, and the CSR takes a pair-count in groups of
11002 * four, so divide by 8.
11004 reg = (((rcd->expected_count >> RCV_SHIFT)
11005 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11006 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11007 (((rcd->expected_base >> RCV_SHIFT)
11008 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11009 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11010 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
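/* steer VL15 (management) traffic to the control context while it is up */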
11011 if (ctxt == HFI1_CTRL_CTXT)
11012 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11014 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11015 write_csr(dd, RCV_VL15, 0);
11017 * When a receive context is being disabled, turn on tail
11018 * update with a dummy tail address and then disable the context.
11021 if (dd->rcvhdrtail_dummy_physaddr) {
11022 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11023 dd->rcvhdrtail_dummy_physaddr);
11024 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11027 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11029 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11030 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11031 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11032 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11033 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11034 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11035 if (op & HFI1_RCVCTRL_TAILUPD_DIS)
11036 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11037 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11038 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11039 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11040 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11041 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11042 /* In one-packet-per-eager mode, the size comes from
11043 the RcvArray entry. */
11044 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11045 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11047 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11048 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11049 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11050 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11051 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11052 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11053 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11054 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11055 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11056 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11057 rcd->rcvctrl = rcvctrl;
11058 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11059 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11061 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11063 && (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11064 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11066 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11068 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11069 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11070 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11071 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11072 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11073 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11074 ctxt, reg, reg == 0 ? "not" : "still");
11080 * The interrupt timeout and count must be set after
11081 * the context is enabled to take effect.
11083 /* set interrupt timeout */
11084 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11085 (u64)rcd->rcvavail_timeout <<
11086 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11088 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11089 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11090 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11093 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11095 * If the context has been disabled and the Tail Update has
11096 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
11097 * so it doesn't contain an invalid address.
11099 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11100 dd->rcvhdrtail_dummy_physaddr);
11103 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
11110 ret = dd->cntrnameslen;
11112 dd_dev_err(dd, "read_cntrs does not support indexing");
11115 *namep = dd->cntrnames;
11117 const struct cntr_entry *entry;
11120 ret = (dd->ndevcntrs) * sizeof(u64);
11122 dd_dev_err(dd, "read_cntrs does not support indexing");
11126 /* Get the start of the block of counters */
11127 *cntrp = dd->cntrs;
11130 * Now go and fill in each counter in the block.
11132 for (i = 0; i < DEV_CNTR_LAST; i++) {
11133 entry = &dev_cntrs[i];
11134 hfi1_cdbg(CNTR, "reading %s", entry->name);
11135 if (entry->flags & CNTR_DISABLED) {
11137 hfi1_cdbg(CNTR, "\tDisabled\n");
11139 if (entry->flags & CNTR_VL) {
11140 hfi1_cdbg(CNTR, "\tPer VL\n");
11141 for (j = 0; j < C_VL_COUNT; j++) {
11142 val = entry->rw_cntr(entry,
11148 "\t\tRead 0x%llx for %d\n",
11150 dd->cntrs[entry->offset + j] =
11154 val = entry->rw_cntr(entry, dd,
11157 dd->cntrs[entry->offset] = val;
11158 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11167 * Used by sysfs to create files for hfi stats to read
11169 u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port,
11170 char **namep, u64 **cntrp)
11176 ret = dd->portcntrnameslen;
11178 dd_dev_err(dd, "index not supported");
11181 *namep = dd->portcntrnames;
11183 const struct cntr_entry *entry;
11184 struct hfi1_pportdata *ppd;
11187 ret = (dd->nportcntrs) * sizeof(u64);
11189 dd_dev_err(dd, "indexing not supported");
11192 ppd = (struct hfi1_pportdata *)(dd + 1 + port);
11193 *cntrp = ppd->cntrs;
11195 for (i = 0; i < PORT_CNTR_LAST; i++) {
11196 entry = &port_cntrs[i];
11197 hfi1_cdbg(CNTR, "reading %s", entry->name);
11198 if (entry->flags & CNTR_DISABLED) {
11200 hfi1_cdbg(CNTR, "\tDisabled\n");
11204 if (entry->flags & CNTR_VL) {
11205 hfi1_cdbg(CNTR, "\tPer VL");
11206 for (j = 0; j < C_VL_COUNT; j++) {
11207 val = entry->rw_cntr(entry, ppd, j,
11212 "\t\tRead 0x%llx for %d",
11214 ppd->cntrs[entry->offset + j] = val;
11217 val = entry->rw_cntr(entry, ppd,
11221 ppd->cntrs[entry->offset] = val;
11222 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11229 static void free_cntrs(struct hfi1_devdata *dd)
11231 struct hfi1_pportdata *ppd;
11234 if (dd->synth_stats_timer.data)
11235 del_timer_sync(&dd->synth_stats_timer);
11236 dd->synth_stats_timer.data = 0;
11237 ppd = (struct hfi1_pportdata *)(dd + 1);
11238 for (i = 0; i < dd->num_pports; i++, ppd++) {
11240 kfree(ppd->scntrs);
11241 free_percpu(ppd->ibport_data.rc_acks);
11242 free_percpu(ppd->ibport_data.rc_qacks);
11243 free_percpu(ppd->ibport_data.rc_delayed_comp);
11245 ppd->scntrs = NULL;
11246 ppd->ibport_data.rc_acks = NULL;
11247 ppd->ibport_data.rc_qacks = NULL;
11248 ppd->ibport_data.rc_delayed_comp = NULL;
11250 kfree(dd->portcntrnames);
11251 dd->portcntrnames = NULL;
11256 kfree(dd->cntrnames);
11257 dd->cntrnames = NULL;
11260 #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11261 #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11263 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11264 u64 *psval, void *context, int vl)
11269 if (entry->flags & CNTR_DISABLED) {
11270 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11274 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11276 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11278 /* If it's a synthetic counter, there is more work we need to do */
11279 if (entry->flags & CNTR_SYNTH) {
11280 if (sval == CNTR_MAX) {
11281 /* No need to read; already saturated */
11285 if (entry->flags & CNTR_32BIT) {
11286 /* 32bit counters can wrap multiple times */
11287 u64 upper = sval >> 32;
11288 u64 lower = (sval << 32) >> 32;
11290 if (lower > val) { /* hw wrapped */
11291 if (upper == CNTR_32BIT_MAX)
11297 if (val != CNTR_MAX)
11298 val = (upper << 32) | val;
11301 /* If we rolled we are saturated */
11302 if ((val < sval) || (val > CNTR_MAX))
11309 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
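/*
 * Illustrative 32-bit wrap example: with a saved value of 0x1FFFF0000
 * (upper = 1, lower = 0xFFFF0000), a hardware read of 0x10 means the
 * counter wrapped, so upper advances to 2 and the reported value is
 * (2 << 32) | 0x10.
 */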
11314 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11315 struct cntr_entry *entry,
11316 u64 *psval, void *context, int vl, u64 data)
11320 if (entry->flags & CNTR_DISABLED) {
11321 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11325 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11327 if (entry->flags & CNTR_SYNTH) {
11329 if (entry->flags & CNTR_32BIT) {
11330 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11331 (data << 32) >> 32);
11332 val = data; /* return the full 64bit value */
11334 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11338 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11343 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11348 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11350 struct cntr_entry *entry;
11353 entry = &dev_cntrs[index];
11354 sval = dd->scntrs + entry->offset;
11356 if (vl != CNTR_INVALID_VL)
11359 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11362 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11364 struct cntr_entry *entry;
11367 entry = &dev_cntrs[index];
11368 sval = dd->scntrs + entry->offset;
11370 if (vl != CNTR_INVALID_VL)
11373 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11376 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11378 struct cntr_entry *entry;
11381 entry = &port_cntrs[index];
11382 sval = ppd->scntrs + entry->offset;
11384 if (vl != CNTR_INVALID_VL)
11387 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11388 (index <= C_RCV_HDR_OVF_LAST)) {
11389 /* We do not want to bother for disabled contexts */
11393 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11396 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11398 struct cntr_entry *entry;
11401 entry = &port_cntrs[index];
11402 sval = ppd->scntrs + entry->offset;
11404 if (vl != CNTR_INVALID_VL)
11407 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11408 (index <= C_RCV_HDR_OVF_LAST)) {
11409 /* We do not want to bother for disabled contexts */
11413 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11416 static void update_synth_timer(unsigned long opaque)
11423 struct hfi1_pportdata *ppd;
11424 struct cntr_entry *entry;
11426 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11429 * Rather than keep beating on the CSRs, pick a minimal set that we can
11430 * check to watch for a potential rollover. We can do this by looking at
11431 * the number of flits sent/received. If the total flit count exceeds the
11432 * 32-bit range, we have to iterate over all the counters and update.
11434 entry = &dev_cntrs[C_DC_RCV_FLITS];
11435 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11437 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11438 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11442 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11443 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11445 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11447 * May not be strictly necessary to update but it won't hurt and
11448 * simplifies the logic here.
11451 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11454 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11456 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11457 total_flits, (u64)CNTR_32BIT_MAX);
11458 if (total_flits >= CNTR_32BIT_MAX) {
11459 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11466 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11467 for (i = 0; i < DEV_CNTR_LAST; i++) {
11468 entry = &dev_cntrs[i];
11469 if (entry->flags & CNTR_VL) {
11470 for (vl = 0; vl < C_VL_COUNT; vl++)
11471 read_dev_cntr(dd, i, vl);
11473 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11476 ppd = (struct hfi1_pportdata *)(dd + 1);
11477 for (i = 0; i < dd->num_pports; i++, ppd++) {
11478 for (j = 0; j < PORT_CNTR_LAST; j++) {
11479 entry = &port_cntrs[j];
11480 if (entry->flags & CNTR_VL) {
11481 for (vl = 0; vl < C_VL_COUNT; vl++)
11482 read_port_cntr(ppd, j, vl);
11484 read_port_cntr(ppd, j, CNTR_INVALID_VL);
11490 * We want the value in the register. The goal is to keep track
11491 * of the number of "ticks" not the counter value. In other
11492 * words if the register rolls we want to notice it and go ahead
11493 * and force an update.
11495 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11496 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11499 entry = &dev_cntrs[C_DC_RCV_FLITS];
11500 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11503 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11504 dd->unit, dd->last_tx, dd->last_rx);
11507 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11510 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
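/* self-rearming: the synthetic counters refresh every SYNTH_CNT_TIME seconds */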
11513 #define C_MAX_NAME 13 /* 12 chars + one for \0 */
11514 static int init_cntrs(struct hfi1_devdata *dd)
11516 int i, rcv_ctxts, index, j;
11519 char name[C_MAX_NAME];
11520 struct hfi1_pportdata *ppd;
11522 /* set up the stats timer; it is started (mod_timer) at the end */
11523 setup_timer(&dd->synth_stats_timer, update_synth_timer,
11524 (unsigned long)dd);
11526 /***********************/
11527 /* per device counters */
11528 /***********************/
11530 /* size names and determine how many we have */
11535 for (i = 0; i < DEV_CNTR_LAST; i++) {
11536 hfi1_dbg_early("Init cntr %s\n", dev_cntrs[i].name);
11537 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11538 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11542 if (dev_cntrs[i].flags & CNTR_VL) {
11543 hfi1_dbg_early("\tProcessing VL cntr\n");
11544 dev_cntrs[i].offset = index;
11545 for (j = 0; j < C_VL_COUNT; j++) {
11546 memset(name, '\0', C_MAX_NAME);
11547 snprintf(name, C_MAX_NAME, "%s%d",
11550 sz += strlen(name);
11552 hfi1_dbg_early("\t\t%s\n", name);
11557 /* +1 for newline */
11558 sz += strlen(dev_cntrs[i].name) + 1;
11560 dev_cntrs[i].offset = index;
11562 hfi1_dbg_early("\tAdding %s\n", dev_cntrs[i].name);
11566 /* allocate space for the counter values */
11567 dd->cntrs = kcalloc(index, sizeof(u64), GFP_KERNEL);
11571 dd->scntrs = kcalloc(index, sizeof(u64), GFP_KERNEL);
11576 /* allocate space for the counter names */
11577 dd->cntrnameslen = sz;
11578 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11579 if (!dd->cntrnames)
11582 /* fill in the names */
11583 for (p = dd->cntrnames, i = 0, index = 0; i < DEV_CNTR_LAST; i++) {
11584 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11587 if (dev_cntrs[i].flags & CNTR_VL) {
11588 for (j = 0; j < C_VL_COUNT; j++) {
11589 memset(name, '\0', C_MAX_NAME);
11590 snprintf(name, C_MAX_NAME, "%s%d",
11593 memcpy(p, name, strlen(name));
11598 memcpy(p, dev_cntrs[i].name,
11599 strlen(dev_cntrs[i].name));
11600 p += strlen(dev_cntrs[i].name);
11607 /*********************/
11608 /* per port counters */
11609 /*********************/
11612 * Go through the counters for the overflows and disable the ones we
11613 * don't need. This varies based on platform so we need to do it
11614 * dynamically here.
11616 rcv_ctxts = dd->num_rcv_contexts;
11617 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
11618 i <= C_RCV_HDR_OVF_LAST; i++) {
11619 port_cntrs[i].flags |= CNTR_DISABLED;
11622 /* size port counter names and determine how many we have */
11624 dd->nportcntrs = 0;
11625 for (i = 0; i < PORT_CNTR_LAST; i++) {
11626 hfi1_dbg_early("Init pcntr %s\n", port_cntrs[i].name);
11627 if (port_cntrs[i].flags & CNTR_DISABLED) {
11628 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
11632 if (port_cntrs[i].flags & CNTR_VL) {
11633 hfi1_dbg_early("\tProcessing VL cntr\n");
11634 port_cntrs[i].offset = dd->nportcntrs;
11635 for (j = 0; j < C_VL_COUNT; j++) {
11636 memset(name, '\0', C_MAX_NAME);
11637 snprintf(name, C_MAX_NAME, "%s%d",
11638 port_cntrs[i].name,
11640 sz += strlen(name);
11642 hfi1_dbg_early("\t\t%s\n", name);
11646 /* +1 for newline */
11647 sz += strlen(port_cntrs[i].name) + 1;
11648 port_cntrs[i].offset = dd->nportcntrs;
11650 hfi1_dbg_early("\tAdding %s\n", port_cntrs[i].name);
11654 /* allocate space for the counter names */
11655 dd->portcntrnameslen = sz;
11656 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
11657 if (!dd->portcntrnames)
11660 /* fill in port cntr names */
11661 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
11662 if (port_cntrs[i].flags & CNTR_DISABLED)
11665 if (port_cntrs[i].flags & CNTR_VL) {
11666 for (j = 0; j < C_VL_COUNT; j++) {
11667 memset(name, '\0', C_MAX_NAME);
11668 snprintf(name, C_MAX_NAME, "%s%d",
11669 port_cntrs[i].name,
11671 memcpy(p, name, strlen(name));
11676 memcpy(p, port_cntrs[i].name,
11677 strlen(port_cntrs[i].name));
11678 p += strlen(port_cntrs[i].name);
11683 /* allocate per port storage for counter values */
11684 ppd = (struct hfi1_pportdata *)(dd + 1);
11685 for (i = 0; i < dd->num_pports; i++, ppd++) {
11686 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
11690 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
11695 /* CPU counters need to be allocated and zeroed */
11696 if (init_cpu_counters(dd))
11699 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11707 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
11709 switch (chip_lstate) {
11712 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
11716 return IB_PORT_DOWN;
11718 return IB_PORT_INIT;
11720 return IB_PORT_ARMED;
11721 case LSTATE_ACTIVE:
11722 return IB_PORT_ACTIVE;
11726 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
11728 /* look at the HFI meta-states only */
11729 switch (chip_pstate & 0xf0) {
11731 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
11735 return IB_PORTPHYSSTATE_DISABLED;
11737 return OPA_PORTPHYSSTATE_OFFLINE;
11739 return IB_PORTPHYSSTATE_POLLING;
11740 case PLS_CONFIGPHY:
11741 return IB_PORTPHYSSTATE_TRAINING;
11743 return IB_PORTPHYSSTATE_LINKUP;
11745 return IB_PORTPHYSSTATE_PHY_TEST;
11749 /* return the OPA port logical state name */
11750 const char *opa_lstate_name(u32 lstate)
11752 static const char * const port_logical_names[] = {
11758 "PORT_ACTIVE_DEFER",
11760 if (lstate < ARRAY_SIZE(port_logical_names))
11761 return port_logical_names[lstate];
11765 /* return the OPA port physical state name */
11766 const char *opa_pstate_name(u32 pstate)
11768 static const char * const port_physical_names[] = {
11775 "PHYS_LINK_ERR_RECOVER",
11782 if (pstate < ARRAY_SIZE(port_physical_names))
11783 return port_physical_names[pstate];
11788 * Read the hardware link state and set the driver's cached value of it.
11789 * Return the (new) current value.
11791 u32 get_logical_state(struct hfi1_pportdata *ppd)
11795 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
11796 if (new_state != ppd->lstate) {
11797 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
11798 opa_lstate_name(new_state), new_state);
11799 ppd->lstate = new_state;
11802 * Set port status flags in the page mapped into userspace
11803 * memory. Do it here to ensure a reliable state - this is
11804 * the only function called by all state handling code.
11805 * Always set the flags because the cached value might have been
11806 * changed explicitly outside of this function.
11809 if (ppd->statusp) {
11810 switch (ppd->lstate) {
11813 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
11814 HFI1_STATUS_IB_READY);
11816 case IB_PORT_ARMED:
11817 *ppd->statusp |= HFI1_STATUS_IB_CONF;
11819 case IB_PORT_ACTIVE:
11820 *ppd->statusp |= HFI1_STATUS_IB_READY;
11824 return ppd->lstate;
11828 * wait_logical_linkstate - wait for an IB link state change to occur
11829 * @ppd: port device
11830 * @state: the state to wait for
11831 * @msecs: the number of milliseconds to wait
11833 * Wait up to msecs milliseconds for IB link state change to occur.
11834 * For now, take the easy polling route.
11835 * Returns 0 if state reached, otherwise -ETIMEDOUT.
11837 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
11840 unsigned long timeout;
11842 timeout = jiffies + msecs_to_jiffies(msecs);
11844 if (get_logical_state(ppd) == state)
11846 if (time_after(jiffies, timeout))
11850 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
11855 u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
11857 static u32 remembered_state = 0xff;
11861 pstate = read_physical_state(ppd->dd);
11862 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
11863 if (remembered_state != ib_pstate) {
11864 dd_dev_info(ppd->dd,
11865 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
11866 __func__, opa_pstate_name(ib_pstate), ib_pstate,
11868 remembered_state = ib_pstate;
11874 * Read/modify/write ASIC_QSFP register bits as selected by mask
11875 * data: 0 or 1 in the positions depending on what needs to be written
11876 * dir: 0 for read, 1 for write
11877 * mask: select by setting
11881 u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
11884 u64 qsfp_oe, target_oe;
11886 target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
11888 /* We are writing register bits, so lock access */
11892 qsfp_oe = read_csr(dd, target_oe);
11893 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
11894 write_csr(dd, target_oe, qsfp_oe);
11896 /* We are exclusively reading bits here, but it is unlikely
11897 * we'll get valid data when we set the direction of the pin
11898 * in the same call, so a reader should call this function again
11899 * to get valid data.
11901 return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
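/*
 * Usage note: since the sampled value is not reliable in the same call
 * that changes the pin direction, a typical read is two invocations --
 * one to set the direction, then another to fetch stable data.
 */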
11904 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
11905 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
11907 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
11908 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
11910 int hfi1_init_ctxt(struct send_context *sc)
11913 struct hfi1_devdata *dd = sc->dd;
11915 u8 set = (sc->type == SC_USER ?
11916 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
11917 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
11918 reg = read_kctxt_csr(dd, sc->hw_context,
11919 SEND_CTXT_CHECK_ENABLE);
11921 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
11923 SET_STATIC_RATE_CONTROL_SMASK(reg);
11924 write_kctxt_csr(dd, sc->hw_context,
11925 SEND_CTXT_CHECK_ENABLE, reg);
11930 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
11935 if (dd->icode != ICODE_RTL_SILICON) {
11936 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
11937 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
11941 reg = read_csr(dd, ASIC_STS_THERM);
11942 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
11943 ASIC_STS_THERM_CURR_TEMP_MASK);
11944 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
11945 ASIC_STS_THERM_LO_TEMP_MASK);
11946 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
11947 ASIC_STS_THERM_HI_TEMP_MASK);
11948 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
11949 ASIC_STS_THERM_CRIT_TEMP_MASK);
11950 /* triggers is a 3-bit value - 1 bit per trigger. */
11951 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
11956 /* ========================================================================= */
11959 * Enable/disable chip from delivering interrupts.
11961 void set_intr_state(struct hfi1_devdata *dd, u32 enable)
11966 * In HFI, the mask needs to be 1 to allow interrupts.
11970 const int qsfp1_int_smask = QSFP1_INT % 64;
11971 const int qsfp2_int_smask = QSFP2_INT % 64;
11973 /* enable all interrupts */
11974 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
11975 write_csr(dd, CCE_INT_MASK + (8*i), ~(u64)0);
11978 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
11979 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
11980 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
11981 * the index of the appropriate CSR in the CCEIntMask CSR array
11983 cce_int_mask = read_csr(dd, CCE_INT_MASK +
11984 (8*(QSFP1_INT/64)));
11986 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
11987 write_csr(dd, CCE_INT_MASK + (8*(QSFP1_INT/64)),
11990 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
11991 write_csr(dd, CCE_INT_MASK + (8*(QSFP2_INT/64)),
11995 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
11996 write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
12001 * Clear all interrupt sources on the chip.
12003 static void clear_all_interrupts(struct hfi1_devdata *dd)
12007 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12008 write_csr(dd, CCE_INT_CLEAR + (8*i), ~(u64)0);
12010 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12011 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12012 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12013 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12014 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12015 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12016 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12017 for (i = 0; i < dd->chip_send_contexts; i++)
12018 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12019 for (i = 0; i < dd->chip_sdma_engines; i++)
12020 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12022 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12023 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12024 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12027 /* Move to pcie.c? */
12028 static void disable_intx(struct pci_dev *pdev)
12033 static void clean_up_interrupts(struct hfi1_devdata *dd)
12037 /* remove irqs - must happen before disabling/turning off */
12038 if (dd->num_msix_entries) {
12040 struct hfi1_msix_entry *me = dd->msix_entries;
12042 for (i = 0; i < dd->num_msix_entries; i++, me++) {
12043 if (me->arg == NULL) /* => no irq, no affinity */
12045 irq_set_affinity_hint(dd->msix_entries[i].msix.vector,
12047 free_irq(me->msix.vector, me->arg);
12051 if (dd->requested_intx_irq) {
12052 free_irq(dd->pcidev->irq, dd);
12053 dd->requested_intx_irq = 0;
12057 /* turn off interrupts */
12058 if (dd->num_msix_entries) {
12060 pci_disable_msix(dd->pcidev);
12063 disable_intx(dd->pcidev);
12066 /* clean structures */
12067 for (i = 0; i < dd->num_msix_entries; i++)
12068 free_cpumask_var(dd->msix_entries[i].mask);
12069 kfree(dd->msix_entries);
12070 dd->msix_entries = NULL;
12071 dd->num_msix_entries = 0;
12075 * Remap the interrupt source from the general handler to the given MSI-X
12078 static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12083 /* clear from the handled mask of the general interrupt */
12086 dd->gi_mask[m] &= ~((u64)1 << n);
12088 /* direct the chip source to the given MSI-X interrupt */
12091 reg = read_csr(dd, CCE_INT_MAP + (8*m));
12092 reg &= ~((u64)0xff << (8*n));
12093 reg |= ((u64)msix_intr & 0xff) << (8*n);
12094 write_csr(dd, CCE_INT_MAP + (8*m), reg);
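/*
 * Worked example (assuming the usual 64 sources per mask CSR and 8
 * byte-wide map entries per map CSR): remap_intr(dd, 20, 3) clears bit
 * 20 of gi_mask[0] (20 / 64 == 0), then rewrites byte 4 of CceIntMap
 * CSR 2 (20 / 8 == 2, 20 % 8 == 4) so chip source 20 is steered to
 * MSI-X vector 3.
 */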
12097 static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12098 int engine, int msix_intr)
12101 * SDMA engine interrupt sources are grouped by type, rather than
12102 * by engine. Per-engine interrupts are as follows:
12107 remap_intr(dd, IS_SDMA_START + 0*TXE_NUM_SDMA_ENGINES + engine,
12109 remap_intr(dd, IS_SDMA_START + 1*TXE_NUM_SDMA_ENGINES + engine,
12111 remap_intr(dd, IS_SDMA_START + 2*TXE_NUM_SDMA_ENGINES + engine,
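/*
 * Worked example (engine count hypothetical): with
 * TXE_NUM_SDMA_ENGINES == 16, remap_sdma_interrupts(dd, 2, 5) steers
 * all three per-engine sources for engine 2 -- IS_SDMA_START + 2,
 * + 18, and + 34 -- to the single MSI-X vector 5.
 */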
12115 static int request_intx_irq(struct hfi1_devdata *dd)
12119 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12121 ret = request_irq(dd->pcidev->irq, general_interrupt,
12122 IRQF_SHARED, dd->intx_name, dd);
12124 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12127 dd->requested_intx_irq = 1;
12131 static int request_msix_irqs(struct hfi1_devdata *dd)
12133 const struct cpumask *local_mask;
12134 cpumask_var_t def, rcv;
12135 bool def_ret, rcv_ret;
12136 int first_general, last_general;
12137 int first_sdma, last_sdma;
12138 int first_rx, last_rx;
12139 int first_cpu, curr_cpu;
12140 int rcv_cpu, sdma_cpu;
12141 int i, ret = 0, possible;
12144 /* calculate the ranges we are going to use */
12146 first_sdma = last_general = first_general + 1;
12147 first_rx = last_sdma = first_sdma + dd->num_sdma;
12148 last_rx = first_rx + dd->n_krcv_queues;
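/*
 * Worked example (hypothetical counts): with 16 SDMA engines and 8
 * kernel receive queues the ranges above become:
 *	vector  0	- general "slow path"
 *	vectors 1..16	- SDMA engines 0..15
 *	vectors 17..24	- kernel receive contexts 0..7
 */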
12151 * Interrupt affinity.
12153 * non-rcv avail gets a default mask that
12154 * starts as possible cpus with threads reset
12155 * and each rcv avail reset.
12157 * each rcv avail gets the next node-relative cpu, wrapping
12158 * back to the first node-relative cpu as necessary.
12161 local_mask = cpumask_of_pcibus(dd->pcidev->bus);
12162 /* if first cpu is invalid, use NUMA 0 */
12163 if (cpumask_first(local_mask) >= nr_cpu_ids)
12164 local_mask = topology_core_cpumask(0);
12166 def_ret = zalloc_cpumask_var(&def, GFP_KERNEL);
12167 rcv_ret = zalloc_cpumask_var(&rcv, GFP_KERNEL);
12168 if (!def_ret || !rcv_ret)
12170 /* use local mask as default */
12171 cpumask_copy(def, local_mask);
12172 possible = cpumask_weight(def);
12173 /* disarm threads from default */
12174 ht = cpumask_weight(
12175 topology_sibling_cpumask(cpumask_first(local_mask)));
12176 for (i = possible/ht; i < possible; i++)
12177 cpumask_clear_cpu(i, def);
12178 /* def now has full cores on chosen node */
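/*
 * Worked example (hypothetical topology): on a node with 16 cores and
 * 2 threads per core, possible == 32 and ht == 2, so CPUs 16..31 (the
 * sibling threads) are cleared and def keeps one CPU per physical core.
 */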
12179 first_cpu = cpumask_first(def);
12180 if (nr_cpu_ids >= first_cpu)
12182 curr_cpu = first_cpu;
12184 /* One context is reserved as control context */
12185 for (i = first_cpu; i < dd->n_krcv_queues + first_cpu - 1; i++) {
12186 cpumask_clear_cpu(curr_cpu, def);
12187 cpumask_set_cpu(curr_cpu, rcv);
12188 curr_cpu = cpumask_next(curr_cpu, def);
12189 if (curr_cpu >= nr_cpu_ids)
12192 /* def mask has non-rcv, rcv has recv mask */
12193 rcv_cpu = cpumask_first(rcv);
12194 sdma_cpu = cpumask_first(def);
12197 * Sanity check - the code expects all SDMA chip source
12198 * interrupts to be in the same CSR, starting at bit 0. Verify
12199 * that this is true by checking the bit location of the start.
12201 BUILD_BUG_ON(IS_SDMA_START % 64);
12203 for (i = 0; i < dd->num_msix_entries; i++) {
12204 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12205 const char *err_info;
12206 irq_handler_t handler;
12207 irq_handler_t thread = NULL;
12210 struct hfi1_ctxtdata *rcd = NULL;
12211 struct sdma_engine *sde = NULL;
12213 /* obtain the arguments to request_irq */
12214 if (first_general <= i && i < last_general) {
12215 idx = i - first_general;
12216 handler = general_interrupt;
12218 snprintf(me->name, sizeof(me->name),
12219 DRIVER_NAME "_%d", dd->unit);
12220 err_info = "general";
12221 } else if (first_sdma <= i && i < last_sdma) {
12222 idx = i - first_sdma;
12223 sde = &dd->per_sdma[idx];
12224 handler = sdma_interrupt;
12226 snprintf(me->name, sizeof(me->name),
12227 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
12229 remap_sdma_interrupts(dd, idx, i);
12230 } else if (first_rx <= i && i < last_rx) {
12231 idx = i - first_rx;
12232 rcd = dd->rcd[idx];
12233 /* no interrupt if no rcd */
12237 * Set the interrupt register and mask for this
12238 * context's interrupt.
12240 rcd->ireg = (IS_RCVAVAIL_START+idx) / 64;
12241 rcd->imask = ((u64)1) <<
12242 ((IS_RCVAVAIL_START+idx) % 64);
12243 handler = receive_context_interrupt;
12244 thread = receive_context_thread;
12246 snprintf(me->name, sizeof(me->name),
12247 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
12248 err_info = "receive context";
12249 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
12251 /* not in our expected range - complain, then ignore it */
12254 "Unexpected extra MSI-X interrupt %d\n", i);
12257 /* no argument, no interrupt */
12260 /* make sure the name is terminated */
12261 me->name[sizeof(me->name)-1] = 0;
12263 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12267 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12268 err_info, me->msix.vector, idx, ret);
12272 * assign arg after the request_irq call, so it will be cleaned up
12277 if (!zalloc_cpumask_var(
12278 &dd->msix_entries[i].mask,
12281 if (handler == sdma_interrupt) {
12282 dd_dev_info(dd, "sdma engine %d cpu %d\n",
12283 sde->this_idx, sdma_cpu);
12284 sde->cpu = sdma_cpu;
12285 cpumask_set_cpu(sdma_cpu, dd->msix_entries[i].mask);
12286 sdma_cpu = cpumask_next(sdma_cpu, def);
12287 if (sdma_cpu >= nr_cpu_ids)
12288 sdma_cpu = cpumask_first(def);
12289 } else if (handler == receive_context_interrupt) {
12290 dd_dev_info(dd, "rcv ctxt %d cpu %d\n", rcd->ctxt,
12291 (rcd->ctxt == HFI1_CTRL_CTXT) ?
12292 cpumask_first(def) : rcv_cpu);
12293 if (rcd->ctxt == HFI1_CTRL_CTXT) {
12294 /* map to first default */
12295 cpumask_set_cpu(cpumask_first(def),
12296 dd->msix_entries[i].mask);
12298 cpumask_set_cpu(rcv_cpu,
12299 dd->msix_entries[i].mask);
12300 rcv_cpu = cpumask_next(rcv_cpu, rcv);
12301 if (rcv_cpu >= nr_cpu_ids)
12302 rcv_cpu = cpumask_first(rcv);
12305 /* otherwise first def */
12306 dd_dev_info(dd, "%s cpu %d\n",
12307 err_info, cpumask_first(def));
12309 cpumask_first(def), dd->msix_entries[i].mask);
12311 irq_set_affinity_hint(
12312 dd->msix_entries[i].msix.vector,
12313 dd->msix_entries[i].mask);
12317 free_cpumask_var(def);
12318 free_cpumask_var(rcv);
12326 * Set the general handler to accept all interrupts, remap all
12327 * chip interrupts back to MSI-X 0.
12329 static void reset_interrupts(struct hfi1_devdata *dd)
12333 /* all interrupts handled by the general handler */
12334 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12335 dd->gi_mask[i] = ~(u64)0;
12337 /* all chip interrupts map to MSI-X 0 */
12338 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12339 write_csr(dd, CCE_INT_MAP + (8*i), 0);
12342 static int set_up_interrupts(struct hfi1_devdata *dd)
12344 struct hfi1_msix_entry *entries;
12345 u32 total, request;
12347 int single_interrupt = 0; /* we expect to have all the interrupts */
12351 * 1 general, "slow path" interrupt (includes the SDMA engines
12352 * slow source, SDMACleanupDone)
12353 * N interrupts - one per used SDMA engine
12354 * M interrupts - one per kernel receive context
12356 total = 1 + dd->num_sdma + dd->n_krcv_queues;
12358 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12363 /* 1-1 MSI-X entry assignment */
12364 for (i = 0; i < total; i++)
12365 entries[i].msix.entry = i;
12367 /* ask for MSI-X interrupts */
12369 request_msix(dd, &request, entries);
12371 if (request == 0) {
12373 /* dd->num_msix_entries already zero */
12375 single_interrupt = 1;
12376 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12379 dd->num_msix_entries = request;
12380 dd->msix_entries = entries;
12382 if (request != total) {
12383 /* using MSI-X, with reduced interrupts */
12386 "cannot handle reduced interrupt case, want %u, got %u\n",
12391 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12394 /* mask all interrupts */
12395 set_intr_state(dd, 0);
12396 /* clear all pending interrupts */
12397 clear_all_interrupts(dd);
12399 /* reset general handler mask, chip MSI-X mappings */
12400 reset_interrupts(dd);
12402 if (single_interrupt)
12403 ret = request_intx_irq(dd);
12405 ret = request_msix_irqs(dd);
12412 clean_up_interrupts(dd);
12417 * Set up context values in dd. Sets:
12419 * num_rcv_contexts - number of contexts being used
12420 * n_krcv_queues - number of kernel contexts
12421 * first_user_ctxt - first non-kernel context in array of contexts
12422 * freectxts - number of free user contexts
12423 * num_send_contexts - number of PIO send contexts being used
12425 static int set_up_context_variables(struct hfi1_devdata *dd)
12427 int num_kernel_contexts;
12428 int total_contexts;
12433 * Kernel contexts: (to be fixed later):
12434 * - min of 2, or 1 context per NUMA node
12435 * - Context 0 - control context (VL15/multicast/error)
12436 * - Context 1 - default context
12440 * Don't count context 0 in n_krcvqs since
12441 * it isn't used for normal verbs traffic.
12443 * krcvqs will reflect number of kernel
12444 * receive contexts above 0.
12446 num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
12448 num_kernel_contexts = num_online_nodes();
12449 num_kernel_contexts =
12450 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12452 * Every kernel receive context needs an ACK send context.
12453 * One send context is allocated for each VL{0-7} and VL15.
12455 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12457 "Reducing # kernel rcv contexts to: %d, from %d\n",
12458 (int)(dd->chip_send_contexts - num_vls - 1),
12459 (int)num_kernel_contexts);
12460 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12463 * User contexts: (to be fixed later)
12464 * - default to 1 user context per CPU if num_user_contexts is negative
12467 if (num_user_contexts < 0)
12468 num_user_contexts = num_online_cpus();
12470 total_contexts = num_kernel_contexts + num_user_contexts;
12473 * Adjust the counts given a global max.
12475 if (total_contexts > dd->chip_rcv_contexts) {
12477 "Reducing # user receive contexts to: %d, from %d\n",
12478 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12479 (int)num_user_contexts);
12480 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12482 total_contexts = num_kernel_contexts + num_user_contexts;
12485 /* the first N are kernel contexts, the rest are user contexts */
12486 dd->num_rcv_contexts = total_contexts;
12487 dd->n_krcv_queues = num_kernel_contexts;
12488 dd->first_user_ctxt = num_kernel_contexts;
12489 dd->freectxts = num_user_contexts;
12491 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12492 (int)dd->chip_rcv_contexts,
12493 (int)dd->num_rcv_contexts,
12494 (int)dd->n_krcv_queues,
12495 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
12498 * Receive array allocation:
12499 * All RcvArray entries are divided into groups of 8. This
12500 * is required by the hardware and will speed up writes to
12501 * consecutive entries by using write-combining of the entire cacheline.
12504 * The number of groups is evenly divided among all contexts;
12505 * any leftover groups are given to the first N user contexts.
12508 dd->rcv_entries.group_size = RCV_INCREMENT;
12509 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12510 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12511 dd->rcv_entries.nctxt_extra = ngroups -
12512 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12513 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12514 dd->rcv_entries.ngroups,
12515 dd->rcv_entries.nctxt_extra);
12516 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12517 MAX_EAGER_ENTRIES * 2) {
12518 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12519 dd->rcv_entries.group_size;
12521 "RcvArray group count too high, change to %u\n",
12522 dd->rcv_entries.ngroups);
12523 dd->rcv_entries.nctxt_extra = 0;
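/*
 * Worked example (hypothetical sizes): a RcvArray of 8192 entries with
 * group_size == 8 yields ngroups == 1024.  With 40 receive contexts,
 * each context gets 1024 / 40 == 25 groups and the remaining
 * 1024 - (40 * 25) == 24 groups become nctxt_extra.
 */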
12526 * PIO send contexts
12528 ret = init_sc_pools_and_sizes(dd);
12529 if (ret >= 0) { /* success */
12530 dd->num_send_contexts = ret;
12533 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
12534 dd->chip_send_contexts,
12535 dd->num_send_contexts,
12536 dd->sc_sizes[SC_KERNEL].count,
12537 dd->sc_sizes[SC_ACK].count,
12538 dd->sc_sizes[SC_USER].count);
12539 ret = 0; /* success */
12546 * Set the device/port partition key table. The MAD code
12547 * will ensure that, at least, the partial management
12548 * partition key is present in the table.
12550 static void set_partition_keys(struct hfi1_pportdata *ppd)
12552 struct hfi1_devdata *dd = ppd->dd;
12556 dd_dev_info(dd, "Setting partition keys\n");
12557 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12558 reg |= (ppd->pkeys[i] &
12559 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12561 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12562 /* Each register holds 4 PKey values. */
12563 if ((i % 4) == 3) {
12564 write_csr(dd, RCV_PARTITION_KEY +
12565 ((i - 3) * 2), reg);
12570 /* Always enable HW pkeys check when pkeys table is set */
12571 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
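/*
 * Worked example: pkeys are 16 bits and four pack into each 64-bit
 * RcvPartitionKey CSR, so pkeys[4..7] accumulate in reg and land in
 * the second register when i == 7, at byte offset (7 - 3) * 2 == 8.
 */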
12575 * These CSRs and memories are uninitialized on reset and must be
12576 * written before reading to set the ECC/parity bits.
12578 * NOTE: All user context CSRs that are not mmapped write-only
12579 * (e.g. the TID flows) must be initialized even if the driver never
12582 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12587 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12588 write_csr(dd, CCE_INT_MAP+(8*i), 0);
12590 /* SendCtxtCreditReturnAddr */
12591 for (i = 0; i < dd->chip_send_contexts; i++)
12592 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12594 /* PIO Send buffers */
12595 /* SDMA Send buffers */
12596 /* These are not normally read, and (presently) have no method
12597 to be read, so are not pre-initialized */
12600 /* RcvHdrTailAddr */
12601 /* RcvTidFlowTable */
12602 for (i = 0; i < dd->chip_rcv_contexts; i++) {
12603 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12604 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12605 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
12606 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE+(8*j), 0);
12610 for (i = 0; i < dd->chip_rcv_array_count; i++)
12611 write_csr(dd, RCV_ARRAY + (8*i),
12612 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
12614 /* RcvQPMapTable */
12615 for (i = 0; i < 32; i++)
12616 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12620 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12622 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12625 unsigned long timeout;
12628 /* is the condition present? */
12629 reg = read_csr(dd, CCE_STATUS);
12630 if ((reg & status_bits) == 0)
12633 /* clear the condition */
12634 write_csr(dd, CCE_CTRL, ctrl_bits);
12636 /* wait for the condition to clear */
12637 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12639 reg = read_csr(dd, CCE_STATUS);
12640 if ((reg & status_bits) == 0)
12642 if (time_after(jiffies, timeout)) {
12644 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
12645 status_bits, reg & status_bits);
12652 /* set CCE CSRs to chip reset defaults */
12653 static void reset_cce_csrs(struct hfi1_devdata *dd)
12657 /* CCE_REVISION read-only */
12658 /* CCE_REVISION2 read-only */
12659 /* CCE_CTRL - bits clear automatically */
12660 /* CCE_STATUS read-only, use CceCtrl to clear */
12661 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
12662 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
12663 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
12664 for (i = 0; i < CCE_NUM_SCRATCH; i++)
12665 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
12666 /* CCE_ERR_STATUS read-only */
12667 write_csr(dd, CCE_ERR_MASK, 0);
12668 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
12669 /* CCE_ERR_FORCE leave alone */
12670 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
12671 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
12672 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
12673 /* CCE_PCIE_CTRL leave alone */
12674 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
12675 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
12676 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
12677 CCE_MSIX_TABLE_UPPER_RESETCSR);
12679 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
12680 /* CCE_MSIX_PBA read-only */
12681 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
12682 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
12684 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12685 write_csr(dd, CCE_INT_MAP, 0);
12686 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
12687 /* CCE_INT_STATUS read-only */
12688 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
12689 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
12690 /* CCE_INT_FORCE leave alone */
12691 /* CCE_INT_BLOCKED read-only */
12693 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
12694 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
12697 /* set ASIC CSRs to chip reset defaults */
12698 static void reset_asic_csrs(struct hfi1_devdata *dd)
12703 * If the HFIs are shared between separate nodes or VMs,
12704 * then more will need to be done here. One idea is a module
12705 * parameter that returns early, letting the first power-on or
12706 * a known first load do the reset and blocking all others.
12709 if (!(dd->flags & HFI1_DO_INIT_ASIC))
12712 if (dd->icode != ICODE_FPGA_EMULATION) {
12713 /* emulation does not have an SBus - leave these alone */
12715 * All writes to ASIC_CFG_SBUS_REQUEST do something.
12717 * o The reset is not zero if aimed at the core. See the
12718 * SBus documentation for details.
12719 * o If the SBus firmware has been updated (e.g. by the BIOS),
12720 * will the reset revert that?
12722 /* ASIC_CFG_SBUS_REQUEST leave alone */
12723 write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
12725 /* ASIC_SBUS_RESULT read-only */
12726 write_csr(dd, ASIC_STS_SBUS_COUNTERS, 0);
12727 for (i = 0; i < ASIC_NUM_SCRATCH; i++)
12728 write_csr(dd, ASIC_CFG_SCRATCH + (8 * i), 0);
12729 write_csr(dd, ASIC_CFG_MUTEX, 0); /* this will clear it */
12731 /* We might want to retain this state across FLR if we ever use it */
12732 write_csr(dd, ASIC_CFG_DRV_STR, 0);
12734 /* ASIC_CFG_THERM_POLL_EN leave alone */
12735 /* ASIC_STS_THERM read-only */
12736 /* ASIC_CFG_RESET leave alone */
12738 write_csr(dd, ASIC_PCIE_SD_HOST_CMD, 0);
12739 /* ASIC_PCIE_SD_HOST_STATUS read-only */
12740 write_csr(dd, ASIC_PCIE_SD_INTRPT_DATA_CODE, 0);
12741 write_csr(dd, ASIC_PCIE_SD_INTRPT_ENABLE, 0);
12742 /* ASIC_PCIE_SD_INTRPT_PROGRESS read-only */
12743 write_csr(dd, ASIC_PCIE_SD_INTRPT_STATUS, ~0ull); /* clear */
12744 /* ASIC_HFI0_PCIE_SD_INTRPT_RSPD_DATA read-only */
12745 /* ASIC_HFI1_PCIE_SD_INTRPT_RSPD_DATA read-only */
12746 for (i = 0; i < 16; i++)
12747 write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (8 * i), 0);
12749 /* ASIC_GPIO_IN read-only */
12750 write_csr(dd, ASIC_GPIO_OE, 0);
12751 write_csr(dd, ASIC_GPIO_INVERT, 0);
12752 write_csr(dd, ASIC_GPIO_OUT, 0);
12753 write_csr(dd, ASIC_GPIO_MASK, 0);
12754 /* ASIC_GPIO_STATUS read-only */
12755 write_csr(dd, ASIC_GPIO_CLEAR, ~0ull);
12756 /* ASIC_GPIO_FORCE leave alone */
12758 /* ASIC_QSFP1_IN read-only */
12759 write_csr(dd, ASIC_QSFP1_OE, 0);
12760 write_csr(dd, ASIC_QSFP1_INVERT, 0);
12761 write_csr(dd, ASIC_QSFP1_OUT, 0);
12762 write_csr(dd, ASIC_QSFP1_MASK, 0);
12763 /* ASIC_QSFP1_STATUS read-only */
12764 write_csr(dd, ASIC_QSFP1_CLEAR, ~0ull);
12765 /* ASIC_QSFP1_FORCE leave alone */
12767 /* ASIC_QSFP2_IN read-only */
12768 write_csr(dd, ASIC_QSFP2_OE, 0);
12769 write_csr(dd, ASIC_QSFP2_INVERT, 0);
12770 write_csr(dd, ASIC_QSFP2_OUT, 0);
12771 write_csr(dd, ASIC_QSFP2_MASK, 0);
12772 /* ASIC_QSFP2_STATUS read-only */
12773 write_csr(dd, ASIC_QSFP2_CLEAR, ~0ull);
12774 /* ASIC_QSFP2_FORCE leave alone */
12776 write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_RESETCSR);
12777 /* this also writes a NOP command, clearing paging mode */
12778 write_csr(dd, ASIC_EEP_ADDR_CMD, 0);
12779 write_csr(dd, ASIC_EEP_DATA, 0);
12782 /* set MISC CSRs to chip reset defaults */
12783 static void reset_misc_csrs(struct hfi1_devdata *dd)
12787 for (i = 0; i < 32; i++) {
12788 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
12789 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
12790 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
12792 /* MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
12793 only be written in 128-byte chunks */
12794 /* init RSA engine to clear lingering errors */
12795 write_csr(dd, MISC_CFG_RSA_CMD, 1);
12796 write_csr(dd, MISC_CFG_RSA_MU, 0);
12797 write_csr(dd, MISC_CFG_FW_CTRL, 0);
12798 /* MISC_STS_8051_DIGEST read-only */
12799 /* MISC_STS_SBM_DIGEST read-only */
12800 /* MISC_STS_PCIE_DIGEST read-only */
12801 /* MISC_STS_FAB_DIGEST read-only */
12802 /* MISC_ERR_STATUS read-only */
12803 write_csr(dd, MISC_ERR_MASK, 0);
12804 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
12805 /* MISC_ERR_FORCE leave alone */
12808 /* set TXE CSRs to chip reset defaults */
12809 static void reset_txe_csrs(struct hfi1_devdata *dd)
12816 write_csr(dd, SEND_CTRL, 0);
12817 __cm_reset(dd, 0); /* reset CM internal state */
12818 /* SEND_CONTEXTS read-only */
12819 /* SEND_DMA_ENGINES read-only */
12820 /* SEND_PIO_MEM_SIZE read-only */
12821 /* SEND_DMA_MEM_SIZE read-only */
12822 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
12823 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
12824 /* SEND_PIO_ERR_STATUS read-only */
12825 write_csr(dd, SEND_PIO_ERR_MASK, 0);
12826 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
12827 /* SEND_PIO_ERR_FORCE leave alone */
12828 /* SEND_DMA_ERR_STATUS read-only */
12829 write_csr(dd, SEND_DMA_ERR_MASK, 0);
12830 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
12831 /* SEND_DMA_ERR_FORCE leave alone */
12832 /* SEND_EGRESS_ERR_STATUS read-only */
12833 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
12834 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
12835 /* SEND_EGRESS_ERR_FORCE leave alone */
12836 write_csr(dd, SEND_BTH_QP, 0);
12837 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
12838 write_csr(dd, SEND_SC2VLT0, 0);
12839 write_csr(dd, SEND_SC2VLT1, 0);
12840 write_csr(dd, SEND_SC2VLT2, 0);
12841 write_csr(dd, SEND_SC2VLT3, 0);
12842 write_csr(dd, SEND_LEN_CHECK0, 0);
12843 write_csr(dd, SEND_LEN_CHECK1, 0);
12844 /* SEND_ERR_STATUS read-only */
12845 write_csr(dd, SEND_ERR_MASK, 0);
12846 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
12847 /* SEND_ERR_FORCE read-only */
12848 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
12849 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8*i), 0);
12850 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
12851 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8*i), 0);
12852 for (i = 0; i < dd->chip_send_contexts/NUM_CONTEXTS_PER_SET; i++)
12853 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8*i), 0);
12854 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
12855 write_csr(dd, SEND_COUNTER_ARRAY32 + (8*i), 0);
12856 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
12857 write_csr(dd, SEND_COUNTER_ARRAY64 + (8*i), 0);
12858 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
12859 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
12860 SEND_CM_GLOBAL_CREDIT_RESETCSR);
12861 /* SEND_CM_CREDIT_USED_STATUS read-only */
12862 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
12863 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
12864 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
12865 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
12866 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
12867 for (i = 0; i < TXE_NUM_DATA_VL; i++)
12868 write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
12869 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
12870 /* SEND_CM_CREDIT_USED_VL read-only */
12871 /* SEND_CM_CREDIT_USED_VL15 read-only */
12872 /* SEND_EGRESS_CTXT_STATUS read-only */
12873 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
12874 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
12875 /* SEND_EGRESS_ERR_INFO read-only */
12876 /* SEND_EGRESS_ERR_SOURCE read-only */
12879 * TXE Per-Context CSRs
12881 for (i = 0; i < dd->chip_send_contexts; i++) {
12882 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
12883 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
12884 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12885 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
12886 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
12887 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
12888 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
12889 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
12890 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
12891 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
12892 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
12893 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
12897 * TXE Per-SDMA CSRs
12899 for (i = 0; i < dd->chip_sdma_engines; i++) {
12900 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
12901 /* SEND_DMA_STATUS read-only */
12902 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
12903 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
12904 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
12905 /* SEND_DMA_HEAD read-only */
12906 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
12907 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
12908 /* SEND_DMA_IDLE_CNT read-only */
12909 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
12910 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
12911 /* SEND_DMA_DESC_FETCHED_CNT read-only */
12912 /* SEND_DMA_ENG_ERR_STATUS read-only */
12913 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
12914 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
12915 /* SEND_DMA_ENG_ERR_FORCE leave alone */
12916 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
12917 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
12918 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
12919 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
12920 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
12921 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
12922 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
12928 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
12930 static void init_rbufs(struct hfi1_devdata *dd)
12936 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
12941 reg = read_csr(dd, RCV_STATUS);
12942 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
12943 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
12946 * Give up after 1ms - maximum wait time.
12948 * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
12949 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
12950 * 148 KB / (66% * 250MB/s) = 920us
12952 if (count++ > 500) {
12954 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
12958 udelay(2); /* do not busy-wait the CSR */
12961 /* start the init - expect RcvCtrl to be 0 */
12962 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
12965 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
12966 * period after the write before RcvStatus.RxRbufInitDone is valid.
12967 * The delay in the first run through the loop below is sufficient and
12968 * required before the first read of RcvStatus.RxRbufInitDone.
12970 read_csr(dd, RCV_CTRL);
12972 /* wait for the init to finish */
12975 /* delay is required first time through - see above */
12976 udelay(2); /* do not busy-wait the CSR */
12977 reg = read_csr(dd, RCV_STATUS);
12978 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
12981 /* give up after 100us - slowest possible at 33MHz is 73us */
12982 if (count++ > 50) {
12984 "%s: RcvStatus.RxRbufInit not set, continuing\n",
12991 /* set RXE CSRs to chip reset defaults */
12992 static void reset_rxe_csrs(struct hfi1_devdata *dd)
12999 write_csr(dd, RCV_CTRL, 0);
13001 /* RCV_STATUS read-only */
13002 /* RCV_CONTEXTS read-only */
13003 /* RCV_ARRAY_CNT read-only */
13004 /* RCV_BUF_SIZE read-only */
13005 write_csr(dd, RCV_BTH_QP, 0);
13006 write_csr(dd, RCV_MULTICAST, 0);
13007 write_csr(dd, RCV_BYPASS, 0);
13008 write_csr(dd, RCV_VL15, 0);
13009 /* this is a clear-down */
13010 write_csr(dd, RCV_ERR_INFO,
13011 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13012 /* RCV_ERR_STATUS read-only */
13013 write_csr(dd, RCV_ERR_MASK, 0);
13014 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13015 /* RCV_ERR_FORCE leave alone */
13016 for (i = 0; i < 32; i++)
13017 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13018 for (i = 0; i < 4; i++)
13019 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13020 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13021 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13022 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13023 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13024 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13025 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13026 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13027 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13029 for (i = 0; i < 32; i++)
13030 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13033 * RXE Kernel and User Per-Context CSRs
13035 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13037 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13038 /* RCV_CTXT_STATUS read-only */
13039 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13040 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13041 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13042 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13043 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13044 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13045 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13046 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13047 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13048 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13051 /* RCV_HDR_TAIL read-only */
13052 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13053 /* RCV_EGR_INDEX_TAIL read-only */
13054 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13055 /* RCV_EGR_OFFSET_TAIL read-only */
13056 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13057 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j),
13064 * Set sc2vl tables.
13066 * They power on to zeros, so to avoid send context errors
13067 * they need to be set:
13069 * SC 0-7 -> VL 0-7 (respectively), SC 15 -> VL 15, all others -> VL 0
13074 static void init_sc2vl_tables(struct hfi1_devdata *dd)
13077 /* init per architecture spec, constrained by hardware capability */
13079 /* HFI maps sent packets */
13080 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13086 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13092 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13098 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13105 /* DC maps received packets */
13106 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13108 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13109 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13110 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13112 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13113 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13115 /* initialize the cached sc2vl values consistently with h/w */
13116 for (i = 0; i < 32; i++) {
13117 if (i < 8 || i == 15)
13118 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13120 *((u8 *)(dd->sc2vl) + i) = 0;
13125 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13126 * depend on the chip going through a power-on reset - a driver may be loaded
13127 * and unloaded many times.
13129 * Do not write any CSR values to the chip in this routine - there may be
13130 * a reset following the (possible) FLR in this routine.
13133 static void init_chip(struct hfi1_devdata *dd)
13138 * Put the HFI CSRs in a known state.
13139 * Combine this with a DC reset.
13141 * Stop the device from doing anything while we do a
13142 * reset. We know there are no other active users of
13143 * the device since we are now in charge. Turn off
13144 * all outbound and inbound traffic and make sure
13145 * the device does not generate any interrupts.
13148 /* disable send contexts and SDMA engines */
13149 write_csr(dd, SEND_CTRL, 0);
13150 for (i = 0; i < dd->chip_send_contexts; i++)
13151 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13152 for (i = 0; i < dd->chip_sdma_engines; i++)
13153 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13154 /* disable port (turn off RXE inbound traffic) and contexts */
13155 write_csr(dd, RCV_CTRL, 0);
13156 for (i = 0; i < dd->chip_rcv_contexts; i++)
13157 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13158 /* mask all interrupt sources */
13159 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13160 write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
13163 * DC Reset: do a full DC reset before the register clear.
13164 * A recommended length of time to hold is one CSR read,
13165 * so reread the CceDcCtrl. Then, hold the DC in reset
13166 * across the clear.
13168 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13169 (void) read_csr(dd, CCE_DC_CTRL);
13173 * A FLR will reset the SPC core and part of the PCIe.
13174 * The parts that need to be restored have already been
13177 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13179 /* do the FLR, the DC reset will remain */
13182 /* restore command and BARs */
13183 restore_pci_variables(dd);
13186 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13188 restore_pci_variables(dd);
13191 reset_asic_csrs(dd);
13193 dd_dev_info(dd, "Resetting CSRs with writes\n");
13194 reset_cce_csrs(dd);
13195 reset_txe_csrs(dd);
13196 reset_rxe_csrs(dd);
13197 reset_asic_csrs(dd);
13198 reset_misc_csrs(dd);
13200 /* clear the DC reset */
13201 write_csr(dd, CCE_DC_CTRL, 0);
13203 /* Set the LED off */
13207 * Clear the QSFP reset.
13208 * An FLR enforces a 0 on all out pins. The driver does not touch
13209 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
13210 * anything plugged in held constantly in reset, if it pays attention to RESET_N.
13212 * Prime examples of this are optical cables. Set all pins high.
13213 * I2CCLK and I2CDAT will change per direction, and INT_N and
13214 * MODPRS_N are input only and their value is ignored.
13216 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13217 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
13220 static void init_early_variables(struct hfi1_devdata *dd)
13224 /* assign link credit variables */
13226 dd->link_credits = CM_GLOBAL_CREDITS;
13228 dd->link_credits--;
13229 dd->vcu = cu_to_vcu(hfi1_cu);
13230 /* enough room for 8 MAD packets plus header - 17K */
13231 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13232 if (dd->vl15_init > dd->link_credits)
13233 dd->vl15_init = dd->link_credits;
13235 write_uninitialized_csrs_and_memories(dd);
13237 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13238 for (i = 0; i < dd->num_pports; i++) {
13239 struct hfi1_pportdata *ppd = &dd->pport[i];
13241 set_partition_keys(ppd);
13243 init_sc2vl_tables(dd);
13246 static void init_kdeth_qp(struct hfi1_devdata *dd)
13248 /* user changed the KDETH_QP */
13249 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13250 /* out of range or illegal value */
13251 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13254 if (kdeth_qp == 0) /* not set, or failed range check */
13255 kdeth_qp = DEFAULT_KDETH_QP;
13257 write_csr(dd, SEND_BTH_QP,
13258 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK)
13259 << SEND_BTH_QP_KDETH_QP_SHIFT);
13261 write_csr(dd, RCV_BTH_QP,
13262 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK)
13263 << RCV_BTH_QP_KDETH_QP_SHIFT);
13268 * @dd - device data
13269 * @first_ctxt - first context
13270 * @last_ctxt - last context
13272 * This routine sets the qpn mapping table that
13273 * is indexed by qpn[8:1].
13275 * The routine will round robin the 256 settings
13276 * from first_ctxt to last_ctxt.
13278 * The first/last looks ahead to having specialized
13279 * receive contexts for mgmt and bypass. Normal
13280 * verbs traffic is assumed to be on a range
13281 * of receive contexts.
13283 static void init_qpmap_table(struct hfi1_devdata *dd,
13288 u64 regno = RCV_QP_MAP_TABLE;
13290 u64 ctxt = first_ctxt;
13292 for (i = 0; i < 256;) {
13293 reg |= ctxt << (8 * (i % 8));
13296 if (ctxt > last_ctxt)
13299 write_csr(dd, regno, reg);
13305 write_csr(dd, regno, reg);
13307 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13308 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
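/*
 * Worked example: init_qpmap_table(dd, 1, 3) fills the 256-entry table
 * (eight one-byte entries per CSR, indexed by qpn[8:1]) with the
 * repeating pattern 1, 2, 3, 1, 2, 3, ..., round-robining QPNs across
 * receive contexts 1 through 3.
 */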
13312 * init_qos - init RX qos
13313 * @dd - device data
13316 * This routine initializes Rule 0 and the
13317 * RSM map table to implement qos.
13319 * If all of the limit tests succeed,
13320 * qos is applied based on the array
13321 * interpretation of krcvqs, where entry 0 is VL0.
13324 * The number of vl bits (n) and the number of qpn
13325 * bits (m) are computed to feed both the RSM map table
13326 * and the single rule.
13329 static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
13332 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13335 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
13338 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13342 for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
13343 if (krcvqs[i] > max_by_vl)
13344 max_by_vl = krcvqs[i];
13345 if (max_by_vl > 32)
13347 qpns_per_vl = __roundup_pow_of_two(max_by_vl);
13348 /* determine bits vl */
13349 n = ilog2(num_vls);
13350 /* determine bits for qpn */
13351 m = ilog2(qpns_per_vl);
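/*
 * Worked example: krcvqs == { 2, 2 } with num_vls == 2 gives
 * max_by_vl == 2 and qpns_per_vl == 2, so n == 1 VL bit and m == 1 QPN
 * bit; the RSM rule below extracts m + n == 2 index bits in total.
 */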
13354 if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
13356 rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
13359 memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
13360 /* init the local copy of the table */
13361 for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
13364 for (qpn = 0, tctxt = ctxt;
13365 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13366 unsigned idx, regoff, regidx;
13368 /* generate index <= 128 */
13369 idx = (qpn << n) ^ i;
13370 regoff = (idx % 8) * 8;
13372 reg = rsmmap[regidx];
13373 /* replace 0xff with context number */
13374 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13376 reg |= (u64)(tctxt++) << regoff;
13377 rsmmap[regidx] = reg;
13378 if (tctxt == ctxt + krcvqs[i])
13383 /* flush cached copies to chip */
13384 for (i = 0; i < NUM_MAP_REGS; i++)
13385 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
13387 write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
13388 RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK
13389 << RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
13390 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13391 write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
13392 LRH_BTH_MATCH_OFFSET
13393 << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13394 LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13395 LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13396 ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13397 QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13398 ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13399 write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
13400 LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
13401 LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
13402 LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
13403 LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
13405 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13407 /* map everything else to first context */
13408 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
13409 dd->qos_shift = n + 1;
13413 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
13416 static void init_rxe(struct hfi1_devdata *dd)
13418 /* enable all receive errors */
13419 write_csr(dd, RCV_ERR_MASK, ~0ull);
13420 /* setup QPN map table - start where VL15 context leaves off */
13423 dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0);
13425 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13426 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13427 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
13428 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13429 * Max_Payload_Size set to its minimum of 128.
13431 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13432 * (64 bytes). Max_Payload_Size is possibly modified upward in
13433 * tune_pcie_caps() which is called after this routine.
13437 static void init_other(struct hfi1_devdata *dd)
13439 /* enable all CCE errors */
13440 write_csr(dd, CCE_ERR_MASK, ~0ull);
13441 /* enable *some* Misc errors */
13442 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13443 /* enable all DC errors, except LCB */
13444 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13445 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13449 * Fill out the given AU table using the given CU. A CU is defined in terms
13450 * of AUs. The table is an encoding: given the index, how many AUs does that
13453 * NOTE: Assumes that the register layout is the same for the
13454 * local and remote tables.
13456 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13457 u32 csr0to3, u32 csr4to7)
13459 write_csr(dd, csr0to3,
13461 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT
13463 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT
13465 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT
13467 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
13468 write_csr(dd, csr4to7,
13470 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT
13472 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT
13474 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT
13476 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
13480 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13482 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
13483 SEND_CM_LOCAL_AU_TABLE4_TO7);
13486 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13488 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
13489 SEND_CM_REMOTE_AU_TABLE4_TO7);
13492 static void init_txe(struct hfi1_devdata *dd)
13496 /* enable all PIO, SDMA, general, and Egress errors */
13497 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13498 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13499 write_csr(dd, SEND_ERR_MASK, ~0ull);
13500 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13502 /* enable all per-context and per-SDMA engine errors */
13503 for (i = 0; i < dd->chip_send_contexts; i++)
13504 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13505 for (i = 0; i < dd->chip_sdma_engines; i++)
13506 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13508 /* set the local CU to AU mapping */
13509 assign_local_cm_au_table(dd, dd->vcu);
13512 * Set reasonable default for Credit Return Timer
13513 * Don't set on Simulator - causes it to choke.
13515 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13516 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13519 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13521 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13526 if (!rcd || !rcd->sc) {
13530 sctxt = rcd->sc->hw_context;
13531 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13532 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13533 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13534 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13535 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13536 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13537 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13539 * Enable send-side J_KEY integrity check, unless this is A0 h/w
13542 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13543 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13544 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13547 /* Enable J_KEY check on receive context. */
13548 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13549 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13550 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13551 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13556 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13558 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13563 if (!rcd || !rcd->sc) {
13567 sctxt = rcd->sc->hw_context;
13568 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
13570 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
13571 * This check would not have been enabled for A0 h/w, see
13575 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13576 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13577 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13579 /* Turn off the J_KEY on the receive side */
13580 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
13585 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
13587 struct hfi1_ctxtdata *rcd;
13592 if (ctxt < dd->num_rcv_contexts)
13593 rcd = dd->rcd[ctxt];
13598 if (!rcd || !rcd->sc) {
13602 sctxt = rcd->sc->hw_context;
13603 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
13604 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
13605 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
13606 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13607 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13608 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13613 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
13615 struct hfi1_ctxtdata *rcd;
13620 if (ctxt < dd->num_rcv_contexts)
13621 rcd = dd->rcd[ctxt];
13626 if (!rcd || !rcd->sc) {
13630 sctxt = rcd->sc->hw_context;
13631 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13632 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13633 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13634 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13640 * Start doing the clean up of the chip. Our clean up happens in multiple
13641 * stages and this is just the first.
13643 void hfi1_start_cleanup(struct hfi1_devdata *dd)
13647 clean_up_interrupts(dd);
13650 #define HFI_BASE_GUID(dev) \
13651 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
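/*
 * Example: the two HFIs on one ASIC share a base GUID and differ only
 * in the bit at GUID_HFI_INDEX_SHIFT, so masking that bit out lets
 * asic_should_init() below recognize its peer device by GUID.
 */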
13654 * Certain chip functions need to be initialized only once per asic
13655 * instead of per-device. This function finds the peer device and
13656 * checks whether that chip initialization needs to be done by this
13659 static void asic_should_init(struct hfi1_devdata *dd)
13661 unsigned long flags;
13662 struct hfi1_devdata *tmp, *peer = NULL;
13664 spin_lock_irqsave(&hfi1_devs_lock, flags);
13665 /* Find our peer device */
13666 list_for_each_entry(tmp, &hfi1_dev_list, list) {
13667 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
13668 dd->unit != tmp->unit) {
13675 * "Claim" the ASIC for initialization if it hasn't been
13678 if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC))
13679 dd->flags |= HFI1_DO_INIT_ASIC;
13680 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
13684 * Set dd->boardname. Use a generic name if a name is not returned from
13685 * EFI variable space.
13687 * Return 0 on success, -ENOMEM if space could not be allocated.
13689 static int obtain_boardname(struct hfi1_devdata *dd)
13691 /* generic board description */
13692 const char generic[] =
13693 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
13694 unsigned long size;
13697 ret = read_hfi1_efi_var(dd, "description", &size,
13698 (void **)&dd->boardname);
13700 dd_dev_err(dd, "Board description not found\n");
13701 /* use generic description */
13702 dd->boardname = kstrdup(generic, GFP_KERNEL);
13703 if (!dd->boardname)
13710 * Allocate and initialize the device structure for the hfi.
13711 * @dev: the pci_dev for hfi1_ib device
13712 * @ent: pci_device_id struct for this dev
13714 * Also allocates, initializes, and returns the devdata struct for this
13717 * This is global, and is called directly at init to set up the
13718 * chip-specific function pointers for later use.
13720 struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
13721 const struct pci_device_id *ent)
13723 struct hfi1_devdata *dd;
13724 struct hfi1_pportdata *ppd;
13727 static const char * const inames[] = { /* implementation names */
13729 "RTL VCS simulation",
13730 "RTL FPGA emulation",
13731 "Functional simulator"
13734 dd = hfi1_alloc_devdata(pdev,
13735 NUM_IB_PORTS * sizeof(struct hfi1_pportdata));
13739 for (i = 0; i < dd->num_pports; i++, ppd++) {
13741 /* init common fields */
13742 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
13743 /* DC supports 4 link widths */
13744 ppd->link_width_supported =
13745 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
13746 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
13747 ppd->link_width_downgrade_supported =
13748 ppd->link_width_supported;
13749 /* start out enabling only 4X */
13750 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
13751 ppd->link_width_downgrade_enabled =
13752 ppd->link_width_downgrade_supported;
13753 /* link width active is 0 when link is down */
13754 /* link width downgrade active is 0 when link is down */
13756 if (num_vls < HFI1_MIN_VLS_SUPPORTED
13757 || num_vls > HFI1_MAX_VLS_SUPPORTED) {
13758 hfi1_early_err(&pdev->dev,
13759 "Invalid num_vls %u, using %u VLs\n",
13760 num_vls, HFI1_MAX_VLS_SUPPORTED);
13761 num_vls = HFI1_MAX_VLS_SUPPORTED;
13763 ppd->vls_supported = num_vls;
13764 ppd->vls_operational = ppd->vls_supported;
13765 /* Set the default MTU. */
13766 for (vl = 0; vl < num_vls; vl++)
13767 dd->vld[vl].mtu = hfi1_max_mtu;
13768 dd->vld[15].mtu = MAX_MAD_PACKET;
13770 * Set the initial values to reasonable default, will be set
13771 * for real when link is up.
13773 ppd->lstate = IB_PORT_DOWN;
13774 ppd->overrun_threshold = 0x4;
13775 ppd->phy_error_threshold = 0xf;
13776 ppd->port_crc_mode_enabled = link_crc_mask;
13777 /* initialize supported LTP CRC mode */
13778 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
13779 /* initialize enabled LTP CRC mode */
13780 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
13781 /* start in offline */
13782 ppd->host_link_state = HLS_DN_OFFLINE;
13783 init_vl_arb_caches(ppd);
13786 dd->link_default = HLS_DN_POLL;
13789 * Do remaining PCIe setup and save PCIe values in dd.
13790 * Any error printing is already done by the init code.
13791 * On return, we have the chip mapped.
13793 ret = hfi1_pcie_ddinit(dd, pdev, ent);
13797 /* verify that reads actually work, save revision for reset check */
13798 dd->revision = read_csr(dd, CCE_REVISION);
13799 if (dd->revision == ~(u64)0) {
13800 dd_dev_err(dd, "cannot read chip CSRs\n");
13804 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
13805 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
13806 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
13807 & CCE_REVISION_CHIP_REV_MINOR_MASK;
13809 /* obtain the hardware ID - NOT related to unit, which is a
13810 software enumeration */
13811 reg = read_csr(dd, CCE_REVISION2);
13812 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
13813 & CCE_REVISION2_HFI_ID_MASK;
13814 /* the variable size will remove unwanted bits */
13815 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
13816 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
13817 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
13818 dd->icode < ARRAY_SIZE(inames) ? inames[dd->icode] : "unknown",
13821 /* speeds the hardware can support */
13822 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
13823 /* speeds allowed to run at */
13824 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
13825 /* give a reasonable active value, will be set on link up */
13826 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
13828 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
13829 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
13830 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
13831 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
13832 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
13833 /* fix up link widths for emulation _p */
13835 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
13836 ppd->link_width_supported =
13837 ppd->link_width_enabled =
13838 ppd->link_width_downgrade_supported =
13839 ppd->link_width_downgrade_enabled =
13842 /* ensure num_vls isn't larger than number of sdma engines */
13843 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
13844 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
13845 num_vls, dd->chip_sdma_engines);
13846 num_vls = dd->chip_sdma_engines;
13847 ppd->vls_supported = dd->chip_sdma_engines;
13851 * Convert the ns parameter to the 64 * cclocks used in the CSR.
13852 * Limit the max if larger than the field holds. If timeout is
13853 * non-zero, then the calculated field will be at least 1.
13855 * Must be after icode is set up - the cclock rate depends
13856 * on knowing the hardware being used.
13858 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
13859 if (dd->rcv_intr_timeout_csr >
13860 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
13861 dd->rcv_intr_timeout_csr =
13862 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
13863 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
13864 dd->rcv_intr_timeout_csr = 1;
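/*
 * Worked example (hypothetical cclock): at a 1.25 ns cclock period, a
 * 1280 ns timeout is 1024 cclocks and the CSR field is written as
 * 1024 / 64 == 16; a tiny non-zero timeout would round down to 0 and
 * be bumped to 1 by the clause above.
 */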
13866 /* needs to be done before we look for the peer device */
13869 /* should this device init the ASIC block? */
13870 asic_should_init(dd);
13872 /* obtain chip sizes, reset chip CSRs */
13875 /* read in the PCIe link speed information */
13876 ret = pcie_speeds(dd);
13880 /* read in firmware */
13881 ret = hfi1_firmware_init(dd);
13886 * In general, the PCIe Gen3 transition must occur after the
13887 * chip has been idled (so it won't initiate any PCIe transactions
13888 * e.g. an interrupt) and before the driver changes any registers
13889 * (the transition will reset the registers).
13891 * In particular, place this call after:
13892 * - init_chip() - the chip will not initiate any PCIe transactions
13893 * - pcie_speeds() - reads the current link speed
13894 * - hfi1_firmware_init() - the needed firmware is ready to be
13897 ret = do_pcie_gen3_transition(dd);
	/* start setting dd values and adjusting CSRs */
	init_early_variables(dd);

	parse_platform_config(dd);

	ret = obtain_boardname(dd);
	if (ret)
		goto bail_cleanup;
	snprintf(dd->boardversion, BOARD_VERS_MAX,
		 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
		 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
		 (u32)dd->majrev,
		 (u32)dd->minrev,
		 (dd->revision >> CCE_REVISION_SW_SHIFT)
		    & CCE_REVISION_SW_MASK);

	ret = set_up_context_variables(dd);
	if (ret)
		goto bail_cleanup;
	/* set initial RXE CSRs */
	init_rxe(dd);
	/* set initial TXE CSRs */
	init_txe(dd);
	/* set initial non-RXE, non-TXE CSRs */
	init_other(dd);
	/* set up KDETH QP prefix in both RX and TX CSRs */
	init_kdeth_qp(dd);

	/* send contexts must be set up before receive contexts */
	ret = init_send_contexts(dd);
	if (ret)
		goto bail_cleanup;

	ret = hfi1_create_ctxts(dd);
	if (ret)
		goto bail_cleanup;
	dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
	/*
	 * rcd[0] is guaranteed to be valid by this point. Also, all
	 * contexts are using the same value, as per the module parameter.
	 */
	dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
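	/*
	 * Note on the expression above: the division binds tighter than
	 * the subtraction, so this computes rcvhdrqentsize -
	 * (sizeof(u64) / sizeof(u32)) = rcvhdrqentsize - 2. For example
	 * (illustrative), an entry size of 32 DWORDs puts the 2-DWORD
	 * RHF at offset 30, i.e. in the last quadword of each receive
	 * header entry.
	 */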
	ret = init_pervl_scs(dd);
	if (ret)
		goto bail_cleanup;

	for (i = 0; i < dd->num_pports; ++i) {
		ret = sdma_init(dd, i);
		if (ret)
			goto bail_cleanup;
	}

	/* use contexts created by hfi1_create_ctxts */
	ret = set_up_interrupts(dd);
	if (ret)
		goto bail_cleanup;

	/* set up LCB access - must be after set_up_interrupts() */
	init_lcb_access(dd);
	snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
		 dd->base_guid & 0xFFFFFF);

	dd->oui1 = dd->base_guid >> 56 & 0xFF;
	dd->oui2 = dd->base_guid >> 48 & 0xFF;
	dd->oui3 = dd->base_guid >> 40 & 0xFF;
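	/*
	 * Illustrative decomposition (GUID value invented for the
	 * example): for a base GUID of 0x0011750101234567,
	 *	oui1 = 0x00, oui2 = 0x11, oui3 = 0x75	(the IEEE OUI)
	 * and the serial string above becomes "0x00234567", i.e. the
	 * low 24 bits of the GUID.
	 */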
	ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
	if (ret)
		goto bail_clear_intr;

	check_fabric_firmware_versions(dd);

	ret = init_cntrs(dd);
	if (ret)
		goto bail_clear_intr;

	ret = init_rcverr(dd);
	if (ret)
		goto bail_free_cntrs;

	ret = eprom_init(dd);
	if (ret)
		goto bail_free_rcverr;
	goto bail;

bail_free_rcverr:
	free_rcverr(dd);
bail_free_cntrs:
	free_cntrs(dd);
bail_clear_intr:
	clean_up_interrupts(dd);
bail_cleanup:
	hfi1_pcie_ddcleanup(dd);
	hfi1_free_devdata(dd);
	dd = ERR_PTR(ret);
bail:
	return dd;
}
static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
			u32 dw_len)
{
	u32 delta_cycles;
	u32 current_egress_rate = ppd->current_egress_rate;
	/* rates here are in units of 10^6 bits/sec */

	if (desired_egress_rate == -1)
		return 0; /* shouldn't happen */

	if (desired_egress_rate >= current_egress_rate)
		return 0; /* we can't help go faster, only slower */

	delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
			egress_cycles(dw_len * 4, current_egress_rate);

	return (u16)delta_cycles;
}
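/*
 * Illustrative note on delay_cycles(): at the subtraction above,
 * desired_egress_rate < current_egress_rate, so egress_cycles()
 * returns a larger count for the slower (desired) rate and the delta
 * is positive - it is the extra number of cycles the packet must be
 * stretched so it appears no faster than the requested static rate.
 * dw_len * 4 simply converts DWORDs to bytes.
 */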
/**
 * create_pbc - build a pbc for transmission
 * @ppd: the port this PBC is for
 * @flags: special case flags or-ed into the built pbc
 * @srate_mbs: static rate, in Mbit/s
 * @vl: virtual lane
 * @dw_len: dword length (header words + data words + pbc words)
 *
 * Create a PBC with the given flags, rate, VL, and length.
 *
 * NOTE: The PBC created will not insert any HCRC - all callers but one are
 * for verbs, which does not use this PSM feature. The lone other caller
 * is the diagnostic interface, which calls this if the user does not
 * supply their own PBC.
 */
u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
	       u32 dw_len)
{
	u64 pbc, delay = 0;

	if (unlikely(srate_mbs))
		delay = delay_cycles(ppd, srate_mbs, dw_len);

	pbc = flags
		| (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
		| ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
		| (vl & PBC_VL_MASK) << PBC_VL_SHIFT
		| (dw_len & PBC_LENGTH_DWS_MASK)
			<< PBC_LENGTH_DWS_SHIFT;

	return pbc;
}
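/*
 * Usage sketch (illustrative only; the values are invented and this is
 * not a real call site): building a PBC for a 64-DWORD send on VL 0
 * with no flags and no static rate throttling, then handing it to the
 * send path as the first quadword ahead of the packet:
 *
 *	u64 pbc = create_pbc(ppd, 0, 0, 0, 64);
 *	pio_copy(dd, pbuf, pbc, hdr, hdrwords);
 *
 * The pio_copy() call above stands in for whatever send-path consumer
 * takes the PBC; its use here is an assumption for the example.
 */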
#define SBUS_THERMAL		0x4f
#define SBUS_THERM_MONITOR_MODE	0x1

#define THERM_FAILURE(dev, ret, reason) \
	dd_dev_err((dd),						\
		   "Thermal sensor initialization failed: %s (%d)\n",	\
		   (reason), (ret))
/*
 * Initialize the Avago Thermal sensor.
 *
 * After initialization, enable polling of the thermal sensor through
 * the SBus interface. For this to work, the SBus Master firmware has
 * to be loaded, because the HW polling logic uses SBus interrupts,
 * which are not supported with the default firmware. Otherwise, no
 * data will be returned through the ASIC_STS_THERM CSR.
 */
static int thermal_init(struct hfi1_devdata *dd)
{
	int ret = 0;

	if (dd->icode != ICODE_RTL_SILICON ||
	    !(dd->flags & HFI1_DO_INIT_ASIC))
		return ret;

	acquire_hw_mutex(dd);
	dd_dev_info(dd, "Initializing thermal sensor\n");
	/* Disable polling of thermal readings */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
	/* Thermal Sensor Initialization */
	/*    Step 1: Reset the Thermal SBus Receiver */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				RESET_SBUS_RECEIVER, 0);
	if (ret) {
		THERM_FAILURE(dd, ret, "Bus Reset");
		goto done;
	}
	/*    Step 2: Set Reset bit in Thermal block */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x1);
	if (ret) {
		THERM_FAILURE(dd, ret, "Therm Block Reset");
		goto done;
	}
	/*    Step 3: Write clock divider value (100MHz -> 2MHz) */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
				WRITE_SBUS_RECEIVER, 0x32);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Clock Div");
		goto done;
	}
	/*    Step 4: Select temperature mode */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
				WRITE_SBUS_RECEIVER,
				SBUS_THERM_MONITOR_MODE);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Mode Sel");
		goto done;
	}
	/*    Step 5: De-assert block reset and start conversion */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x2);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Reset Deassert");
		goto done;
	}
	/*    Step 5.1: Wait for first conversion (21.5ms per spec) */
	msleep(22);

	/* Enable polling of thermal readings */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
done:
	release_hw_mutex(dd);
	return ret;
}
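/*
 * Illustrative sketch (the shift/mask names below are assumptions for
 * the example, not taken from this file): once polling is enabled,
 * temperature data surfaces in the ASIC_STS_THERM CSR and could be
 * sampled roughly like so:
 *
 *	u64 reg = read_csr(dd, ASIC_STS_THERM);
 *	curr_temp = (reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
 *		    ASIC_STS_THERM_CURR_TEMP_MASK;
 */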
static void handle_temp_err(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = &dd->pport[0];
	/*
	 * Thermal Critical Interrupt
	 * Put the device into forced freeze mode, take link down to
	 * offline, and put DC into reset.
	 */
	dd_dev_emerg(dd,
		     "Critical temperature reached! Forcing device into freeze mode!\n");
	dd->flags |= HFI1_FORCED_FREEZE;
	start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
	/*
	 * Shut DC down as much and as quickly as possible.
	 *
	 * Step 1: Take the link down to OFFLINE. This will cause the
	 *         8051 to put the SerDes in reset. However, we don't want
	 *         to go through the entire link state machine, since we
	 *         want to shut down ASAP. Furthermore, this is not a
	 *         graceful shutdown, but rather an attempt to save the
	 *         chip. The code below is almost the same as
	 *         quiet_serdes() but avoids all the extra work and the
	 *         sleeps.
	 */
	ppd->driver_link_ready = 0;
	ppd->link_enabled = 0;
	set_physical_link_state(dd, PLS_OFFLINE |
				(OPA_LINKDOWN_REASON_SMA_DISABLED << 8));
	/*
	 * Step 2: Shutdown LCB and 8051
	 *         After shutdown, do not restore DC_CFG_RESET value.
	 */
	dc_shutdown(dd);
}