/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/firmware.h>
#include <linux/stddef.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
int csio_force_master;
int csio_dbg_level = 0xFEFF;
unsigned int csio_port_mask = 0xf;

/* Default FW event queue entries. */
static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;

/* Default MSI param level */
int csio_msi = 2;

/* FCoE function instances */
static int dev_num;
/* FCoE Adapter types & their descriptions */
static const struct csio_adap_desc csio_t4_fcoe_adapters[] = {
	{"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"},
	{"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"},
	{"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"},
	{"T440-CR 10G", "Chelsio T440-CR 10G [FCoE]"},
	{"T420-BCH 10G", "Chelsio T420-BCH 10G [FCoE]"},
	{"T440-BCH 10G", "Chelsio T440-BCH 10G [FCoE]"},
	{"T440-CH 10G", "Chelsio T440-CH 10G [FCoE]"},
	{"T420-SO 10G", "Chelsio T420-SO 10G [FCoE]"},
	{"T420-CX4 10G", "Chelsio T420-CX4 10G [FCoE]"},
	{"T420-BT 10G", "Chelsio T420-BT 10G [FCoE]"},
	{"T404-BT 1G", "Chelsio T404-BT 1G [FCoE]"},
	{"B420-SR 10G", "Chelsio B420-SR 10G [FCoE]"},
	{"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"},
	{"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"},
	{"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"},
	{"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"},
	{"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"},
	{"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"},
	{"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"},
	{"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"},
	{"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"},
	{"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"}
};
static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
	{"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
	{"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
	{"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"},
	{"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
	{"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
	{"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
	{"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
	{"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
	{"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
	{"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
	{"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
	{"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
	{"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
	{"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
	{"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
	{"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
	{"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
	{"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"}
};
static void csio_mgmtm_cleanup(struct csio_mgmtm *);
static void csio_hw_mbm_cleanup(struct csio_hw *);

/* State machine forward declarations */
static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);

static void csio_hw_initialize(struct csio_hw *hw);
static void csio_evtq_stop(struct csio_hw *hw);
static void csio_evtq_start(struct csio_hw *hw);
int csio_is_hw_ready(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_ready);
}

int csio_is_hw_removing(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_removing);
}
/*
 * csio_hw_wait_op_done_val - wait until an operation is completed
 * @hw: the HW module
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times. If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there. Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
static int
csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
			 int polarity, int attempts, int delay, uint32_t *valp)
{
	uint32_t val;

	while (1) {
		val = csio_rd_reg32(hw, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}

		if (--attempts == 0)
			return -EAGAIN;

		if (delay)
			udelay(delay);
	}
}
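/*
 * Illustrative use of csio_hw_wait_op_done_val(), mirroring the serial
 * flash helpers later in this file: poll SF_OP until SF_BUSY clears,
 * checking up to SF_ATTEMPTS times with a 10us delay, discarding the
 * final register value:
 *
 *	ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0,
 *				       SF_ATTEMPTS, 10, NULL);
 */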
/*
 * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 * @hw: the HW module
 * @addr: the indirect TP register address
 * @mask: specifies the field within the register to modify
 * @val: new value for the field
 *
 * Sets a field of an indirect TP register to the given value.
 */
void
csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	csio_wr_reg32(hw, addr, TP_PIO_ADDR);
	val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask;
	csio_wr_reg32(hw, val, TP_PIO_DATA);
}
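/*
 * Note: TP registers are not mapped directly; the target register
 * address goes into TP_PIO_ADDR and the data is then accessed through
 * TP_PIO_DATA, which is why the helper above does its read-modify-write
 * through those two registers.
 */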
void
csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
		   uint32_t value)
{
	uint32_t val = csio_rd_reg32(hw, reg) & ~mask;

	csio_wr_reg32(hw, val | value, reg);
	/* Flush */
	csio_rd_reg32(hw, reg);
}
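/*
 * Illustrative use of csio_set_reg_field(), as done by
 * csio_hw_fw_restart() below to take the uP out of reset:
 *
 *	csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
 */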
static int
csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
{
	return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
					    addr, len, buf, 0);
}
/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL	40
#define EEPROM_MAX_WR_POLL	6
#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_INFO_FLD_HDR_SIZE	3
/*
 * csio_hw_seeprom_read - read a serial EEPROM location
 * @hw: the HW module
 * @addr: EEPROM virtual address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
 * VPD capability. Note that this function must be called with a virtual
 * address.
 */
static int
csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
{
	uint16_t val = 0;
	int attempts = EEPROM_MAX_RD_POLL;
	uint32_t base = hw->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);

	do {
		udelay(10);
		pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
		return -EINVAL;
	}

	pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);

	return 0;
}
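/*
 * Illustrative call (see csio_hw_get_vpd_params() below), reading the
 * first 32-bit word of the VPD area:
 *
 *	ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)vpd);
 */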
/*
 * Partial EEPROM Vital Product Data structure. Includes only the ID and
 * RO sections.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};
/*
 * csio_hw_get_vpd_keyword_val - Locates an information field keyword in
 *				 the VPD
 * @v: Pointer to buffered vpd data structure
 * @kw: The keyword to search for
 *
 * Returns the value of the information field keyword or
 * -EINVAL otherwise.
 */
static int
csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int32_t i;
	int32_t offset, len;
	const uint8_t *buf = &v->id_tag;
	const uint8_t *vpdr_len = &v->vpdr_tag;
	offset = sizeof(struct t4_vpd_hdr);
	len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -EINVAL;

	for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
	}

	return -EINVAL;
}
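/*
 * Each VPD information field carries a 3-byte header
 * (VPD_INFO_FLD_HDR_SIZE): a two-character keyword followed by a
 * one-byte length (buf[i + 2]), which is why the scan above advances by
 * VPD_INFO_FLD_HDR_SIZE + buf[i + 2] on a miss.
 */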
static int
csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
{
	*pos = pci_find_capability(pdev, cap);

	if (*pos)
		return 0;

	return -1;
}
/*
 * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
 * @hw: HW module
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int
csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
{
	int i, ret, ec, sn, addr;
	uint8_t *vpd, csum;
	const struct t4_vpd_hdr *v;
	/* To get around compilation warning from strstrip */
	char *s;

	if (csio_is_valid_vpd(hw))
		return 0;

	ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
				  &hw->params.pci.vpd_cap_addr);
	if (ret)
		return -EINVAL;

	vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
	if (vpd == NULL)
		return -ENOMEM;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < VPD_LEN; i += 4) {
		ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
		if (ret) {
			kfree(vpd);
			return -EINVAL;
		}
	}

	/* Reset the VPD flag! */
	hw->flags &= (~CSIO_HWF_VPD_VALID);

	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = csio_hw_get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		csio_err(hw, "missing VPD keyword " name "\n"); \
		kfree(vpd); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
		kfree(vpd);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	s = strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	s = strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	s = strstrip(p->sn);

	csio_valid_vpd_copied(hw);

	kfree(vpd);
	return 0;
}
/*
 * csio_hw_sf1_read - read data from the serial flash
 * @hw: the HW module
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash. The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
		 int32_t lock, uint32_t *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
		return -EBUSY;

	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;

	csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP);
	ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
				       10, NULL);
	if (!ret)
		*valp = csio_rd_reg32(hw, SF_DATA);

	return ret;
}
/*
 * csio_hw_sf1_write - write data to the serial flash
 * @hw: the HW module
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash. The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
		  int32_t lock, uint32_t val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
		return -EBUSY;

	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;

	csio_wr_reg32(hw, val, SF_DATA);
	csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP);

	return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
					10, NULL);
}
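/*
 * In the two helpers above, @cont (SF_CONT) keeps the flash command
 * sequence open so the next SF operation continues it, and @lock
 * (SF_LOCK) reserves the SF interface for PL access while a multi-step
 * sequence is in flight; csio_hw_read_flash() below chains reads this
 * way, unlocking only on the last word.
 */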
/*
 * csio_hw_flash_wait_op - wait for a flash operation to complete
 * @hw: the HW module
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int
csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
{
	int32_t ret;
	uint32_t status;

	while (1) {
		ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
		if (ret != 0)
			return ret;

		ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
		if (ret != 0)
			return ret;

		if (!(status & 1))
			return 0;

		if (--attempts == 0)
			return -EAGAIN;

		if (delay)
			msleep(delay);
	}
}
/*
 * csio_hw_read_flash - read words from serial flash
 * @hw: the HW module
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * endianness.
 */
static int
csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
		   uint32_t *data, int32_t byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
	if (ret != 0)
		return ret;

	ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
	if (ret != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			csio_wr_reg32(hw, 0, SF_OP);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
/*
 * csio_hw_write_flash - write up to a page of data to the serial flash
 * @hw: the HW module
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address. All the data must be written to the same page.
 */
static int
csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
		    uint32_t n, const uint8_t *data)
{
	int ret = -EINVAL;
	uint32_t buf[64];
	uint32_t i, c, left, val, offset = addr & 0xff;

	if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
	if (ret != 0)
		goto unlock;

	ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
	if (ret != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = csio_hw_flash_wait_op(hw, 8, 1);
	if (ret)
		goto unlock;

	csio_wr_reg32(hw, 0, SF_OP);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
		csio_err(hw,
			 "failed to correctly write the flash page at %#x\n",
			 addr);
		return -EINVAL;
	}

	return 0;

unlock:
	csio_wr_reg32(hw, 0, SF_OP);	/* unlock SF */
	return ret;
}
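/*
 * Illustrative call, as issued by csio_hw_fw_dload() below when writing
 * the first page of a firmware image:
 *
 *	ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page);
 */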
/*
 * csio_hw_flash_erase_sectors - erase a range of flash sectors
 * @hw: the HW module
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int
csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
{
	int ret = 0;

	while (start <= end) {

		ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
		if (ret != 0)
			goto out;

		ret = csio_hw_sf1_write(hw, 4, 0, 1,
					SF_ERASE_SECTOR | (start << 8));
		if (ret != 0)
			goto out;

		ret = csio_hw_flash_wait_op(hw, 14, 500);
		if (ret != 0)
			goto out;

		start++;
	}
out:
	if (ret)
		csio_err(hw, "erase of flash sector %d failed, error %d\n",
			 start, ret);
	csio_wr_reg32(hw, 0, SF_OP);	/* unlock SF */
	return 0;
}
static void
csio_hw_print_fw_version(struct csio_hw *hw, char *str)
{
	csio_info(hw, "%s: %u.%u.%u.%u\n", str,
		  FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
		  FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
		  FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
		  FW_HDR_FW_VER_BUILD_GET(hw->fwrev));
}
/*
 * csio_hw_get_fw_version - read the firmware version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
static int
csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
{
	return csio_hw_read_flash(hw, FW_IMG_START +
				  offsetof(struct fw_hdr, fw_ver), 1,
				  vers, 0);
}
/*
 * csio_hw_get_tp_version - read the TP microcode version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
static int
csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
				  offsetof(struct fw_hdr, tp_microcode_ver), 1,
				  vers, 0);
}
/*
 * csio_hw_check_fw_version - check if the FW is compatible with
 *			      this driver
 * @hw: HW module
 *
 * Checks if an adapter's FW is compatible with the driver. Returns 0
 * if there's an exact match, a negative error if the version could not
 * be read or there's a major/minor version mismatch.
 */
static int
csio_hw_check_fw_version(struct csio_hw *hw)
{
	int ret, major, minor, micro;

	ret = csio_hw_get_fw_version(hw, &hw->fwrev);
	if (!ret)
		ret = csio_hw_get_tp_version(hw, &hw->tp_vers);
	if (ret)
		return ret;

	major = FW_HDR_FW_VER_MAJOR_GET(hw->fwrev);
	minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev);
	micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev);

	if (major != FW_VERSION_MAJOR(hw)) {	/* major mismatch - fail */
		csio_err(hw, "card FW has major version %u, driver wants %u\n",
			 major, FW_VERSION_MAJOR(hw));
		return -EINVAL;
	}

	if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw))
		return 0;	/* perfect match */

	/* Minor/micro version mismatch */
	return -EINVAL;
}
/*
 * csio_hw_fw_dload - download firmware.
 * @hw: HW module
 * @fw_data: firmware image to write.
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
static int
csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
{
	uint32_t csum;
	int32_t addr;
	int ret;
	uint32_t i;
	uint8_t first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
	uint32_t sf_sec_size;

	if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
		csio_err(hw, "Serial Flash data invalid\n");
		return -EINVAL;
	}

	if (!size) {
		csio_err(hw, "FW image has no data\n");
		return -EINVAL;
	}

	if (size & 511) {
		csio_err(hw, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}

	if (ntohs(hdr->len512) * 512 != size) {
		csio_err(hw, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}

	if (size > FW_MAX_SIZE) {
		csio_err(hw, "FW image too large, max is %u bytes\n",
			 FW_MAX_SIZE);
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */

	csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
		 FW_START_SEC, FW_START_SEC + i - 1);

	ret = csio_hw_flash_erase_sectors(hw, FW_START_SEC,
					  FW_START_SEC + i - 1);
	if (ret) {
		csio_err(hw, "Flash Erase failed\n");
		goto out;
	}

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails. Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
		 FW_IMG_START, FW_IMG_START + size);

	addr = FW_IMG_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = csio_hw_write_flash(hw,
				  FW_IMG_START +
					offsetof(struct fw_hdr, fw_ver),
				  sizeof(hdr->fw_ver),
				  (const uint8_t *)&hdr->fw_ver);

out:
	if (ret)
		csio_err(hw, "firmware download failed, error %d\n", ret);
	return ret;
}
static int
csio_hw_get_flash_params(struct csio_hw *hw)
{
	int ret;
	uint32_t info = 0;

	ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
	csio_wr_reg32(hw, 0, SF_OP);	/* unlock SF */
	if (ret != 0)
		return ret;

	if ((info & 0xff) != 0x20)		/* not a Numonix flash */
		return -EINVAL;
	info >>= 16;				/* log2 of size */
	if (info >= 0x14 && info < 0x18)
		hw->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		hw->params.sf_nsec = 64;
	else
		return -EINVAL;
	hw->params.sf_size = 1 << info;

	return 0;
}
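/*
 * Worked example for the decoding above: info = 0x17 implies a
 * 1 << 0x17 = 8 MB part organized as 1 << (0x17 - 16) = 128 sectors,
 * i.e. the 64 KB sector size that csio_hw_fw_dload() later derives as
 * sf_size / sf_nsec.
 */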
static void
csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
{
	uint16_t val;
	int pcie_cap;

	if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) {
		pci_read_config_word(hw->pdev,
				     pcie_cap + PCI_EXP_DEVCTL2, &val);
		val &= 0xfff0;
		val |= range;
		pci_write_config_word(hw->pdev,
				      pcie_cap + PCI_EXP_DEVCTL2, val);
	}
}
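/*
 * The low four bits of PCI_EXP_DEVCTL2 select the PCIe completion
 * timeout range; csio_hw_configure() below passes 0xd to request a
 * timeout of up to 4 seconds.
 */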
/*****************************************************************************/
/* HW State machine assists                                                  */
/*****************************************************************************/
static int
csio_hw_dev_ready(struct csio_hw *hw)
{
	uint32_t reg;
	int cnt = 6;

	while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) &&
	       (--cnt != 0))
		mdelay(100);

	if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) ||
			   (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) {
		csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
		return -EIO;
	}

	hw->pfn = SOURCEPF_GET(reg);

	return 0;
}
/*
 * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
 * @hw: HW module
 * @state: Device state
 *
 * FW_HELLO_CMD has to be polled for completion.
 */
static int
csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
{
	struct csio_mb *mbp;
	int rv = 0;
	enum csio_dev_master master;
	enum fw_retval retval;
	uint8_t mpfn;
	char state_str[16];
	int retries = FW_CMD_HELLO_RETRIES;

	memset(state_str, 0, sizeof(state_str));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		rv = -ENOMEM;
		CSIO_INC_STATS(hw, n_err_nomem);
		goto out;
	}

	master = csio_force_master ? CSIO_MASTER_MUST : CSIO_MASTER_MAY;

retry:
	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
		      hw->pfn, master, NULL);

	rv = csio_mb_issue(hw, mbp);
	if (rv) {
		csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
		goto out_free_mb;
	}

	csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
		rv = -EINVAL;
		goto out_free_mb;
	}

	/* Firmware has designated us to be master */
	if (hw->pfn == mpfn) {
		hw->flags |= CSIO_HWF_MASTER;
	} else if (*state == CSIO_DEV_STATE_UNINIT) {
		/*
		 * If we're not the Master PF then we need to wait around for
		 * the Master PF Driver to finish setting up the adapter.
		 *
		 * Note that we also do this wait if we're a non-Master-capable
		 * PF and there is no current Master PF; a Master PF may show up
		 * momentarily and we wouldn't want to fail pointlessly. (This
		 * can happen when an OS loads lots of different drivers rapidly
		 * at the same time). In this case, the Master PF returned by
		 * the firmware will be PCIE_FW_MASTER_MASK so the test below
		 * will work ...
		 */

		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state. If we see either of these we bail out
		 * and report the issue to the caller. If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again. Otherwise bail with a timeout error.
		 */
		for (;;) {
			uint32_t pcie_fw;

			spin_unlock_irq(&hw->lock);
			msleep(50);
			spin_lock_irq(&hw->lock);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = csio_rd_reg32(hw, PCIE_FW);
			if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					rv = -ETIMEDOUT;
					break;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR) {
					*state = CSIO_DEV_STATE_ERR;
					rv = -ETIMEDOUT;
				} else if (pcie_fw & PCIE_FW_INIT)
					*state = CSIO_DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (mpfn == PCIE_FW_MASTER_MASK &&
			    (pcie_fw & PCIE_FW_MASTER_VLD))
				mpfn = PCIE_FW_MASTER_GET(pcie_fw);
			break;
		}
		hw->flags &= ~CSIO_HWF_MASTER;
	}

	switch (*state) {
	case CSIO_DEV_STATE_UNINIT:
		strcpy(state_str, "Initializing");
		break;
	case CSIO_DEV_STATE_INIT:
		strcpy(state_str, "Initialized");
		break;
	case CSIO_DEV_STATE_ERR:
		strcpy(state_str, "Error");
		break;
	default:
		strcpy(state_str, "Unknown");
		break;
	}

	if (hw->pfn == mpfn)
		csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
			  hw->pfn, state_str);
	else
		csio_info(hw,
			  "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
			  hw->pfn, mpfn, state_str);

out_free_mb:
	mempool_free(mbp, hw->mb_mempool);
out:
	return rv;
}
/*
 * csio_do_bye - Perform the BYE FW Mailbox command and process response.
 * @hw: HW module
 *
 */
static int
csio_do_bye(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of BYE command failed\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	return 0;
}
/*
 * csio_do_reset - Perform the device reset.
 * @hw: HW module
 * @fw_rst: FW reset
 *
 * If fw_rst is set, issues FW reset mbox cmd otherwise
 * does PIO reset.
 * Performs reset of the function.
 */
static int
csio_do_reset(struct csio_hw *hw, bool fw_rst)
{
	struct csio_mb *mbp;
	enum fw_retval retval;

	if (!fw_rst) {
		/* PIO reset */
		csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
		mdelay(2000);
		return 0;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
		      PIORSTMODE | PIORST, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of RESET command failed.\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	return 0;
}
static int
csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
	uint16_t caps;

	caps = ntohs(rsp->fcoecaps);

	if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
		csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
		return -EINVAL;
	}

	if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
		csio_err(hw, "No FCoE Control Offload capability\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET
 * @hw: the HW module
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @force: force uP into RESET even if FW RESET command fails
 *
 * Issues a RESET command to firmware (if desired) with a HALT indication
 * and then puts the microprocessor into RESET state. The RESET command
 * will only be issued if a legitimate mailbox is provided (mbox <=
 * PCIE_FW_MASTER_MASK).
 *
 * This is generally used in order for the host to safely manipulate the
 * adapter without fear of conflicting with whatever the firmware might
 * be doing. The only way out of this state is to RESTART the firmware.
 */
static int
csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
{
	enum fw_retval retval = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_MASK) {
		struct csio_mb *mbp;

		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp) {
			CSIO_INC_STATS(hw, n_err_nomem);
			return -ENOMEM;
		}

		csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
			      PIORSTMODE | PIORST, FW_RESET_CMD_HALT(1),
			      NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of RESET command failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		mempool_free(mbp, hw->mb_mempool);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET. This can be useful if the firmware is hung or even
	 * missing ... We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability. This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (retval == 0 || force) {
		csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, UPCRST);
		csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return retval ? -EINVAL : 0;
}
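/*
 * The HALT set up above is sticky: PCIE_FW_HALT stays set until
 * csio_hw_fw_restart() below clears it and takes the uP out of RESET,
 * which is the only way out of this state.
 */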
/*
 * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
 * @hw: the HW module
 * @reset: if we want to do a RESET to restart things
 *
 * Restart firmware previously halted by csio_hw_fw_halt(). On successful
 * return the previous PF Master remains as the new PF Master and there
 * is no need to issue a new HELLO command, etc.
 *
 * We do this in two ways:
 *
 * 1. If we're dealing with newer firmware we'll simply want to take
 *    the chip's microprocessor out of RESET. This will cause the
 *    firmware to start up from its start vector. And then we'll loop
 *    until the firmware indicates it's started again (PCIE_FW.HALT
 *    reset to 0) or we timeout.
 *
 * 2. If we're dealing with older firmware then we'll need to RESET
 *    the chip since older firmware won't recognize the PCIE_FW.HALT
 *    flag and automatically RESET itself on startup.
 */
static int
csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET. If that works, great and we can
		 * return success. Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= PCIE_FW_MASTER_MASK) {
			csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
			msleep(100);
			if (csio_do_reset(hw, true) == 0)
				return 0;
		}

		csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
		msleep(2000);
	} else {
		int ms;

		csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(csio_rd_reg32(hw, PCIE_FW) & PCIE_FW_HALT))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}
/*
 * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
 * @hw: the HW module
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @fw_data: the firmware image to write
 * @size: image size
 * @force: force upgrade even if firmware doesn't cooperate
 *
 * Perform all of the steps necessary for upgrading an adapter's
 * firmware image. Normally this requires the cooperation of the
 * existing firmware in order to halt all existing activities
 * but if an invalid mailbox token is passed in we skip that step
 * (though we'll still put the adapter microprocessor into RESET in
 * that case).
 *
 * On successful return the new firmware will have been loaded and
 * the adapter will have been fully RESET losing all previous setup
 * state. On unsuccessful return the adapter may be completely hosed ...
 * positive errno indicates that the adapter is ~probably~ intact, a
 * negative errno indicates that things are looking bad ...
 */
static int
csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
		   const u8 *fw_data, uint32_t size, int32_t force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	ret = csio_hw_fw_halt(hw, mbox, force);
	if (ret != 0 && !force)
		return ret;

	ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
	if (ret != 0)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart. So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate. We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return csio_hw_fw_restart(hw, mbox, reset);
}
/*
 * csio_hw_fw_config_file - setup an adapter via a Configuration File
 * @hw: the HW module
 * @mtype: the memory type where the Configuration File is located
 * @maddr: the memory address where the Configuration File is located
 * @finiver: return value for CF [fini] version
 * @finicsum: return value for CF [fini] checksum
 * @cfcsum: return value for CF computed checksum
 *
 * Issue a command to get the firmware to process the Configuration
 * File located at the specified mtype/maddress. If the Configuration
 * File is processed successfully and return value pointers are
 * provided, the Configuration File [fini] section version and
 * checksum values will be returned along with the computed checksum.
 * It's up to the caller to decide how it wants to respond to the
 * checksums not matching, but it is recommended that a prominent
 * warning be emitted in order to help people rapidly identify changed
 * or corrupted Configuration Files.
 *
 * Also note that it's possible to modify things like "niccaps",
 * "toecaps", etc. between processing the Configuration File and telling
 * the firmware to use the new configuration. Callers which want to
 * do this will need to "hand-roll" their own CAPS_CONFIG commands for
 * Configuration Files.
 */
static int
csio_hw_fw_config_file(struct csio_hw *hw,
		      unsigned int mtype, unsigned int maddr,
		      uint32_t *finiver, uint32_t *finicsum, uint32_t *cfcsum)
{
	struct csio_mb *mbp;
	struct fw_caps_config_cmd *caps_cmd;
	int rv = -EINVAL;
	enum fw_retval ret;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}
	/*
	 * Tell the firmware to process the indicated Configuration File.
	 * If there are no errors and the caller has provided return value
	 * pointers for the [fini] section version, checksum and computed
	 * checksum, pass those back to the caller.
	 */
	caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
	CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_READ);
	caps_cmd->cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
		      FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
		goto out;
	}

	ret = csio_mb_fw_retval(mbp);
	if (ret != FW_SUCCESS) {
		csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", ret);
		goto out;
	}

	if (finiver)
		*finiver = ntohl(caps_cmd->finiver);
	if (finicsum)
		*finicsum = ntohl(caps_cmd->finicsum);
	if (cfcsum)
		*cfcsum = ntohl(caps_cmd->cfcsum);

	/* Validate device capabilities */
	if (csio_hw_validate_caps(hw, mbp)) {
		rv = -ENOENT;
		goto out;
	}

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_WRITE);
	caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
		goto out;
	}

	ret = csio_mb_fw_retval(mbp);
	if (ret != FW_SUCCESS) {
		csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", ret);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}
/*
 * csio_get_device_params - Get device parameters.
 * @hw: HW module
 *
 */
static int
csio_get_device_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_mb *mbp;
	enum fw_retval retval;
	u32 param[6];
	int i, j = 0;

	/* Initialize portids to -1 */
	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		hw->pport[i].portid = -1;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get port vec information. */
	param[0] = FW_PARAM_DEV(PORTVEC);

	/* Get Core clock. */
	param[1] = FW_PARAM_DEV(CCLK);

	/* Get EQ id start and end. */
	param[2] = FW_PARAM_PFVF(EQ_START);
	param[3] = FW_PARAM_PFVF(EQ_END);

	/* Get IQ id start and end. */
	param[4] = FW_PARAM_PFVF(IQFLINT_START);
	param[5] = FW_PARAM_PFVF(IQFLINT_END);

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(param), param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(param), param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* cache the information. */
	hw->port_vec = param[0];
	hw->vpd.cclk = param[1];
	wrm->fw_eq_start = param[2];
	wrm->fw_iq_start = param[4];

	/* Using FW configured max iqs & eqs */
	if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
	    !csio_is_hw_master(hw)) {
		hw->cfg_niq = param[5] - param[4] + 1;
		hw->cfg_neq = param[3] - param[2] + 1;
		csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
			 hw->cfg_niq, hw->cfg_neq);
	}

	hw->port_vec &= csio_port_mask;

	hw->num_pports = hweight32(hw->port_vec);

	csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
		 hw->port_vec, hw->num_pports);

	for (i = 0; i < hw->num_pports; i++) {
		while ((hw->port_vec & (1 << j)) == 0)
			j++;
		hw->pport[i].portid = j++;
		csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
	}
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
/*
 * csio_config_device_caps - Get and set device capabilities.
 * @hw: HW module
 *
 */
static int
csio_config_device_caps(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	int rv = -EINVAL;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
		goto out;
	}

	/* Validate device capabilities */
	if (csio_hw_validate_caps(hw, mbp))
		goto out;

	/* Don't config device capabilities if already configured */
	if (hw->fw_state == CSIO_DEV_STATE_INIT) {
		rv = 0;
		goto out;
	}

	/* Write back desired device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
			    false, true, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}
/*
 * csio_enable_ports - Bring up all available ports.
 * @hw: HW module
 *
 */
static int
csio_enable_ports(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	uint8_t portid;
	int i;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	for (i = 0; i < hw->num_pports; i++) {
		portid = hw->pport[i].portid;

		/* Read PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
			     false, 0, 0, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		csio_mb_process_read_port_rsp(hw, mbp, &retval,
					      &hw->pport[i].pcap);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		/* Write back PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true,
			     (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

	} /* For all ports */

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
/*
 * csio_get_fcoe_resinfo - Read fcoe fw resource info.
 * @hw: HW module
 * Issued with lock held.
 */
static int
csio_get_fcoe_resinfo(struct csio_hw *hw)
{
	struct csio_fcoe_res_info *res_info = &hw->fres_info;
	struct fw_fcoe_res_info_cmd *rsp;
	struct csio_mb *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get FCoE FW resource information */
	csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
	retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	res_info->e_d_tov = ntohs(rsp->e_d_tov);
	res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
	res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
	res_info->r_r_tov = ntohs(rsp->r_r_tov);
	res_info->max_xchgs = ntohl(rsp->max_xchgs);
	res_info->max_ssns = ntohl(rsp->max_ssns);
	res_info->used_xchgs = ntohl(rsp->used_xchgs);
	res_info->used_ssns = ntohl(rsp->used_ssns);
	res_info->max_fcfs = ntohl(rsp->max_fcfs);
	res_info->max_vnps = ntohl(rsp->max_vnps);
	res_info->used_fcfs = ntohl(rsp->used_fcfs);
	res_info->used_vnps = ntohl(rsp->used_vnps);

	csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
		 res_info->max_xchgs);
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
static int
csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	u32 _param[1];

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Find out whether we're dealing with a version of
	 * the firmware which has configuration file support.
	 */
	_param[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(_param), _param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(_param), _param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	*param = _param[0];

	return 0;
}
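/*
 * Illustrative use (see csio_hw_configure() below): a zero return with
 * *param filled in means the firmware supports Configuration Files; the
 * file's location is then decoded with FW_PARAMS_PARAM_Y_GET() (memory
 * type) and FW_PARAMS_PARAM_Z_GET() << 16 (memory address).
 */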
static int
csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
{
	int ret = 0;
	const struct firmware *cf;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	unsigned int mtype = 0, maddr = 0;
	uint32_t *cfg_data;
	int value_to_add = 0;

	if (request_firmware(&cf, CSIO_CF_FNAME(hw), dev) < 0) {
		csio_err(hw, "could not find config file %s, err: %d\n",
			 CSIO_CF_FNAME(hw), ret);
		return -ENOENT;
	}

	if (cf->size % 4 != 0)
		value_to_add = 4 - (cf->size % 4);
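	/* e.g. a 1031-byte file gets value_to_add = 1, padding it to 1032 */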
	cfg_data = kzalloc(cf->size + value_to_add, GFP_KERNEL);
	if (cfg_data == NULL) {
		ret = -ENOMEM;
		goto leave;
	}

	memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
	if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
		ret = -EINVAL;
		goto leave;
	}

	mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
	maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;

	ret = csio_memory_write(hw, mtype, maddr,
				cf->size + value_to_add, cfg_data);

	if ((ret == 0) && (value_to_add != 0)) {
		union {
			u32 word;
			char buf[4];
		} last;
		size_t size = cf->size & ~0x3;
		int i;

		last.word = cfg_data[size >> 2];
		for (i = value_to_add; i < 4; i++)
			last.buf[i] = 0;
		ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
	}
	if (ret == 0) {
		csio_info(hw, "config file upgraded to %s\n",
			  CSIO_CF_FNAME(hw));
		snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw));
	}

leave:
	kfree(cfg_data);
	release_firmware(cf);
	return ret;
}
/*
 * HW initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration -- either using the configuration
 * file stored in flash on the adapter or using a filesystem-local file
 * if available.
 *
 * If we don't have configuration file support in the firmware, then we'll
 * have to set things up the old fashioned way with hard-coded register
 * writes and firmware commands ...
 */

/*
 * Attempt to initialize the HW via a Firmware Configuration File.
 */
static int
csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
{
	unsigned int mtype, maddr;
	int rv;
	uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
	int using_flash;
	char path[64];

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		rv = csio_do_reset(hw, true);
		if (rv != 0)
			goto bye;
	}

	/*
	 * If we have a configuration file on the host,
	 * then use that. Otherwise, use the configuration file stored
	 * in the HW flash ...
	 */
	spin_unlock_irq(&hw->lock);
	rv = csio_hw_flash_config(hw, fw_cfg_param, path);
	spin_lock_irq(&hw->lock);
	if (rv != 0) {
		if (rv == -ENOENT) {
			/*
			 * config file was not found. Use default
			 * config file from flash.
			 */
			mtype = FW_MEMTYPE_CF_FLASH;
			maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
			using_flash = 1;
		} else {
			/*
			 * we revert back to the hardwired config if
			 * flashing failed.
			 */
			goto bye;
		}
	} else {
		mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
		maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;
		using_flash = 0;
	}

	hw->cfg_store = (uint8_t)mtype;

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.
	 */
	rv = csio_hw_fw_config_file(hw, mtype, maddr, &finiver,
				    &finicsum, &cfcsum);
	if (rv != 0)
		goto bye;

	hw->cfg_finiver = finiver;
	hw->cfg_finicsum = finicsum;
	hw->cfg_cfcsum = cfcsum;
	hw->cfg_csum_status = true;

	if (finicsum != cfcsum) {
		csio_warn(hw,
			  "Config File checksum mismatch: csum=%#x, computed=%#x\n",
			  finicsum, cfcsum);

		hw->cfg_csum_status = false;
	}

	/*
	 * Note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

	/* device parameters */
	rv = csio_get_device_params(hw);
	if (rv != 0)
		goto bye;

	/* Configure SGE */
	csio_wr_sge_init(hw);

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	/* Post event to notify completion of configuration */
	csio_post_event(&hw->sm, CSIO_HWE_INIT);

	csio_info(hw,
		  "Firmware Configuration File %s, version %#x, computed checksum %#x\n",
		  (using_flash ? "in device FLASH" : path), finiver, cfcsum);

	return 0;

	/*
	 * Something bad happened. Return the error ...
	 */
bye:
	hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
	csio_dbg(hw, "Configuration file error %d\n", rv);
	return rv;
}
/*
 * Attempt to initialize the adapter via hard-coded, driver supplied
 * parameters ...
 */
static int
csio_hw_no_fwconfig(struct csio_hw *hw, int reset)
{
	int rv;
	/*
	 * Reset device if necessary
	 */
	if (reset) {
		rv = csio_do_reset(hw, true);
		if (rv != 0)
			goto out;
	}

	/* Get and set device capabilities */
	rv = csio_config_device_caps(hw);
	if (rv != 0)
		goto out;

	/* device parameters */
	rv = csio_get_device_params(hw);
	if (rv != 0)
		goto out;

	/* Configure SGE */
	csio_wr_sge_init(hw);

	/* Post event to notify completion of configuration */
	csio_post_event(&hw->sm, CSIO_HWE_INIT);

out:
	return rv;
}
/*
 * Returns -EINVAL if attempts to flash the firmware failed, 0 on
 * success, and -ECANCELED if flashing was not attempted because the
 * card already had the latest firmware.
 */
static int
csio_hw_flash_fw(struct csio_hw *hw)
{
	int ret = -ECANCELED;
	const struct firmware *fw;
	const struct fw_hdr *hdr;
	u32 fw_ver;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;

	if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) {
		csio_err(hw, "could not find firmware image %s, err: %d\n",
			 CSIO_FW_FNAME(hw), ret);
		return -EINVAL;
	}

	hdr = (const struct fw_hdr *)fw->data;
	fw_ver = ntohl(hdr->fw_ver);
	if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR(hw))
		return -EINVAL;	/* wrong major version, won't do */

	/*
	 * If the flash FW is unusable or we found something newer, load it.
	 */
	if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
	    fw_ver > hw->fwrev) {
		ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
					 /*force=*/false);
		if (!ret)
			csio_info(hw,
				  "firmware upgraded to version %pI4 from %s\n",
				  &hdr->fw_ver, CSIO_FW_FNAME(hw));
		else
			csio_err(hw, "firmware upgrade failed! err=%d\n", ret);
	} else
		ret = -EINVAL;

	release_firmware(fw);

	return ret;
}
/*
 * csio_hw_configure - Configure HW
 * @hw: HW module
 *
 */
static void
csio_hw_configure(struct csio_hw *hw)
{
	int reset = 1;
	int rv;
	u32 param[1];

	rv = csio_hw_dev_ready(hw);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* HW version */
	hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV);

	/* Needed for FW download */
	rv = csio_hw_get_flash_params(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Set pci completion timeout value to 4 seconds. */
	csio_set_pcie_completion_timeout(hw, 0xd);

	hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);

	rv = csio_hw_get_fw_version(hw, &hw->fwrev);
	if (rv != 0)
		goto out;

	csio_hw_print_fw_version(hw, "Firmware revision");

	rv = csio_do_hello(hw, &hw->fw_state);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Read vpd */
	rv = csio_hw_get_vpd_params(hw, &hw->vpd);
	if (rv != 0)
		goto out;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		rv = csio_hw_check_fw_version(hw);
		if (rv == -EINVAL) {

			/* Do firmware update */
			spin_unlock_irq(&hw->lock);
			rv = csio_hw_flash_fw(hw);
			spin_lock_irq(&hw->lock);

			if (rv == 0) {
				reset = 0;
				/*
				 * Note that the chip was reset as part of the
				 * firmware upgrade so we don't reset it again
				 * below and grab the new firmware version.
				 */
				rv = csio_hw_check_fw_version(hw);
			}
		}
		/*
		 * If the firmware doesn't support Configuration
		 * Files, use the old Driver-based, hard-wired
		 * initialization. Otherwise, try using the
		 * Configuration File support and fall back to the
		 * Driver-based initialization if there's no
		 * Configuration File found.
		 */
		if (csio_hw_check_fwconfig(hw, param) == 0) {
			rv = csio_hw_use_fwconfig(hw, reset, param);
			if (rv == -ENOENT)
				goto out;
			if (rv != 0) {
				csio_info(hw,
					  "No Configuration File present "
					  "on adapter. Using hard-wired "
					  "configuration parameters.\n");
				rv = csio_hw_no_fwconfig(hw, reset);
			}
		} else {
			rv = csio_hw_no_fwconfig(hw, reset);
		}

		if (rv != 0)
			goto out;

	} else {
		if (hw->fw_state == CSIO_DEV_STATE_INIT) {

			hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

			/* device parameters */
			rv = csio_get_device_params(hw);
			if (rv != 0)
				goto out;

			/* Get device capabilities */
			rv = csio_config_device_caps(hw);
			if (rv != 0)
				goto out;

			/* Configure SGE */
			csio_wr_sge_init(hw);

			/* Post event to notify completion of configuration */
			csio_post_event(&hw->sm, CSIO_HWE_INIT);
		}
	} /* if not master */

out:
	return;
}
/*
 * csio_hw_initialize - Initialize HW
 * @hw: HW module
 *
 */
static void
csio_hw_initialize(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	int rv;
	int i;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp)
			goto out;

		csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
			goto free_and_out;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
				 retval);
			goto free_and_out;
		}

		mempool_free(mbp, hw->mb_mempool);
	}

	rv = csio_get_fcoe_resinfo(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
		goto out;
	}

	spin_unlock_irq(&hw->lock);
	rv = csio_config_queues(hw);
	spin_lock_irq(&hw->lock);

	if (rv != 0) {
		csio_err(hw, "Config of queues failed!: %d\n", rv);
		goto out;
	}

	for (i = 0; i < hw->num_pports; i++)
		hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		rv = csio_enable_ports(hw);
		if (rv != 0) {
			csio_err(hw, "Failed to enable ports: %d\n", rv);
			goto out;
		}
	}

	csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
	return;

free_and_out:
	mempool_free(mbp, hw->mb_mempool);
out:
	return;
}
#define PF_INTR_MASK (PFSW | PFCIM)
/*
 * csio_hw_intr_enable - Enable HW interrupts
 * @hw: Pointer to HW module.
 *
 * Enable interrupts in HW registers.
 */
static void
csio_hw_intr_enable(struct csio_hw *hw)
{
	uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
	uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
	uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE);

	/*
	 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
	 * by FW, so do nothing for INTX.
	 */
	if (hw->intr_mode == CSIO_IM_MSIX)
		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
				   AIVEC(AIVEC_MASK), vec);
	else if (hw->intr_mode == CSIO_IM_MSI)
		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
				   AIVEC(AIVEC_MASK), 0);

	csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE));

	/* Turn on MB interrupts - this will internally flush PIO as well */
	csio_mb_intr_enable(hw);

	/* These are common registers - only a master can modify them */
	if (csio_is_hw_master(hw)) {
		/*
		 * Disable the Serial FLASH interrupt, if enabled!
		 */
		pl &= (~SF);
		csio_wr_reg32(hw, pl, PL_INT_ENABLE);

		csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE |
			      EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC |
			      ERR_CPL_OPCODE_0 | ERR_DROPPED_DB |
			      ERR_DATA_CPL_ON_HIGH_QID1 |
			      ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
			      ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
			      ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
			      ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR,
			      SGE_INT_ENABLE3);
		csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf);
	}

	hw->flags |= CSIO_HWF_HW_INTR_ENABLED;

}
/*
 * csio_hw_intr_disable - Disable HW interrupts
 * @hw: Pointer to HW module.
 *
 * Turn off Mailbox and PCI_PF_CFG interrupts.
 */
void
csio_hw_intr_disable(struct csio_hw *hw)
{
	uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));

	if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
		return;

	hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;

	csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE));
	if (csio_is_hw_master(hw))
		csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0);

	/* Turn off MB interrupts */
	csio_mb_intr_disable(hw);

}
void
csio_hw_fatal_err(struct csio_hw *hw)
{
	csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0);
	csio_hw_intr_disable(hw);

	/* Do not reset HW, we may need FW state for debugging */
	csio_fatal(hw, "HW Fatal error encountered!\n");
}
/*****************************************************************************/
/* START: HW SM                                                              */
/*****************************************************************************/
/*
 * csio_hws_uninit - Uninit state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_CFG:
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_configuring - Configuring state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_VALIDATED:
		csio_set_state(&hw->sm, csio_hws_initializing);
		csio_hw_initialize(hw);
		break;

	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);
		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_initializing - Initializing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);

		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);

		/* Enable interrupts */
		csio_hw_intr_enable(hw);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_ready - Ready state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
{
	/* Remember the event */
	hw->evtflag = evt;

	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
	case CSIO_HWE_FW_DLOAD:
	case CSIO_HWE_SUSPEND:
	case CSIO_HWE_PCI_REMOVE:
	case CSIO_HWE_PCIERR_DETECTED:
		csio_set_state(&hw->sm, csio_hws_quiescing);
		/* cleanup all outstanding cmds */
		if (evt == CSIO_HWE_HBA_RESET ||
		    evt == CSIO_HWE_PCIERR_DETECTED)
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
		else
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);

		csio_hw_intr_disable(hw);
		csio_hw_mbm_cleanup(hw);
		csio_evtq_stop(hw);
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
		csio_evtq_flush(hw);
		csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
		csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
2482 * csio_hws_quiescing - Quiescing state
2488 csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
2490 hw->prev_evt = hw->cur_evt;
2492 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2495 case CSIO_HWE_QUIESCED:
2496 switch (hw->evtflag) {
2497 case CSIO_HWE_FW_DLOAD:
2498 csio_set_state(&hw->sm, csio_hws_resetting);
2499 /* Download firmware */
2502 case CSIO_HWE_HBA_RESET:
2503 csio_set_state(&hw->sm, csio_hws_resetting);
2504 /* Start reset of the HBA */
2505 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
2506 csio_wr_destroy_queues(hw, false);
2507 csio_do_reset(hw, false);
2508 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
2511 case CSIO_HWE_PCI_REMOVE:
2512 csio_set_state(&hw->sm, csio_hws_removing);
2513 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
2514 csio_wr_destroy_queues(hw, true);
2515 /* Now send the BYE command */
2519 case CSIO_HWE_SUSPEND:
2520 csio_set_state(&hw->sm, csio_hws_quiesced);
2523 case CSIO_HWE_PCIERR_DETECTED:
2524 csio_set_state(&hw->sm, csio_hws_pcierr);
2525 csio_wr_destroy_queues(hw, false);
2529 CSIO_INC_STATS(hw, n_evt_unexp);
2536 CSIO_INC_STATS(hw, n_evt_unexp);
2542 * csio_hws_quiesced - Quiesced state
2548 csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
2550 hw->prev_evt = hw->cur_evt;
2552 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2555 case CSIO_HWE_RESUME:
2556 csio_set_state(&hw->sm, csio_hws_configuring);
2557 csio_hw_configure(hw);
2561 CSIO_INC_STATS(hw, n_evt_unexp);
2567 * csio_hws_resetting - HW Resetting state
2573 csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
2575 hw->prev_evt = hw->cur_evt;
2577 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2580 case CSIO_HWE_HBA_RESET_DONE:
2581 csio_evtq_start(hw);
2582 csio_set_state(&hw->sm, csio_hws_configuring);
2583 csio_hw_configure(hw);
2587 CSIO_INC_STATS(hw, n_evt_unexp);
2593 * csio_hws_removing - PCI Hotplug removing state
2599 csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
2601 hw->prev_evt = hw->cur_evt;
2603 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2606 case CSIO_HWE_HBA_RESET:
2607 if (!csio_is_hw_master(hw))
2610 * The BYE should have already been issued, so we can't
2611 * use the mailbox interface. Hence we use the PL_RST
2612 * register directly.
2614 csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
2615 csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
2619 /* Should never receive any new events */
2621 CSIO_INC_STATS(hw, n_evt_unexp);
2628 * csio_hws_pcierr - PCI Error state
2634 csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
2636 hw->prev_evt = hw->cur_evt;
2638 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2641 case CSIO_HWE_PCIERR_SLOT_RESET:
2642 csio_evtq_start(hw);
2643 csio_set_state(&hw->sm, csio_hws_configuring);
2644 csio_hw_configure(hw);
2648 CSIO_INC_STATS(hw, n_evt_unexp);
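/*
 * Illustrative sketch, not driver code: how callers drive the HW SM
 * above. Events are posted with the HW lock held and dispatched to the
 * current state function (see csio_hw_start()/csio_hw_reset() below for
 * the real callers):
 *
 *	spin_lock_irq(&hw->lock);
 *	csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);
 *	spin_unlock_irq(&hw->lock);
 */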
2653 /*****************************************************************************/
2655 /*****************************************************************************/
2658 * csio_handle_intr_status - table-driven interrupt handler
2660 * @reg: the interrupt status register to process
2661 * @acts: table of interrupt actions
2663 * A table-driven interrupt handler that applies a set of masks to an
2664 * interrupt status word and performs the corresponding actions if the
2665 * interrupts described by the mask have occurred. The actions include
2666 * optionally emitting a warning or alert message. The table is terminated
2667 * by an entry specifying mask 0. Returns the number of fatal interrupt conditions.
2671 csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
2672 const struct intr_info *acts)
2675 unsigned int mask = 0;
2676 unsigned int status = csio_rd_reg32(hw, reg);
2678 for ( ; acts->mask; ++acts) {
2679 if (!(status & acts->mask))
2683 csio_fatal(hw, "Fatal %s (0x%x)\n",
2684 acts->msg, status & acts->mask);
2685 } else if (acts->msg)
2686 csio_info(hw, "%s (0x%x)\n",
2687 acts->msg, status & acts->mask);
2691 if (status) /* clear processed interrupts */
2692 csio_wr_reg32(hw, status, reg);
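/*
 * Illustrative sketch (assumed names, not part of the driver): a minimal
 * intr_info table for csio_handle_intr_status(). Entries are
 * { mask, message, stat counter index (-1 here), fatal }, terminated by a
 * mask of 0, mirroring the real tables in the handlers below:
 *
 *	static struct intr_info example_intr_info[] = {
 *		{ 0x00000001, "example parity error", -1, 1 },	// fatal
 *		{ 0x00000002, "example FIFO overflow", -1, 0 },	// warn only
 *		{ 0, NULL, 0, 0 }
 *	};
 *
 *	if (csio_handle_intr_status(hw, EXAMPLE_INT_CAUSE, example_intr_info))
 *		csio_hw_fatal_err(hw);
 */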
2697 * TP interrupt handler.
2699 static void csio_tp_intr_handler(struct csio_hw *hw)
2701 static struct intr_info tp_intr_info[] = {
2702 { 0x3fffffff, "TP parity error", -1, 1 },
2703 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
2707 if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info))
2708 csio_hw_fatal_err(hw);
2712 * SGE interrupt handler.
2714 static void csio_sge_intr_handler(struct csio_hw *hw)
2718 static struct intr_info sge_intr_info[] = {
2719 { ERR_CPL_EXCEED_IQE_SIZE,
2720 "SGE received CPL exceeding IQE size", -1, 1 },
2721 { ERR_INVALID_CIDX_INC,
2722 "SGE GTS CIDX increment too large", -1, 0 },
2723 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
2724 { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
2725 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
2726 "SGE IQID > 1023 received CPL for FL", -1, 0 },
2727 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
2729 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
2731 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
2733 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
2735 { ERR_ING_CTXT_PRIO,
2736 "SGE too many priority ingress contexts", -1, 0 },
2737 { ERR_EGR_CTXT_PRIO,
2738 "SGE too many priority egress contexts", -1, 0 },
2739 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
2740 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
2744 v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) |
2745 ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32);
2747 csio_fatal(hw, "SGE parity error (%#llx)\n",
2748 (unsigned long long)v);
2749 csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
2751 csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2);
2754 v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info);
2756 if (csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info) ||
2758 csio_hw_fatal_err(hw);
2761 #define CIM_OBQ_INTR (OBQULP0PARERR | OBQULP1PARERR | OBQULP2PARERR |\
2762 OBQULP3PARERR | OBQSGEPARERR | OBQNCSIPARERR)
2763 #define CIM_IBQ_INTR (IBQTP0PARERR | IBQTP1PARERR | IBQULPPARERR |\
2764 IBQSGEHIPARERR | IBQSGELOPARERR | IBQNCSIPARERR)
2767 * CIM interrupt handler.
2769 static void csio_cim_intr_handler(struct csio_hw *hw)
2771 static struct intr_info cim_intr_info[] = {
2772 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
2773 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
2774 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
2775 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
2776 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
2777 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
2778 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
2781 static struct intr_info cim_upintr_info[] = {
2782 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
2783 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
2784 { ILLWRINT, "CIM illegal write", -1, 1 },
2785 { ILLRDINT, "CIM illegal read", -1, 1 },
2786 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
2787 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
2788 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
2789 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
2790 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
2791 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
2792 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
2793 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
2794 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
2795 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
2796 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
2797 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
2798 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
2799 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
2800 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
2801 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
2802 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
2803 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
2804 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
2805 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
2806 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
2807 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
2808 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
2809 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
2815 fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE,
2817 csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE,
2820 csio_hw_fatal_err(hw);
2824 * ULP RX interrupt handler.
2826 static void csio_ulprx_intr_handler(struct csio_hw *hw)
2828 static struct intr_info ulprx_intr_info[] = {
2829 { 0x1800000, "ULPRX context error", -1, 1 },
2830 { 0x7fffff, "ULPRX parity error", -1, 1 },
2834 if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info))
2835 csio_hw_fatal_err(hw);
2839 * ULP TX interrupt handler.
2841 static void csio_ulptx_intr_handler(struct csio_hw *hw)
2843 static struct intr_info ulptx_intr_info[] = {
2844 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
2846 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
2848 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
2850 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
2852 { 0xfffffff, "ULPTX parity error", -1, 1 },
2856 if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info))
2857 csio_hw_fatal_err(hw);
2861 * PM TX interrupt handler.
2863 static void csio_pmtx_intr_handler(struct csio_hw *hw)
2865 static struct intr_info pmtx_intr_info[] = {
2866 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
2867 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
2868 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
2869 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
2870 { 0xffffff0, "PMTX framing error", -1, 1 },
2871 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
2872 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
2874 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
2875 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
2879 if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info))
2880 csio_hw_fatal_err(hw);
2884 * PM RX interrupt handler.
2886 static void csio_pmrx_intr_handler(struct csio_hw *hw)
2888 static struct intr_info pmrx_intr_info[] = {
2889 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2890 { 0x3ffff0, "PMRX framing error", -1, 1 },
2891 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
2892 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
2894 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
2895 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
2899 if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info))
2900 csio_hw_fatal_err(hw);
2904 * CPL switch interrupt handler.
2906 static void csio_cplsw_intr_handler(struct csio_hw *hw)
2908 static struct intr_info cplsw_intr_info[] = {
2909 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
2910 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
2911 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
2912 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
2913 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
2914 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
2918 if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info))
2919 csio_hw_fatal_err(hw);
2923 * LE interrupt handler.
2925 static void csio_le_intr_handler(struct csio_hw *hw)
2927 static struct intr_info le_intr_info[] = {
2928 { LIPMISS, "LE LIP miss", -1, 0 },
2929 { LIP0, "LE 0 LIP error", -1, 0 },
2930 { PARITYERR, "LE parity error", -1, 1 },
2931 { UNKNOWNCMD, "LE unknown command", -1, 1 },
2932 { REQQPARERR, "LE request queue parity error", -1, 1 },
2936 if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info))
2937 csio_hw_fatal_err(hw);
2941 * MPS interrupt handler.
2943 static void csio_mps_intr_handler(struct csio_hw *hw)
2945 static struct intr_info mps_rx_intr_info[] = {
2946 { 0xffffff, "MPS Rx parity error", -1, 1 },
2949 static struct intr_info mps_tx_intr_info[] = {
2950 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
2951 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
2952 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
2953 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
2954 { BUBBLE, "MPS Tx underflow", -1, 1 },
2955 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
2956 { FRMERR, "MPS Tx framing error", -1, 1 },
2959 static struct intr_info mps_trc_intr_info[] = {
2960 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
2961 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
2962 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
2965 static struct intr_info mps_stat_sram_intr_info[] = {
2966 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2969 static struct intr_info mps_stat_tx_intr_info[] = {
2970 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2973 static struct intr_info mps_stat_rx_intr_info[] = {
2974 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2977 static struct intr_info mps_cls_intr_info[] = {
2978 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
2979 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
2980 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
2986 fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE,
2988 csio_handle_intr_status(hw, MPS_TX_INT_CAUSE,
2990 csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE,
2991 mps_trc_intr_info) +
2992 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM,
2993 mps_stat_sram_intr_info) +
2994 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2995 mps_stat_tx_intr_info) +
2996 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2997 mps_stat_rx_intr_info) +
2998 csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE,
3001 csio_wr_reg32(hw, 0, MPS_INT_CAUSE);
3002 csio_rd_reg32(hw, MPS_INT_CAUSE); /* flush */
3004 csio_hw_fatal_err(hw);
3007 #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
3010 * EDC/MC interrupt handler.
3012 static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
3014 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
3016 unsigned int addr, cnt_addr, v;
3018 if (idx <= MEM_EDC1) {
3019 addr = EDC_REG(EDC_INT_CAUSE, idx);
3020 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
3022 addr = MC_INT_CAUSE;
3023 cnt_addr = MC_ECC_STATUS;
3026 v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
3027 if (v & PERR_INT_CAUSE)
3028 csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
3029 if (v & ECC_CE_INT_CAUSE) {
3030 uint32_t cnt = ECC_CECNT_GET(csio_rd_reg32(hw, cnt_addr));
3032 csio_wr_reg32(hw, ECC_CECNT_MASK, cnt_addr);
3033 csio_warn(hw, "%u %s correctable ECC data error%s\n",
3034 cnt, name[idx], cnt > 1 ? "s" : "");
3036 if (v & ECC_UE_INT_CAUSE)
3037 csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);
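/* Write the handled cause bits back to clear them, as with the other cause registers above. */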
3039 csio_wr_reg32(hw, v, addr);
3040 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
3041 csio_hw_fatal_err(hw);
3045 * MA interrupt handler.
3047 static void csio_ma_intr_handler(struct csio_hw *hw)
3049 uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE);
3051 if (status & MEM_PERR_INT_CAUSE)
3052 csio_fatal(hw, "MA parity error, parity status %#x\n",
3053 csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS));
3054 if (status & MEM_WRAP_INT_CAUSE) {
3055 v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS);
3057 "MA address wrap-around error by client %u to address %#x\n",
3058 MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4);
3060 csio_wr_reg32(hw, status, MA_INT_CAUSE);
3061 csio_hw_fatal_err(hw);
3065 * SMB interrupt handler.
3067 static void csio_smb_intr_handler(struct csio_hw *hw)
3069 static struct intr_info smb_intr_info[] = {
3070 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
3071 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
3072 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
3076 if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info))
3077 csio_hw_fatal_err(hw);
3081 * NC-SI interrupt handler.
3083 static void csio_ncsi_intr_handler(struct csio_hw *hw)
3085 static struct intr_info ncsi_intr_info[] = {
3086 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
3087 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
3088 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
3089 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
3093 if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info))
3094 csio_hw_fatal_err(hw);
3098 * XGMAC interrupt handler.
3100 static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
3102 uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port));
3104 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
3108 if (v & TXFIFO_PRTY_ERR)
3109 csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
3110 if (v & RXFIFO_PRTY_ERR)
3111 csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
3112 csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port));
3113 csio_hw_fatal_err(hw);
3117 * PL interrupt handler.
3119 static void csio_pl_intr_handler(struct csio_hw *hw)
3121 static struct intr_info pl_intr_info[] = {
3122 { FATALPERR, "T4 fatal parity error", -1, 1 },
3123 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
3127 if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info))
3128 csio_hw_fatal_err(hw);
3132 * csio_hw_slow_intr_handler - control path interrupt handler
3135 * Interrupt handler for non-data global interrupt events, e.g., errors.
3136 * The designation 'slow' is because it involves register reads, while
3137 * data interrupts typically don't involve any MMIOs.
3140 csio_hw_slow_intr_handler(struct csio_hw *hw)
3142 uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE);
3144 if (!(cause & CSIO_GLBL_INTR_MASK)) {
3145 CSIO_INC_STATS(hw, n_plint_unexp);
3149 csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);
3151 CSIO_INC_STATS(hw, n_plint_cnt);
3154 csio_cim_intr_handler(hw);
3157 csio_mps_intr_handler(hw);
3160 csio_ncsi_intr_handler(hw);
3163 csio_pl_intr_handler(hw);
3166 csio_smb_intr_handler(hw);
3169 csio_xgmac_intr_handler(hw, 0);
3172 csio_xgmac_intr_handler(hw, 1);
3174 if (cause & XGMAC_KR0)
3175 csio_xgmac_intr_handler(hw, 2);
3177 if (cause & XGMAC_KR1)
3178 csio_xgmac_intr_handler(hw, 3);
3181 hw->chip_ops->chip_pcie_intr_handler(hw);
3184 csio_mem_intr_handler(hw, MEM_MC);
3187 csio_mem_intr_handler(hw, MEM_EDC0);
3190 csio_mem_intr_handler(hw, MEM_EDC1);
3193 csio_le_intr_handler(hw);
3196 csio_tp_intr_handler(hw);
3199 csio_ma_intr_handler(hw);
3202 csio_pmtx_intr_handler(hw);
3205 csio_pmrx_intr_handler(hw);
3208 csio_ulprx_intr_handler(hw);
3210 if (cause & CPL_SWITCH)
3211 csio_cplsw_intr_handler(hw);
3214 csio_sge_intr_handler(hw);
3217 csio_ulptx_intr_handler(hw);
3219 /* Clear the interrupts just processed for which we are the master. */
3220 csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE);
3221 csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */
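/*
 * The read-back above flushes the posted write, ensuring the cause bits
 * are cleared in hardware before the handler returns.
 */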
3226 /*****************************************************************************
3227 * HW <--> mailbox interfacing routines.
3228 ****************************************************************************/
3230 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions
3232 * @data: Private data pointer.
3234 * Called from worker thread context.
3237 csio_mberr_worker(void *data)
3239 struct csio_hw *hw = (struct csio_hw *)data;
3240 struct csio_mbm *mbm = &hw->mbm;
3242 struct csio_mb *mbp_next;
3245 del_timer_sync(&mbm->timer);
3247 spin_lock_irq(&hw->lock);
3248 if (list_empty(&mbm->cbfn_q)) {
3249 spin_unlock_irq(&hw->lock);
3253 list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
3254 mbm->stats.n_cbfnq = 0;
3256 /* Try to start waiting mailboxes */
3257 if (!list_empty(&mbm->req_q)) {
3258 mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list);
3259 list_del_init(&mbp_next->list);
3261 rv = csio_mb_issue(hw, mbp_next);
3263 list_add_tail(&mbp_next->list, &mbm->req_q);
3265 CSIO_DEC_STATS(mbm, n_activeq);
3267 spin_unlock_irq(&hw->lock);
3269 /* Now callback completions */
3270 csio_mb_completions(hw, &cbfn_q);
3274 * csio_hw_mb_timer - Top-level Mailbox timeout handler.
3276 * @data: private data pointer
3280 csio_hw_mb_timer(uintptr_t data)
3282 struct csio_hw *hw = (struct csio_hw *)data;
3283 struct csio_mb *mbp = NULL;
3285 spin_lock_irq(&hw->lock);
3286 mbp = csio_mb_tmo_handler(hw);
3287 spin_unlock_irq(&hw->lock);
3289 /* Call back the function for the timed-out Mailbox */
3291 mbp->mb_cbfn(hw, mbp);
3296 * csio_hw_mbm_cleanup - Cleanup Mailbox module.
3299 * Called with the lock held; should exit with the lock held.
3300 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them
3301 * into a local queue. Drops the lock and calls the completions. Re-acquires the lock and returns.
3305 csio_hw_mbm_cleanup(struct csio_hw *hw)
3309 csio_mb_cancel_all(hw, &cbfn_q);
3311 spin_unlock_irq(&hw->lock);
3312 csio_mb_completions(hw, &cbfn_q);
3313 spin_lock_irq(&hw->lock);
3316 /*****************************************************************************
3318 ****************************************************************************/
3320 csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
3323 struct csio_evt_msg *evt_entry = NULL;
3325 if (type >= CSIO_EVT_MAX)
3328 if (len > CSIO_EVT_MSG_SIZE)
3331 if (hw->flags & CSIO_HWF_FWEVT_STOP)
3334 if (list_empty(&hw->evt_free_q)) {
3335 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
3340 evt_entry = list_first_entry(&hw->evt_free_q,
3341 struct csio_evt_msg, list);
3342 list_del_init(&evt_entry->list);
3344 /* copy event msg and queue the event */
3345 evt_entry->type = type;
3346 memcpy((void *)evt_entry->data, evt_msg, len);
3347 list_add_tail(&evt_entry->list, &hw->evt_active_q);
3349 CSIO_DEC_STATS(hw, n_evt_freeq);
3350 CSIO_INC_STATS(hw, n_evt_activeq);
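/*
 * Note: csio_enqueue_evt() manipulates evt_free_q/evt_active_q without
 * taking hw->lock, so callers are presumed to hold it already;
 * csio_enqueue_evt_lock() below is the variant that acquires the lock
 * itself.
 */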
3356 csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
3357 uint16_t len, bool msg_sg)
3359 struct csio_evt_msg *evt_entry = NULL;
3360 struct csio_fl_dma_buf *fl_sg;
3362 unsigned long flags;
3365 if (type >= CSIO_EVT_MAX)
3368 if (len > CSIO_EVT_MSG_SIZE)
3371 spin_lock_irqsave(&hw->lock, flags);
3372 if (hw->flags & CSIO_HWF_FWEVT_STOP) {
3377 if (list_empty(&hw->evt_free_q)) {
3378 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
3384 evt_entry = list_first_entry(&hw->evt_free_q,
3385 struct csio_evt_msg, list);
3386 list_del_init(&evt_entry->list);
3388 /* copy event msg and queue the event */
3389 evt_entry->type = type;
3391 /* If the payload is in an SG list */
3393 fl_sg = (struct csio_fl_dma_buf *) evt_msg;
3394 for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) {
3395 memcpy((void *)((uintptr_t)evt_entry->data + off),
3396 fl_sg->flbufs[n].vaddr,
3397 fl_sg->flbufs[n].len);
3398 off += fl_sg->flbufs[n].len;
3401 memcpy((void *)evt_entry->data, evt_msg, len);
3403 list_add_tail(&evt_entry->list, &hw->evt_active_q);
3404 CSIO_DEC_STATS(hw, n_evt_freeq);
3405 CSIO_INC_STATS(hw, n_evt_activeq);
3407 spin_unlock_irqrestore(&hw->lock, flags);
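/*
 * Illustrative usage sketch: csio_process_fwevtq_entry() below hands FW
 * messages to the event worker this way (msg/msg_len/msg_sg as computed
 * from the ingress WR):
 *
 *	if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
 *				  (uint16_t)msg_len, msg_sg))
 *		CSIO_INC_STATS(hw, n_evt_drop);
 */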
3412 csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry)
3415 spin_lock_irq(&hw->lock);
3416 list_del_init(&evt_entry->list);
3417 list_add_tail(&evt_entry->list, &hw->evt_free_q);
3418 CSIO_DEC_STATS(hw, n_evt_activeq);
3419 CSIO_INC_STATS(hw, n_evt_freeq);
3420 spin_unlock_irq(&hw->lock);
3425 csio_evtq_flush(struct csio_hw *hw)
3429 while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) {
3430 spin_unlock_irq(&hw->lock);
3432 spin_lock_irq(&hw->lock);
3435 CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
3439 csio_evtq_stop(struct csio_hw *hw)
3441 hw->flags |= CSIO_HWF_FWEVT_STOP;
3445 csio_evtq_start(struct csio_hw *hw)
3447 hw->flags &= ~CSIO_HWF_FWEVT_STOP;
3451 csio_evtq_cleanup(struct csio_hw *hw)
3453 struct list_head *evt_entry, *next_entry;
3455 /* Release outstanding events from activeq to freeq */
3456 if (!list_empty(&hw->evt_active_q))
3457 list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);
3459 hw->stats.n_evt_activeq = 0;
3460 hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
3462 /* Free up the event entries */
3463 list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
3465 CSIO_DEC_STATS(hw, n_evt_freeq);
3468 hw->stats.n_evt_freeq = 0;
3473 csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
3474 struct csio_fl_dma_buf *flb, void *priv)
3478 uint32_t msg_len = 0;
3481 op = ((struct rss_header *) wr)->opcode;
3482 if (op == CPL_FW6_PLD) {
3483 CSIO_INC_STATS(hw, n_cpl_fw6_pld);
3484 if (!flb || !flb->totlen) {
3485 CSIO_INC_STATS(hw, n_cpl_unexp);
3490 msg_len = flb->totlen;
3492 } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {
3494 CSIO_INC_STATS(hw, n_cpl_fw6_msg);
3495 /* skip RSS header */
3496 msg = (void *)((uintptr_t)wr + sizeof(__be64));
3497 msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
3498 sizeof(struct cpl_fw4_msg);
3500 csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
3501 CSIO_INC_STATS(hw, n_cpl_unexp);
3506 * Enqueue event to EventQ. Event processing happens
3507 * in the event worker thread context.
3509 if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
3510 (uint16_t)msg_len, msg_sg))
3511 CSIO_INC_STATS(hw, n_evt_drop);
3515 csio_evtq_worker(struct work_struct *work)
3517 struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
3518 struct list_head *evt_entry, *next_entry;
3520 struct csio_evt_msg *evt_msg;
3521 struct cpl_fw6_msg *msg;
3522 struct csio_rnode *rn;
3524 uint8_t evtq_stop = 0;
3526 csio_dbg(hw, "event worker thread active evts#%d\n",
3527 hw->stats.n_evt_activeq);
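/*
 * Drain in batches: splice the entire active queue out under the lock,
 * then process the detached batch with the lock dropped so new events
 * can continue to arrive.
 */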
3529 spin_lock_irq(&hw->lock);
3530 while (!list_empty(&hw->evt_active_q)) {
3531 list_splice_tail_init(&hw->evt_active_q, &evt_q);
3532 spin_unlock_irq(&hw->lock);
3534 list_for_each_safe(evt_entry, next_entry, &evt_q) {
3535 evt_msg = (struct csio_evt_msg *) evt_entry;
3537 /* Drop events if queue is STOPPED */
3538 spin_lock_irq(&hw->lock);
3539 if (hw->flags & CSIO_HWF_FWEVT_STOP)
3541 spin_unlock_irq(&hw->lock);
3543 CSIO_INC_STATS(hw, n_evt_drop);
3547 switch (evt_msg->type) {
3549 msg = (struct cpl_fw6_msg *)(evt_msg->data);
3551 if ((msg->opcode == CPL_FW6_MSG ||
3552 msg->opcode == CPL_FW4_MSG) &&
3554 rv = csio_mb_fwevt_handler(hw,
3558 /* Handle any remaining fw events */
3559 csio_fcoe_fwevt_handler(hw,
3560 msg->opcode, msg->data);
3561 } else if (msg->opcode == CPL_FW6_PLD) {
3563 csio_fcoe_fwevt_handler(hw,
3564 msg->opcode, msg->data);
3567 "Unhandled FW msg op %x type %x\n",
3568 msg->opcode, msg->type);
3569 CSIO_INC_STATS(hw, n_evt_drop);
3574 csio_mberr_worker(hw);
3577 case CSIO_EVT_DEV_LOSS:
3578 memcpy(&rn, evt_msg->data, sizeof(rn));
3579 csio_rnode_devloss_handler(rn);
3583 csio_warn(hw, "Unhandled event %x on evtq\n",
3585 CSIO_INC_STATS(hw, n_evt_unexp);
3589 csio_free_evt(hw, evt_msg);
3592 spin_lock_irq(&hw->lock);
3594 hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
3595 spin_unlock_irq(&hw->lock);
3599 csio_fwevtq_handler(struct csio_hw *hw)
3603 if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
3604 CSIO_INC_STATS(hw, n_int_stray);
3608 rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
3609 csio_process_fwevtq_entry, NULL);
3613 /****************************************************************************
3615 ****************************************************************************/
3617 /* Management module */
3619 * csio_mgmt_req_lookup - Check whether the given IO request is on the active Q.
3620 * @mgmtm - mgmt module
3621 * @io_req - io request
3623 * Return - 0 if the given IO request exists on the active Q,
3624 * -EINVAL if the lookup fails.
3627 csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
3629 struct list_head *tmp;
3631 /* Lookup ioreq in the ACTIVEQ */
3632 list_for_each(tmp, &mgmtm->active_q) {
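/*
 * The cast below assumes the embedded sm.sm_list list_head sits at
 * offset 0 of struct csio_ioreq, so the list node pointer doubles as
 * the ioreq pointer.
 */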
3633 if (io_req == (struct csio_ioreq *)tmp)
3639 #define ECM_MIN_TMO 1000 /* Minimum timeout value for req */
3642 * csio_mgmt_tmo_handler - MGMT IO timeout handler.
3643 * @data - mgmt module pointer (passed via timer data).
3648 csio_mgmt_tmo_handler(uintptr_t data)
3650 struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data;
3651 struct list_head *tmp;
3652 struct csio_ioreq *io_req;
3654 csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");
3656 spin_lock_irq(&mgmtm->hw->lock);
3658 list_for_each(tmp, &mgmtm->active_q) {
3659 io_req = (struct csio_ioreq *) tmp;
3660 io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);
3663 /* Dequeue the request from the active Q. */
3664 tmp = csio_list_prev(tmp);
3665 list_del_init(&io_req->sm.sm_list);
3666 if (io_req->io_cbfn) {
3667 /* io_req will be freed by completion handler */
3668 io_req->wr_status = -ETIMEDOUT;
3669 io_req->io_cbfn(mgmtm->hw, io_req);
3676 /* If the active queue is not empty, re-arm the timer */
3677 if (!list_empty(&mgmtm->active_q))
3678 mod_timer(&mgmtm->mgmt_timer,
3679 jiffies + msecs_to_jiffies(ECM_MIN_TMO));
3680 spin_unlock_irq(&mgmtm->hw->lock);
3684 csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
3686 struct csio_hw *hw = mgmtm->hw;
3687 struct csio_ioreq *io_req;
3688 struct list_head *tmp;
3692 /* Wait for all outstanding reqs to complete gracefully */
3693 while ((!list_empty(&mgmtm->active_q)) && count--) {
3694 spin_unlock_irq(&hw->lock);
3696 spin_lock_irq(&hw->lock);
3699 /* Release outstanding reqs from the ACTIVEQ */
3700 list_for_each(tmp, &mgmtm->active_q) {
3701 io_req = (struct csio_ioreq *) tmp;
3702 tmp = csio_list_prev(tmp);
3703 list_del_init(&io_req->sm.sm_list);
3704 mgmtm->stats.n_active--;
3705 if (io_req->io_cbfn) {
3706 /* io_req will be freed by completion handler */
3707 io_req->wr_status = -ETIMEDOUT;
3708 io_req->io_cbfn(mgmtm->hw, io_req);
3714 * csio_mgmtm_init - Mgmt module init entry point
3715 * @mgmtm - mgmt module
3718 * Initialize mgmt timer, resource wait queue, active queue,
3719 * completion q. Allocate Egress and Ingress
3720 * WR queues and save off the queue index returned by the WR
3721 * module for future use. Allocate and save off mgmt reqs in the
3722 * mgmt_req_freelist for future use. Make sure their SM is initialized to the uninit state.
3724 * Returns: 0 - on success
3725 * -ENOMEM - on error.
3728 csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
3730 struct timer_list *timer = &mgmtm->mgmt_timer;
3733 timer->function = csio_mgmt_tmo_handler;
3734 timer->data = (unsigned long)mgmtm;
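/*
 * Legacy (pre-timer_setup) timer API: the handler gets mgmtm back
 * through timer->data as an unsigned long.
 */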
3736 INIT_LIST_HEAD(&mgmtm->active_q);
3737 INIT_LIST_HEAD(&mgmtm->cbfn_q);
3740 /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/
3746 * csio_mgmtm_exit - MGMT module exit entry point
3747 * @mgmtm - mgmt module
3749 * This function is called during MGMT module uninit.
3750 * Stops timers and frees allocated ioreqs.
3755 csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
3757 del_timer_sync(&mgmtm->mgmt_timer);
3762 * csio_hw_start - Kicks off the HW State machine
3763 * @hw: Pointer to HW module.
3765 * It is assumed that the initialization is a synchronous operation.
3766 * So when we return after posting the event, the HW SM should be in
3767 * the ready state, if there were no errors during init.
3770 csio_hw_start(struct csio_hw *hw)
3772 spin_lock_irq(&hw->lock);
3773 csio_post_event(&hw->sm, CSIO_HWE_CFG);
3774 spin_unlock_irq(&hw->lock);
3776 if (csio_is_hw_ready(hw))
3783 csio_hw_stop(struct csio_hw *hw)
3785 csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);
3787 if (csio_is_hw_removing(hw))
3793 /* Max reset retries */
3794 #define CSIO_MAX_RESET_RETRIES 3
3797 * csio_hw_reset - Reset the hardware
3800 * Caller should hold lock across this function.
3803 csio_hw_reset(struct csio_hw *hw)
3805 if (!csio_is_hw_master(hw))
3808 if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
3809 csio_dbg(hw, "Max HW reset attempts reached\n");
3814 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);
3816 if (csio_is_hw_ready(hw)) {
3817 hw->rst_retries = 0;
3818 hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
3825 * csio_hw_get_device_id - Caches the Adapter's vendor & device id.
3829 csio_hw_get_device_id(struct csio_hw *hw)
3831 /* Is the adapter device id cached already? */
3832 if (csio_is_dev_id_cached(hw))
3835 /* Get the PCI vendor & device id */
3836 pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
3837 &hw->params.pci.vendor_id);
3838 pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
3839 &hw->params.pci.device_id);
3841 csio_dev_id_cached(hw);
3842 hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);
3844 } /* csio_hw_get_device_id */
3847 * csio_hw_set_description - Set the model and description of the HW.
3849 * @ven_id: PCI Vendor ID
3850 * @dev_id: PCI Device ID
3853 csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
3855 uint32_t adap_type, prot_type;
3857 if (ven_id == CSIO_VENDOR_ID) {
3858 prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
3859 adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);
3861 if (prot_type == CSIO_T4_FCOE_ASIC) {
3863 csio_t4_fcoe_adapters[adap_type].model_no, 16);
3864 memcpy(hw->model_desc,
3865 csio_t4_fcoe_adapters[adap_type].description,
3867 } else if (prot_type == CSIO_T5_FCOE_ASIC) {
3869 csio_t5_fcoe_adapters[adap_type].model_no, 16);
3870 memcpy(hw->model_desc,
3871 csio_t5_fcoe_adapters[adap_type].description,
3874 char tempName[32] = "Chelsio FCoE Controller";
3875 memcpy(hw->model_desc, tempName, 32);
3878 } /* csio_hw_set_description */
3881 * csio_hw_init - Initialize HW module.
3882 * @hw: Pointer to HW module.
3884 * Initialize the members of the HW module.
3887 csio_hw_init(struct csio_hw *hw)
3891 uint16_t ven_id, dev_id;
3892 struct csio_evt_msg *evt_entry;
3894 INIT_LIST_HEAD(&hw->sm.sm_list);
3895 csio_init_state(&hw->sm, csio_hws_uninit);
3896 spin_lock_init(&hw->lock);
3897 INIT_LIST_HEAD(&hw->sln_head);
3899 /* Get the PCI vendor & device id */
3900 csio_hw_get_device_id(hw);
3902 strcpy(hw->name, CSIO_HW_NAME);
3904 /* Initialize the HW chip ops with T4/T5 specific ops */
3905 hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops;
3907 /* Set the model & its description */
3909 ven_id = hw->params.pci.vendor_id;
3910 dev_id = hw->params.pci.device_id;
3912 csio_hw_set_description(hw, ven_id, dev_id);
3914 /* Initialize default log level */
3915 hw->params.log_level = (uint32_t) csio_dbg_level;
3917 csio_set_fwevt_intr_idx(hw, -1);
3918 csio_set_nondata_intr_idx(hw, -1);
3920 /* Init all the modules: Mailbox, WorkRequest and Transport */
3921 if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
3924 rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
3928 rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
3932 rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
3934 goto err_scsim_exit;
3935 /* Pre-allocate evtq entries and initialize them */
3936 INIT_LIST_HEAD(&hw->evt_active_q);
3937 INIT_LIST_HEAD(&hw->evt_free_q);
3938 for (i = 0; i < csio_evtq_sz; i++) {
3940 evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
3942 csio_err(hw, "Failed to initialize eventq\n");
3943 goto err_evtq_cleanup;
3946 list_add_tail(&evt_entry->list, &hw->evt_free_q);
3947 CSIO_INC_STATS(hw, n_evt_freeq);
3950 hw->dev_num = dev_num;
3956 csio_evtq_cleanup(hw);
3957 csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
3959 csio_scsim_exit(csio_hw_to_scsim(hw));
3961 csio_wrm_exit(csio_hw_to_wrm(hw), hw);
3963 csio_mbm_exit(csio_hw_to_mbm(hw));
3969 * csio_hw_exit - Un-initialize HW module.
3970 * @hw: Pointer to HW module.
3974 csio_hw_exit(struct csio_hw *hw)
3976 csio_evtq_cleanup(hw);
3977 csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
3978 csio_scsim_exit(csio_hw_to_scsim(hw));
3979 csio_wrm_exit(csio_hw_to_wrm(hw), hw);
3980 csio_mbm_exit(csio_hw_to_mbm(hw));
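/*
 * Illustrative pairing sketch (assumed probe/remove flow, not verbatim
 * driver code): csio_hw_init()/csio_hw_start() on the way up,
 * csio_hw_stop()/csio_hw_exit() on the way down:
 *
 *	if (csio_hw_init(hw))
 *		goto err;
 *	if (csio_hw_start(hw))		// posts CSIO_HWE_CFG, HW -> ready
 *		goto err_hw_exit;
 *	...
 *	csio_hw_stop(hw);		// posts CSIO_HWE_PCI_REMOVE
 *	csio_hw_exit(hw);
 */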