1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/pci.h>
28 #include <linux/init.h>
29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h>
31 #include <linux/skbuff.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/bitops.h>
34 #include <linux/irq.h>
35 #include <linux/delay.h>
36 #include <asm/byteorder.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if_vlan.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/crc32c.h>
48 #include <linux/prefetch.h>
49 #include <linux/zlib.h>
51 #include <linux/stringify.h>
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_cmn.h"
59 #include <linux/firmware.h>
60 #include "bnx2x_fw_file_hdr.h"
62 #define FW_FILE_VERSION \
63 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
64 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
65 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
66 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
67 #define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
68 #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
69 #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
71 /* Time in jiffies before concluding the transmitter is hung */
72 #define TX_TIMEOUT (5*HZ)
74 static char version[] __devinitdata =
75 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
76 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
78 MODULE_AUTHOR("Eliezer Tamir");
79 MODULE_DESCRIPTION("Broadcom NetXtreme II "
80 "BCM57710/57711/57711E/57712/57712E Driver");
81 MODULE_LICENSE("GPL");
82 MODULE_VERSION(DRV_MODULE_VERSION);
83 MODULE_FIRMWARE(FW_FILE_NAME_E1);
84 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
85 MODULE_FIRMWARE(FW_FILE_NAME_E2);
87 static int multi_mode = 1;
88 module_param(multi_mode, int, 0);
89 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
90 "(0 Disable; 1 Enable (default))");
93 module_param(num_queues, int, 0);
94 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
95 " (default is as a number of CPUs)");
97 static int disable_tpa;
98 module_param(disable_tpa, int, 0);
99 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
102 module_param(int_mode, int, 0);
103 MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
106 static int dropless_fc;
107 module_param(dropless_fc, int, 0);
108 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
111 module_param(poll, int, 0);
112 MODULE_PARM_DESC(poll, " Use polling (for debug)");
114 static int mrrs = -1;
115 module_param(mrrs, int, 0);
116 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
119 module_param(debug, int, 0);
120 MODULE_PARM_DESC(debug, " Default debug msglevel");
122 static struct workqueue_struct *bnx2x_wq;
124 enum bnx2x_board_type {
132 /* indexed by board_type, above */
135 } board_info[] __devinitdata = {
136 { "Broadcom NetXtreme II BCM57710 XGb" },
137 { "Broadcom NetXtreme II BCM57711 XGb" },
138 { "Broadcom NetXtreme II BCM57711E XGb" },
139 { "Broadcom NetXtreme II BCM57712 XGb" },
140 { "Broadcom NetXtreme II BCM57712E XGb" }
143 #ifndef PCI_DEVICE_ID_NX2_57712
144 #define PCI_DEVICE_ID_NX2_57712 0x1662
146 #ifndef PCI_DEVICE_ID_NX2_57712E
147 #define PCI_DEVICE_ID_NX2_57712E 0x1663
150 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
151 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
152 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
153 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
154 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
155 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
159 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
161 /****************************************************************************
162 * General service functions
163 ****************************************************************************/
165 static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
166 u32 addr, dma_addr_t mapping)
168 REG_WR(bp, addr, U64_LO(mapping));
169 REG_WR(bp, addr + 4, U64_HI(mapping));
172 static inline void __storm_memset_fill(struct bnx2x *bp,
173 u32 addr, size_t size, u32 val)
176 for (i = 0; i < size/4; i++)
177 REG_WR(bp, addr + (i * 4), val);
180 static inline void storm_memset_ustats_zero(struct bnx2x *bp,
181 u8 port, u16 stat_id)
183 size_t size = sizeof(struct ustorm_per_client_stats);
185 u32 addr = BAR_USTRORM_INTMEM +
186 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
188 __storm_memset_fill(bp, addr, size, 0);
191 static inline void storm_memset_tstats_zero(struct bnx2x *bp,
192 u8 port, u16 stat_id)
194 size_t size = sizeof(struct tstorm_per_client_stats);
196 u32 addr = BAR_TSTRORM_INTMEM +
197 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
199 __storm_memset_fill(bp, addr, size, 0);
202 static inline void storm_memset_xstats_zero(struct bnx2x *bp,
203 u8 port, u16 stat_id)
205 size_t size = sizeof(struct xstorm_per_client_stats);
207 u32 addr = BAR_XSTRORM_INTMEM +
208 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
210 __storm_memset_fill(bp, addr, size, 0);
214 static inline void storm_memset_spq_addr(struct bnx2x *bp,
215 dma_addr_t mapping, u16 abs_fid)
217 u32 addr = XSEM_REG_FAST_MEMORY +
218 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
220 __storm_memset_dma_mapping(bp, addr, mapping);
223 static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
225 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
228 static inline void storm_memset_func_cfg(struct bnx2x *bp,
229 struct tstorm_eth_function_common_config *tcfg,
232 size_t size = sizeof(struct tstorm_eth_function_common_config);
234 u32 addr = BAR_TSTRORM_INTMEM +
235 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
237 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
240 static inline void storm_memset_xstats_flags(struct bnx2x *bp,
241 struct stats_indication_flags *flags,
244 size_t size = sizeof(struct stats_indication_flags);
246 u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
248 __storm_memset_struct(bp, addr, size, (u32 *)flags);
251 static inline void storm_memset_tstats_flags(struct bnx2x *bp,
252 struct stats_indication_flags *flags,
255 size_t size = sizeof(struct stats_indication_flags);
257 u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
259 __storm_memset_struct(bp, addr, size, (u32 *)flags);
262 static inline void storm_memset_ustats_flags(struct bnx2x *bp,
263 struct stats_indication_flags *flags,
266 size_t size = sizeof(struct stats_indication_flags);
268 u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
270 __storm_memset_struct(bp, addr, size, (u32 *)flags);
273 static inline void storm_memset_cstats_flags(struct bnx2x *bp,
274 struct stats_indication_flags *flags,
277 size_t size = sizeof(struct stats_indication_flags);
279 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
281 __storm_memset_struct(bp, addr, size, (u32 *)flags);
284 static inline void storm_memset_xstats_addr(struct bnx2x *bp,
285 dma_addr_t mapping, u16 abs_fid)
287 u32 addr = BAR_XSTRORM_INTMEM +
288 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
290 __storm_memset_dma_mapping(bp, addr, mapping);
293 static inline void storm_memset_tstats_addr(struct bnx2x *bp,
294 dma_addr_t mapping, u16 abs_fid)
296 u32 addr = BAR_TSTRORM_INTMEM +
297 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
299 __storm_memset_dma_mapping(bp, addr, mapping);
302 static inline void storm_memset_ustats_addr(struct bnx2x *bp,
303 dma_addr_t mapping, u16 abs_fid)
305 u32 addr = BAR_USTRORM_INTMEM +
306 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
308 __storm_memset_dma_mapping(bp, addr, mapping);
311 static inline void storm_memset_cstats_addr(struct bnx2x *bp,
312 dma_addr_t mapping, u16 abs_fid)
314 u32 addr = BAR_CSTRORM_INTMEM +
315 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
317 __storm_memset_dma_mapping(bp, addr, mapping);
320 static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
323 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
325 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
327 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
329 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
333 static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
336 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
338 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
340 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
342 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
346 static inline void storm_memset_eq_data(struct bnx2x *bp,
347 struct event_ring_data *eq_data,
350 size_t size = sizeof(struct event_ring_data);
352 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
354 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
357 static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
360 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
361 REG_WR16(bp, addr, eq_prod);
364 static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
365 u16 fw_sb_id, u8 sb_index,
369 int index_offset = CHIP_IS_E2(bp) ?
370 offsetof(struct hc_status_block_data_e2, index_data) :
371 offsetof(struct hc_status_block_data_e1x, index_data);
372 u32 addr = BAR_CSTRORM_INTMEM +
373 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
375 sizeof(struct hc_index_data)*sb_index +
376 offsetof(struct hc_index_data, timeout);
377 REG_WR8(bp, addr, ticks);
378 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
379 port, fw_sb_id, sb_index, ticks);
381 static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
382 u16 fw_sb_id, u8 sb_index,
385 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
386 int index_offset = CHIP_IS_E2(bp) ?
387 offsetof(struct hc_status_block_data_e2, index_data) :
388 offsetof(struct hc_status_block_data_e1x, index_data);
389 u32 addr = BAR_CSTRORM_INTMEM +
390 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
392 sizeof(struct hc_index_data)*sb_index +
393 offsetof(struct hc_index_data, flags);
394 u16 flags = REG_RD16(bp, addr);
396 flags &= ~HC_INDEX_DATA_HC_ENABLED;
397 flags |= enable_flag;
398 REG_WR16(bp, addr, flags);
399 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
400 port, fw_sb_id, sb_index, disable);
404 * locking is done by mcp
406 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
408 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
409 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
410 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
411 PCICFG_VENDOR_ID_OFFSET);
414 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
418 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
419 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
420 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
421 PCICFG_VENDOR_ID_OFFSET);
426 #define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
427 #define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
428 #define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
429 #define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
430 #define DMAE_DP_DST_NONE "dst_addr [none]"
432 void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
434 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
436 switch (dmae->opcode & DMAE_COMMAND_DST) {
437 case DMAE_CMD_DST_PCI:
438 if (src_type == DMAE_CMD_SRC_PCI)
439 DP(msglvl, "DMAE: opcode 0x%08x\n"
440 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
441 "comp_addr [%x:%08x], comp_val 0x%08x\n",
442 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
443 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
444 dmae->comp_addr_hi, dmae->comp_addr_lo,
447 DP(msglvl, "DMAE: opcode 0x%08x\n"
448 "src [%08x], len [%d*4], dst [%x:%08x]\n"
449 "comp_addr [%x:%08x], comp_val 0x%08x\n",
450 dmae->opcode, dmae->src_addr_lo >> 2,
451 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
452 dmae->comp_addr_hi, dmae->comp_addr_lo,
455 case DMAE_CMD_DST_GRC:
456 if (src_type == DMAE_CMD_SRC_PCI)
457 DP(msglvl, "DMAE: opcode 0x%08x\n"
458 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
459 "comp_addr [%x:%08x], comp_val 0x%08x\n",
460 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
461 dmae->len, dmae->dst_addr_lo >> 2,
462 dmae->comp_addr_hi, dmae->comp_addr_lo,
465 DP(msglvl, "DMAE: opcode 0x%08x\n"
466 "src [%08x], len [%d*4], dst [%08x]\n"
467 "comp_addr [%x:%08x], comp_val 0x%08x\n",
468 dmae->opcode, dmae->src_addr_lo >> 2,
469 dmae->len, dmae->dst_addr_lo >> 2,
470 dmae->comp_addr_hi, dmae->comp_addr_lo,
474 if (src_type == DMAE_CMD_SRC_PCI)
475 DP(msglvl, "DMAE: opcode 0x%08x\n"
476 DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
478 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
479 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
480 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
483 DP(msglvl, "DMAE: opcode 0x%08x\n"
484 DP_LEVEL "src_addr [%08x] len [%d * 4] "
486 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
487 dmae->opcode, dmae->src_addr_lo >> 2,
488 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
495 const u32 dmae_reg_go_c[] = {
496 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
497 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
498 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
499 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
502 /* copy command into DMAE command memory and set DMAE command go */
503 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
508 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
509 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
510 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
512 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
513 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
515 REG_WR(bp, dmae_reg_go_c[idx], 1);
518 u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
520 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
524 u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
526 return opcode & ~DMAE_CMD_SRC_RESET;
529 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
530 bool with_comp, u8 comp_type)
534 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
535 (dst_type << DMAE_COMMAND_DST_SHIFT));
537 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
539 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
540 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
541 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
542 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
545 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
547 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
550 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
554 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
555 u8 src_type, u8 dst_type)
557 memset(dmae, 0, sizeof(struct dmae_command));
560 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
561 true, DMAE_COMP_PCI);
563 /* fill in the completion parameters */
564 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
565 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
566 dmae->comp_val = DMAE_COMP_VAL;
569 /* issue a dmae command over the init-channel and wailt for completion */
570 int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
572 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
573 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
576 DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
577 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
578 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
580 /* lock the dmae channel */
581 mutex_lock(&bp->dmae_mutex);
583 /* reset completion */
586 /* post the command on the channel used for initializations */
587 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
589 /* wait for completion */
591 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
592 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
595 BNX2X_ERR("DMAE timeout!\n");
602 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
603 BNX2X_ERR("DMAE PCI error!\n");
607 DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
608 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
609 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
612 mutex_unlock(&bp->dmae_mutex);
616 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
619 struct dmae_command dmae;
621 if (!bp->dmae_ready) {
622 u32 *data = bnx2x_sp(bp, wb_data[0]);
624 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
625 " using indirect\n", dst_addr, len32);
626 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
630 /* set opcode and fixed command fields */
631 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
633 /* fill in addresses and len */
634 dmae.src_addr_lo = U64_LO(dma_addr);
635 dmae.src_addr_hi = U64_HI(dma_addr);
636 dmae.dst_addr_lo = dst_addr >> 2;
637 dmae.dst_addr_hi = 0;
640 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
642 /* issue the command and wait for completion */
643 bnx2x_issue_dmae_with_comp(bp, &dmae);
646 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
648 struct dmae_command dmae;
650 if (!bp->dmae_ready) {
651 u32 *data = bnx2x_sp(bp, wb_data[0]);
654 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
655 " using indirect\n", src_addr, len32);
656 for (i = 0; i < len32; i++)
657 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
661 /* set opcode and fixed command fields */
662 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
664 /* fill in addresses and len */
665 dmae.src_addr_lo = src_addr >> 2;
666 dmae.src_addr_hi = 0;
667 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
668 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
671 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
673 /* issue the command and wait for completion */
674 bnx2x_issue_dmae_with_comp(bp, &dmae);
677 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
680 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
683 while (len > dmae_wr_max) {
684 bnx2x_write_dmae(bp, phys_addr + offset,
685 addr + offset, dmae_wr_max);
686 offset += dmae_wr_max * 4;
690 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
693 /* used only for slowpath so not inlined */
694 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
698 wb_write[0] = val_hi;
699 wb_write[1] = val_lo;
700 REG_WR_DMAE(bp, reg, wb_write, 2);
704 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
708 REG_RD_DMAE(bp, reg, wb_data, 2);
710 return HILO_U64(wb_data[0], wb_data[1]);
714 static int bnx2x_mc_assert(struct bnx2x *bp)
718 u32 row0, row1, row2, row3;
721 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
722 XSTORM_ASSERT_LIST_INDEX_OFFSET);
724 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
726 /* print the asserts */
727 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
729 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
730 XSTORM_ASSERT_LIST_OFFSET(i));
731 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
732 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
733 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
734 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
735 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
736 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
738 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
739 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
740 " 0x%08x 0x%08x 0x%08x\n",
741 i, row3, row2, row1, row0);
749 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
750 TSTORM_ASSERT_LIST_INDEX_OFFSET);
752 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
754 /* print the asserts */
755 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
757 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
758 TSTORM_ASSERT_LIST_OFFSET(i));
759 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
760 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
761 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
762 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
763 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
764 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
766 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
767 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
768 " 0x%08x 0x%08x 0x%08x\n",
769 i, row3, row2, row1, row0);
777 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
778 CSTORM_ASSERT_LIST_INDEX_OFFSET);
780 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
782 /* print the asserts */
783 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
785 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
786 CSTORM_ASSERT_LIST_OFFSET(i));
787 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
788 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
789 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
790 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
791 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
792 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
794 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
795 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
796 " 0x%08x 0x%08x 0x%08x\n",
797 i, row3, row2, row1, row0);
805 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
806 USTORM_ASSERT_LIST_INDEX_OFFSET);
808 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
810 /* print the asserts */
811 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
813 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
814 USTORM_ASSERT_LIST_OFFSET(i));
815 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
816 USTORM_ASSERT_LIST_OFFSET(i) + 4);
817 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
818 USTORM_ASSERT_LIST_OFFSET(i) + 8);
819 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
820 USTORM_ASSERT_LIST_OFFSET(i) + 12);
822 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
823 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
824 " 0x%08x 0x%08x 0x%08x\n",
825 i, row3, row2, row1, row0);
835 static void bnx2x_fw_dump(struct bnx2x *bp)
841 u32 trace_shmem_base;
843 BNX2X_ERR("NO MCP - can not dump\n");
847 if (BP_PATH(bp) == 0)
848 trace_shmem_base = bp->common.shmem_base;
850 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
851 addr = trace_shmem_base - 0x0800 + 4;
852 mark = REG_RD(bp, addr);
853 mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
854 + ((mark + 0x3) & ~0x3) - 0x08000000;
855 pr_err("begin fw dump (mark 0x%x)\n", mark);
858 for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
859 for (word = 0; word < 8; word++)
860 data[word] = htonl(REG_RD(bp, offset + 4*word));
862 pr_cont("%s", (char *)data);
864 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
865 for (word = 0; word < 8; word++)
866 data[word] = htonl(REG_RD(bp, offset + 4*word));
868 pr_cont("%s", (char *)data);
870 pr_err("end of fw dump\n");
873 void bnx2x_panic_dump(struct bnx2x *bp)
877 struct hc_sp_status_block_data sp_sb_data;
878 int func = BP_FUNC(bp);
879 #ifdef BNX2X_STOP_ON_ERROR
880 u16 start = 0, end = 0;
883 bp->stats_state = STATS_STATE_DISABLED;
884 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
886 BNX2X_ERR("begin crash dump -----------------\n");
890 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
891 " spq_prod_idx(0x%x)\n",
892 bp->def_idx, bp->def_att_idx,
893 bp->attn_state, bp->spq_prod_idx);
894 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
895 bp->def_status_blk->atten_status_block.attn_bits,
896 bp->def_status_blk->atten_status_block.attn_bits_ack,
897 bp->def_status_blk->atten_status_block.status_block_id,
898 bp->def_status_blk->atten_status_block.attn_bits_index);
900 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
902 bp->def_status_blk->sp_sb.index_values[i],
903 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
905 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
906 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
907 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
910 pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
911 "pf_id(0x%x) vnic_id(0x%x) "
912 "vf_id(0x%x) vf_valid (0x%x)\n",
913 sp_sb_data.igu_sb_id,
914 sp_sb_data.igu_seg_id,
915 sp_sb_data.p_func.pf_id,
916 sp_sb_data.p_func.vnic_id,
917 sp_sb_data.p_func.vf_id,
918 sp_sb_data.p_func.vf_valid);
921 for_each_queue(bp, i) {
922 struct bnx2x_fastpath *fp = &bp->fp[i];
924 struct hc_status_block_data_e2 sb_data_e2;
925 struct hc_status_block_data_e1x sb_data_e1x;
926 struct hc_status_block_sm *hc_sm_p =
928 sb_data_e2.common.state_machine :
929 sb_data_e1x.common.state_machine;
930 struct hc_index_data *hc_index_p =
932 sb_data_e2.index_data :
933 sb_data_e1x.index_data;
938 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
939 " rx_comp_prod(0x%x)"
940 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
941 i, fp->rx_bd_prod, fp->rx_bd_cons,
943 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
944 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
945 " fp_hc_idx(0x%x)\n",
946 fp->rx_sge_prod, fp->last_max_sge,
947 le16_to_cpu(fp->fp_hc_idx));
950 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
951 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
952 " *tx_cons_sb(0x%x)\n",
953 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
954 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
956 loop = CHIP_IS_E2(bp) ?
957 HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
961 BNX2X_ERR(" run indexes (");
962 for (j = 0; j < HC_SB_MAX_SM; j++)
964 fp->sb_running_index[j],
965 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
967 BNX2X_ERR(" indexes (");
968 for (j = 0; j < loop; j++)
970 fp->sb_index_values[j],
971 (j == loop - 1) ? ")" : " ");
973 data_size = CHIP_IS_E2(bp) ?
974 sizeof(struct hc_status_block_data_e2) :
975 sizeof(struct hc_status_block_data_e1x);
976 data_size /= sizeof(u32);
977 sb_data_p = CHIP_IS_E2(bp) ?
980 /* copy sb data in here */
981 for (j = 0; j < data_size; j++)
982 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
983 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
986 if (CHIP_IS_E2(bp)) {
987 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
988 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
989 sb_data_e2.common.p_func.pf_id,
990 sb_data_e2.common.p_func.vf_id,
991 sb_data_e2.common.p_func.vf_valid,
992 sb_data_e2.common.p_func.vnic_id,
993 sb_data_e2.common.same_igu_sb_1b);
995 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
996 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
997 sb_data_e1x.common.p_func.pf_id,
998 sb_data_e1x.common.p_func.vf_id,
999 sb_data_e1x.common.p_func.vf_valid,
1000 sb_data_e1x.common.p_func.vnic_id,
1001 sb_data_e1x.common.same_igu_sb_1b);
1005 for (j = 0; j < HC_SB_MAX_SM; j++) {
1006 pr_cont("SM[%d] __flags (0x%x) "
1007 "igu_sb_id (0x%x) igu_seg_id(0x%x) "
1008 "time_to_expire (0x%x) "
1009 "timer_value(0x%x)\n", j,
1011 hc_sm_p[j].igu_sb_id,
1012 hc_sm_p[j].igu_seg_id,
1013 hc_sm_p[j].time_to_expire,
1014 hc_sm_p[j].timer_value);
1018 for (j = 0; j < loop; j++) {
1019 pr_cont("INDEX[%d] flags (0x%x) "
1020 "timeout (0x%x)\n", j,
1021 hc_index_p[j].flags,
1022 hc_index_p[j].timeout);
1026 #ifdef BNX2X_STOP_ON_ERROR
1029 for_each_queue(bp, i) {
1030 struct bnx2x_fastpath *fp = &bp->fp[i];
1032 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1033 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
1034 for (j = start; j != end; j = RX_BD(j + 1)) {
1035 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
1036 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
1038 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
1039 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
1042 start = RX_SGE(fp->rx_sge_prod);
1043 end = RX_SGE(fp->last_max_sge);
1044 for (j = start; j != end; j = RX_SGE(j + 1)) {
1045 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
1046 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
1048 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
1049 i, j, rx_sge[1], rx_sge[0], sw_page->page);
1052 start = RCQ_BD(fp->rx_comp_cons - 10);
1053 end = RCQ_BD(fp->rx_comp_cons + 503);
1054 for (j = start; j != end; j = RCQ_BD(j + 1)) {
1055 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
1057 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1058 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
1063 for_each_queue(bp, i) {
1064 struct bnx2x_fastpath *fp = &bp->fp[i];
1066 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
1067 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
1068 for (j = start; j != end; j = TX_BD(j + 1)) {
1069 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
1071 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
1072 i, j, sw_bd->skb, sw_bd->first_bd);
1075 start = TX_BD(fp->tx_bd_cons - 10);
1076 end = TX_BD(fp->tx_bd_cons + 254);
1077 for (j = start; j != end; j = TX_BD(j + 1)) {
1078 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
1080 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
1081 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
1086 bnx2x_mc_assert(bp);
1087 BNX2X_ERR("end crash dump -----------------\n");
1090 static void bnx2x_hc_int_enable(struct bnx2x *bp)
1092 int port = BP_PORT(bp);
1093 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1094 u32 val = REG_RD(bp, addr);
1095 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1096 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1099 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1100 HC_CONFIG_0_REG_INT_LINE_EN_0);
1101 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1102 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1104 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1105 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1106 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1107 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1109 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1110 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1111 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1112 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1114 if (!CHIP_IS_E1(bp)) {
1115 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1118 REG_WR(bp, addr, val);
1120 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1125 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1127 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
1128 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1130 REG_WR(bp, addr, val);
1132 * Ensure that HC_CONFIG is written before leading/trailing edge config
1137 if (!CHIP_IS_E1(bp)) {
1138 /* init leading/trailing edge */
1140 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1142 /* enable nig and gpio3 attention */
1147 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1148 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1151 /* Make sure that interrupts are indeed enabled from here on */
1155 static void bnx2x_igu_int_enable(struct bnx2x *bp)
1158 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1159 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1161 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1164 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1165 IGU_PF_CONF_SINGLE_ISR_EN);
1166 val |= (IGU_PF_CONF_FUNC_EN |
1167 IGU_PF_CONF_MSI_MSIX_EN |
1168 IGU_PF_CONF_ATTN_BIT_EN);
1170 val &= ~IGU_PF_CONF_INT_LINE_EN;
1171 val |= (IGU_PF_CONF_FUNC_EN |
1172 IGU_PF_CONF_MSI_MSIX_EN |
1173 IGU_PF_CONF_ATTN_BIT_EN |
1174 IGU_PF_CONF_SINGLE_ISR_EN);
1176 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1177 val |= (IGU_PF_CONF_FUNC_EN |
1178 IGU_PF_CONF_INT_LINE_EN |
1179 IGU_PF_CONF_ATTN_BIT_EN |
1180 IGU_PF_CONF_SINGLE_ISR_EN);
1183 DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
1184 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1186 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1190 /* init leading/trailing edge */
1192 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1194 /* enable nig and gpio3 attention */
1199 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1200 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1202 /* Make sure that interrupts are indeed enabled from here on */
1206 void bnx2x_int_enable(struct bnx2x *bp)
1208 if (bp->common.int_block == INT_BLOCK_HC)
1209 bnx2x_hc_int_enable(bp);
1211 bnx2x_igu_int_enable(bp);
1214 static void bnx2x_hc_int_disable(struct bnx2x *bp)
1216 int port = BP_PORT(bp);
1217 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1218 u32 val = REG_RD(bp, addr);
1221 * in E1 we must use only PCI configuration space to disable
1222 * MSI/MSIX capablility
1223 * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
1225 if (CHIP_IS_E1(bp)) {
1226 /* Since IGU_PF_CONF_MSI_MSIX_EN still always on
1227 * Use mask register to prevent from HC sending interrupts
1228 * after we exit the function
1230 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
1232 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1233 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1234 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1236 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1237 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1238 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1239 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1241 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1244 /* flush all outstanding writes */
1247 REG_WR(bp, addr, val);
1248 if (REG_RD(bp, addr) != val)
1249 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1252 static void bnx2x_igu_int_disable(struct bnx2x *bp)
1254 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1256 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
1257 IGU_PF_CONF_INT_LINE_EN |
1258 IGU_PF_CONF_ATTN_BIT_EN);
1260 DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
1262 /* flush all outstanding writes */
1265 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1266 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
1267 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1270 void bnx2x_int_disable(struct bnx2x *bp)
1272 if (bp->common.int_block == INT_BLOCK_HC)
1273 bnx2x_hc_int_disable(bp);
1275 bnx2x_igu_int_disable(bp);
1278 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1280 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1283 /* disable interrupt handling */
1284 atomic_inc(&bp->intr_sem);
1285 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1288 /* prevent the HW from sending interrupts */
1289 bnx2x_int_disable(bp);
1291 /* make sure all ISRs are done */
1293 synchronize_irq(bp->msix_table[0].vector);
1298 for_each_queue(bp, i)
1299 synchronize_irq(bp->msix_table[i + offset].vector);
1301 synchronize_irq(bp->pdev->irq);
1303 /* make sure sp_task is not running */
1304 cancel_delayed_work(&bp->sp_task);
1305 flush_workqueue(bnx2x_wq);
1311 * General service functions
1314 /* Return true if succeeded to acquire the lock */
1315 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1318 u32 resource_bit = (1 << resource);
1319 int func = BP_FUNC(bp);
1320 u32 hw_lock_control_reg;
1322 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1324 /* Validating that the resource is within range */
1325 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1327 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1328 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1333 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1335 hw_lock_control_reg =
1336 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1338 /* Try to acquire the lock */
1339 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1340 lock_status = REG_RD(bp, hw_lock_control_reg);
1341 if (lock_status & resource_bit)
1344 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1349 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1352 void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1353 union eth_rx_cqe *rr_cqe)
1355 struct bnx2x *bp = fp->bp;
1356 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1357 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1360 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1361 fp->index, cid, command, bp->state,
1362 rr_cqe->ramrod_cqe.ramrod_type);
1364 switch (command | fp->state) {
1365 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1366 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1367 fp->state = BNX2X_FP_STATE_OPEN;
1370 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1371 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
1372 fp->state = BNX2X_FP_STATE_HALTED;
1375 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1376 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1377 fp->state = BNX2X_FP_STATE_TERMINATED;
1381 BNX2X_ERR("unexpected MC reply (%d) "
1382 "fp[%d] state is %x\n",
1383 command, fp->index, fp->state);
1387 smp_mb__before_atomic_inc();
1388 atomic_inc(&bp->spq_left);
1389 /* push the change in fp->state and towards the memory */
1395 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1397 struct bnx2x *bp = netdev_priv(dev_instance);
1398 u16 status = bnx2x_ack_int(bp);
1402 /* Return here if interrupt is shared and it's not for us */
1403 if (unlikely(status == 0)) {
1404 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1407 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1409 /* Return here if interrupt is disabled */
1410 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1411 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1415 #ifdef BNX2X_STOP_ON_ERROR
1416 if (unlikely(bp->panic))
1420 for_each_queue(bp, i) {
1421 struct bnx2x_fastpath *fp = &bp->fp[i];
1423 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
1424 if (status & mask) {
1425 /* Handle Rx and Tx according to SB id */
1426 prefetch(fp->rx_cons_sb);
1427 prefetch(fp->tx_cons_sb);
1428 prefetch(&fp->sb_running_index[SM_RX_ID]);
1429 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1436 if (status & (mask | 0x1)) {
1437 struct cnic_ops *c_ops = NULL;
1440 c_ops = rcu_dereference(bp->cnic_ops);
1442 c_ops->cnic_handler(bp->cnic_data, NULL);
1449 if (unlikely(status & 0x1)) {
1450 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1457 if (unlikely(status))
1458 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1464 /* end of fast path */
1470 * General service functions
1473 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1476 u32 resource_bit = (1 << resource);
1477 int func = BP_FUNC(bp);
1478 u32 hw_lock_control_reg;
1481 /* Validating that the resource is within range */
1482 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1484 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1485 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1490 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1492 hw_lock_control_reg =
1493 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1496 /* Validating that the resource is not already taken */
1497 lock_status = REG_RD(bp, hw_lock_control_reg);
1498 if (lock_status & resource_bit) {
1499 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1500 lock_status, resource_bit);
1504 /* Try for 5 second every 5ms */
1505 for (cnt = 0; cnt < 1000; cnt++) {
1506 /* Try to acquire the lock */
1507 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1508 lock_status = REG_RD(bp, hw_lock_control_reg);
1509 if (lock_status & resource_bit)
1514 DP(NETIF_MSG_HW, "Timeout\n");
1518 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1521 u32 resource_bit = (1 << resource);
1522 int func = BP_FUNC(bp);
1523 u32 hw_lock_control_reg;
1525 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1527 /* Validating that the resource is within range */
1528 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1530 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1531 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1536 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1538 hw_lock_control_reg =
1539 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1542 /* Validating that the resource is currently taken */
1543 lock_status = REG_RD(bp, hw_lock_control_reg);
1544 if (!(lock_status & resource_bit)) {
1545 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1546 lock_status, resource_bit);
1550 REG_WR(bp, hw_lock_control_reg, resource_bit);
1555 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1557 /* The GPIO should be swapped if swap register is set and active */
1558 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1559 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1560 int gpio_shift = gpio_num +
1561 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1562 u32 gpio_mask = (1 << gpio_shift);
1566 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1567 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1571 /* read GPIO value */
1572 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1574 /* get the requested pin value */
1575 if ((gpio_reg & gpio_mask) == gpio_mask)
1580 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1585 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1587 /* The GPIO should be swapped if swap register is set and active */
1588 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1589 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1590 int gpio_shift = gpio_num +
1591 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1592 u32 gpio_mask = (1 << gpio_shift);
1595 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1596 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1600 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1601 /* read GPIO and mask except the float bits */
1602 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1605 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1606 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1607 gpio_num, gpio_shift);
1608 /* clear FLOAT and set CLR */
1609 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1610 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1613 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1614 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1615 gpio_num, gpio_shift);
1616 /* clear FLOAT and set SET */
1617 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1618 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1621 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1622 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1623 gpio_num, gpio_shift);
1625 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1632 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1633 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1638 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1640 /* The GPIO should be swapped if swap register is set and active */
1641 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1642 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1643 int gpio_shift = gpio_num +
1644 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1645 u32 gpio_mask = (1 << gpio_shift);
1648 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1649 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1653 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1655 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1658 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1659 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1660 "output low\n", gpio_num, gpio_shift);
1661 /* clear SET and set CLR */
1662 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1663 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1666 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1667 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1668 "output high\n", gpio_num, gpio_shift);
1669 /* clear CLR and set SET */
1670 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1671 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1678 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1679 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1684 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1686 u32 spio_mask = (1 << spio_num);
1689 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1690 (spio_num > MISC_REGISTERS_SPIO_7)) {
1691 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1695 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1696 /* read SPIO and mask except the float bits */
1697 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1700 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1701 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1702 /* clear FLOAT and set CLR */
1703 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1704 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1707 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1708 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1709 /* clear FLOAT and set SET */
1710 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1711 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1714 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1715 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1717 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1724 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1725 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1730 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1732 u32 sel_phy_idx = 0;
1733 if (bp->link_vars.link_up) {
1734 sel_phy_idx = EXT_PHY1;
1735 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1736 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1737 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1738 sel_phy_idx = EXT_PHY2;
1741 switch (bnx2x_phy_selection(&bp->link_params)) {
1742 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1743 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1744 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1745 sel_phy_idx = EXT_PHY1;
1747 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1748 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1749 sel_phy_idx = EXT_PHY2;
1754 * The selected actived PHY is always after swapping (in case PHY
1755 * swapping is enabled). So when swapping is enabled, we need to reverse
1759 if (bp->link_params.multi_phy_config &
1760 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1761 if (sel_phy_idx == EXT_PHY1)
1762 sel_phy_idx = EXT_PHY2;
1763 else if (sel_phy_idx == EXT_PHY2)
1764 sel_phy_idx = EXT_PHY1;
1766 return LINK_CONFIG_IDX(sel_phy_idx);
1769 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1771 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
1772 switch (bp->link_vars.ieee_fc &
1773 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1774 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1775 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1779 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1780 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
1784 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1785 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
1789 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1795 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1797 if (!BP_NOMCP(bp)) {
1799 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1800 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
1801 /* Initialize link parameters structure variables */
1802 /* It is recommended to turn off RX FC for jumbo frames
1803 for better performance */
1804 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
1805 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1807 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1809 bnx2x_acquire_phy_lock(bp);
1811 if (load_mode == LOAD_DIAG) {
1812 bp->link_params.loopback_mode = LOOPBACK_XGXS;
1813 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1816 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1818 bnx2x_release_phy_lock(bp);
1820 bnx2x_calc_fc_adv(bp);
1822 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1823 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1824 bnx2x_link_report(bp);
1826 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
1829 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
1833 void bnx2x_link_set(struct bnx2x *bp)
1835 if (!BP_NOMCP(bp)) {
1836 bnx2x_acquire_phy_lock(bp);
1837 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1838 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1839 bnx2x_release_phy_lock(bp);
1841 bnx2x_calc_fc_adv(bp);
1843 BNX2X_ERR("Bootcode is missing - can not set link\n");
1846 static void bnx2x__link_reset(struct bnx2x *bp)
1848 if (!BP_NOMCP(bp)) {
1849 bnx2x_acquire_phy_lock(bp);
1850 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1851 bnx2x_release_phy_lock(bp);
1853 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1856 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
1860 if (!BP_NOMCP(bp)) {
1861 bnx2x_acquire_phy_lock(bp);
1862 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1864 bnx2x_release_phy_lock(bp);
1866 BNX2X_ERR("Bootcode is missing - can not test link\n");
1871 static void bnx2x_init_port_minmax(struct bnx2x *bp)
1873 u32 r_param = bp->link_vars.line_speed / 8;
1874 u32 fair_periodic_timeout_usec;
1877 memset(&(bp->cmng.rs_vars), 0,
1878 sizeof(struct rate_shaping_vars_per_port));
1879 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
1881 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1882 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1884 /* this is the threshold below which no timer arming will occur
1885 1.25 coefficient is for the threshold to be a little bigger
1886 than the real time, to compensate for timer in-accuracy */
1887 bp->cmng.rs_vars.rs_threshold =
1888 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1890 /* resolution of fairness timer */
1891 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1892 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1893 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
1895 /* this is the threshold below which we won't arm the timer anymore */
1896 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1898 /* we multiply by 1e3/8 to get bytes/msec.
1899 We don't want the credits to pass a credit
1900 of the t_fair*FAIR_MEM (algorithm resolution) */
1901 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1902 /* since each tick is 4 usec */
1903 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
1906 /* Calculates the sum of vn_min_rates.
1907 It's needed for further normalizing of the min_rates.
1909 sum of vn_min_rates.
1911 0 - if all the min_rates are 0.
1912 In the later case fainess algorithm should be deactivated.
1913 If not all min_rates are zero then those that are zeroes will be set to 1.
1915 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1920 bp->vn_weight_sum = 0;
1921 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1922 u32 vn_cfg = bp->mf_config[vn];
1923 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1924 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1926 /* Skip hidden vns */
1927 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1930 /* If min rate is zero - set it to 1 */
1932 vn_min_rate = DEF_MIN_RATE;
1936 bp->vn_weight_sum += vn_min_rate;
1939 /* ... only if all min rates are zeros - disable fairness */
1941 bp->cmng.flags.cmng_enables &=
1942 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1943 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1944 " fairness will be disabled\n");
1946 bp->cmng.flags.cmng_enables |=
1947 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1950 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1952 struct rate_shaping_vars_per_vn m_rs_vn;
1953 struct fairness_vars_per_vn m_fair_vn;
1954 u32 vn_cfg = bp->mf_config[vn];
1955 int func = 2*vn + BP_PORT(bp);
1956 u16 vn_min_rate, vn_max_rate;
1959 /* If function is hidden - set min and max to zeroes */
1960 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1965 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1966 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1967 /* If min rate is zero - set it to 1 */
1968 if (bp->vn_weight_sum && (vn_min_rate == 0))
1969 vn_min_rate = DEF_MIN_RATE;
1970 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1971 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1975 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
1976 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1978 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1979 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1981 /* global vn counter - maximal Mbps for this vn */
1982 m_rs_vn.vn_counter.rate = vn_max_rate;
1984 /* quota - number of bytes transmitted in this period */
1985 m_rs_vn.vn_counter.quota =
1986 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1988 if (bp->vn_weight_sum) {
1989 /* credit for each period of the fairness algorithm:
1990 number of bytes in T_FAIR (the vn share the port rate).
1991 vn_weight_sum should not be larger than 10000, thus
1992 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1994 m_fair_vn.vn_credit_delta =
1995 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1996 (8 * bp->vn_weight_sum))),
1997 (bp->cmng.fair_vars.fair_threshold * 2));
1998 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
1999 m_fair_vn.vn_credit_delta);
2002 /* Store it to internal memory */
2003 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2004 REG_WR(bp, BAR_XSTRORM_INTMEM +
2005 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2006 ((u32 *)(&m_rs_vn))[i]);
2008 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2009 REG_WR(bp, BAR_XSTRORM_INTMEM +
2010 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2011 ((u32 *)(&m_fair_vn))[i]);
2014 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2016 if (CHIP_REV_IS_SLOW(bp))
2017 return CMNG_FNS_NONE;
2019 return CMNG_FNS_MINMAX;
2021 return CMNG_FNS_NONE;
2024 static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2029 return; /* what should be the default bvalue in this case */
2031 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2032 int /*abs*/func = 2*vn + BP_PORT(bp);
2034 MF_CFG_RD(bp, func_mf_config[func].config);
2038 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2041 if (cmng_type == CMNG_FNS_MINMAX) {
2044 /* clear cmng_enables */
2045 bp->cmng.flags.cmng_enables = 0;
2047 /* read mf conf from shmem */
2049 bnx2x_read_mf_cfg(bp);
2051 /* Init rate shaping and fairness contexts */
2052 bnx2x_init_port_minmax(bp);
2054 /* vn_weight_sum and enable fairness if not 0 */
2055 bnx2x_calc_vn_weight_sum(bp);
2057 /* calculate and set min-max rate for each vn */
2058 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2059 bnx2x_init_vn_minmax(bp, vn);
2061 /* always enable rate shaping and fairness */
2062 bp->cmng.flags.cmng_enables |=
2063 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2064 if (!bp->vn_weight_sum)
2065 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2066 " fairness will be disabled\n");
2070 /* rate shaping and fairness are disabled */
2072 "rate shaping and fairness are disabled\n");
2075 static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2077 int port = BP_PORT(bp);
2081 /* Set the attention towards other drivers on the same port */
2082 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2083 if (vn == BP_E1HVN(bp))
2086 func = ((vn << 1) | port);
2087 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2088 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2092 /* This function is called upon link interrupt */
2093 static void bnx2x_link_attn(struct bnx2x *bp)
2095 u32 prev_link_status = bp->link_vars.link_status;
2096 /* Make sure that we are synced with the current statistics */
2097 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2099 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2101 if (bp->link_vars.link_up) {
2103 /* dropless flow control */
2104 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
2105 int port = BP_PORT(bp);
2106 u32 pause_enabled = 0;
2108 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2111 REG_WR(bp, BAR_USTRORM_INTMEM +
2112 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2116 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2117 struct host_port_stats *pstats;
2119 pstats = bnx2x_sp(bp, port_stats);
2120 /* reset old bmac stats */
2121 memset(&(pstats->mac_stx[0]), 0,
2122 sizeof(struct mac_stx));
2124 if (bp->state == BNX2X_STATE_OPEN)
2125 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2128 /* indicate link status only if link status actually changed */
2129 if (prev_link_status != bp->link_vars.link_status)
2130 bnx2x_link_report(bp);
2133 bnx2x_link_sync_notify(bp);
2135 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2136 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2138 if (cmng_fns != CMNG_FNS_NONE) {
2139 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2140 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2142 /* rate shaping and fairness are disabled */
2144 "single function mode without fairness\n");
2148 void bnx2x__link_status_update(struct bnx2x *bp)
2150 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2153 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2155 if (bp->link_vars.link_up)
2156 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2158 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2160 /* the link status update could be the result of a DCC event
2161 hence re-read the shmem mf configuration */
2162 bnx2x_read_mf_cfg(bp);
2164 /* indicate link status */
2165 bnx2x_link_report(bp);
2168 static void bnx2x_pmf_update(struct bnx2x *bp)
2170 int port = BP_PORT(bp);
2174 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2176 /* enable nig attention */
2177 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2178 if (bp->common.int_block == INT_BLOCK_HC) {
2179 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2180 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2181 } else if (CHIP_IS_E2(bp)) {
2182 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2183 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2186 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2194 * General service functions
2197 /* send the MCP a request, block until there is a reply */
2198 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2200 int mb_idx = BP_FW_MB_IDX(bp);
2201 u32 seq = ++bp->fw_seq;
2204 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2206 mutex_lock(&bp->fw_mb_mutex);
2207 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2208 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2210 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2213 /* let the FW do it's magic ... */
2216 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2218 /* Give the FW up to 5 second (500*10ms) */
2219 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2221 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2222 cnt*delay, rc, seq);
2224 /* is this a reply to our command? */
2225 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2226 rc &= FW_MSG_CODE_MASK;
2229 BNX2X_ERR("FW failed to respond!\n");
2233 mutex_unlock(&bp->fw_mb_mutex);
2238 /* must be called under rtnl_lock */
2239 void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2241 u32 mask = (1 << cl_id);
2243 /* initial seeting is BNX2X_ACCEPT_NONE */
2244 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2245 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2246 u8 unmatched_unicast = 0;
2248 if (filters & BNX2X_PROMISCUOUS_MODE) {
2249 /* promiscious - accept all, drop none */
2250 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2251 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2253 if (filters & BNX2X_ACCEPT_UNICAST) {
2254 /* accept matched ucast */
2257 if (filters & BNX2X_ACCEPT_MULTICAST) {
2258 /* accept matched mcast */
2261 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2262 /* accept all mcast */
2266 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2267 /* accept all mcast */
2271 if (filters & BNX2X_ACCEPT_BROADCAST) {
2272 /* accept (all) bcast */
2277 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2278 bp->mac_filters.ucast_drop_all | mask :
2279 bp->mac_filters.ucast_drop_all & ~mask;
2281 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2282 bp->mac_filters.mcast_drop_all | mask :
2283 bp->mac_filters.mcast_drop_all & ~mask;
2285 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2286 bp->mac_filters.bcast_drop_all | mask :
2287 bp->mac_filters.bcast_drop_all & ~mask;
2289 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2290 bp->mac_filters.ucast_accept_all | mask :
2291 bp->mac_filters.ucast_accept_all & ~mask;
2293 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2294 bp->mac_filters.mcast_accept_all | mask :
2295 bp->mac_filters.mcast_accept_all & ~mask;
2297 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2298 bp->mac_filters.bcast_accept_all | mask :
2299 bp->mac_filters.bcast_accept_all & ~mask;
2301 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2302 bp->mac_filters.unmatched_unicast | mask :
2303 bp->mac_filters.unmatched_unicast & ~mask;
2306 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2308 struct tstorm_eth_function_common_config tcfg = {0};
2312 if (p->func_flgs & FUNC_FLG_TPA)
2313 tcfg.config_flags |=
2314 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2317 rss_flgs = (p->rss->mode <<
2318 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2320 if (p->rss->cap & RSS_IPV4_CAP)
2321 rss_flgs |= RSS_IPV4_CAP_MASK;
2322 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2323 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2324 if (p->rss->cap & RSS_IPV6_CAP)
2325 rss_flgs |= RSS_IPV6_CAP_MASK;
2326 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2327 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2329 tcfg.config_flags |= rss_flgs;
2330 tcfg.rss_result_mask = p->rss->result_mask;
2332 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2334 /* Enable the function in the FW */
2335 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2336 storm_memset_func_en(bp, p->func_id, 1);
2339 if (p->func_flgs & FUNC_FLG_STATS) {
2340 struct stats_indication_flags stats_flags = {0};
2341 stats_flags.collect_eth = 1;
2343 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2344 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2346 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2347 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2349 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2350 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2352 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2353 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2357 if (p->func_flgs & FUNC_FLG_SPQ) {
2358 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2359 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2360 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2364 static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2365 struct bnx2x_fastpath *fp)
2369 /* calculate queue flags */
2370 flags |= QUEUE_FLG_CACHE_ALIGN;
2371 flags |= QUEUE_FLG_HC;
2372 flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
2375 flags |= QUEUE_FLG_VLAN;
2376 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2379 if (!fp->disable_tpa)
2380 flags |= QUEUE_FLG_TPA;
2382 flags |= QUEUE_FLG_STATS;
2387 static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2388 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2389 struct bnx2x_rxq_init_params *rxq_init)
2393 u16 tpa_agg_size = 0;
2395 /* calculate queue flags */
2396 u16 flags = bnx2x_get_cl_flags(bp, fp);
2398 if (!fp->disable_tpa) {
2399 pause->sge_th_hi = 250;
2400 pause->sge_th_lo = 150;
2401 tpa_agg_size = min_t(u32,
2402 (min_t(u32, 8, MAX_SKB_FRAGS) *
2403 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2404 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2406 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2407 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2408 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2412 /* pause - not for e1 */
2413 if (!CHIP_IS_E1(bp)) {
2414 pause->bd_th_hi = 350;
2415 pause->bd_th_lo = 250;
2416 pause->rcq_th_hi = 350;
2417 pause->rcq_th_lo = 250;
2418 pause->sge_th_hi = 0;
2419 pause->sge_th_lo = 0;
2424 rxq_init->flags = flags;
2425 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2426 rxq_init->dscr_map = fp->rx_desc_mapping;
2427 rxq_init->sge_map = fp->rx_sge_mapping;
2428 rxq_init->rcq_map = fp->rx_comp_mapping;
2429 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2430 rxq_init->mtu = bp->dev->mtu;
2431 rxq_init->buf_sz = bp->rx_buf_size;
2432 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2433 rxq_init->cl_id = fp->cl_id;
2434 rxq_init->spcl_id = fp->cl_id;
2435 rxq_init->stat_id = fp->cl_id;
2436 rxq_init->tpa_agg_sz = tpa_agg_size;
2437 rxq_init->sge_buf_sz = sge_sz;
2438 rxq_init->max_sges_pkt = max_sge;
2439 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2440 rxq_init->fw_sb_id = fp->fw_sb_id;
2442 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2444 rxq_init->cid = HW_CID(bp, fp->cid);
2446 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2449 static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2450 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2452 u16 flags = bnx2x_get_cl_flags(bp, fp);
2454 txq_init->flags = flags;
2455 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2456 txq_init->dscr_map = fp->tx_desc_mapping;
2457 txq_init->stat_id = fp->cl_id;
2458 txq_init->cid = HW_CID(bp, fp->cid);
2459 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2460 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2461 txq_init->fw_sb_id = fp->fw_sb_id;
2462 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2465 void bnx2x_pf_init(struct bnx2x *bp)
2467 struct bnx2x_func_init_params func_init = {0};
2468 struct bnx2x_rss_params rss = {0};
2469 struct event_ring_data eq_data = { {0} };
2472 /* pf specific setups */
2473 if (!CHIP_IS_E1(bp))
2474 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
2476 if (CHIP_IS_E2(bp)) {
2477 /* reset IGU PF statistics: MSIX + ATTN */
2479 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2480 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2481 (CHIP_MODE_IS_4_PORT(bp) ?
2482 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2484 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2485 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2486 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2487 (CHIP_MODE_IS_4_PORT(bp) ?
2488 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2491 /* function setup flags */
2492 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2494 if (CHIP_IS_E1x(bp))
2495 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2497 flags |= FUNC_FLG_TPA;
2499 /* function setup */
2502 * Although RSS is meaningless when there is a single HW queue we
2503 * still need it enabled in order to have HW Rx hash generated.
2505 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2506 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2507 rss.mode = bp->multi_mode;
2508 rss.result_mask = MULTI_MASK;
2509 func_init.rss = &rss;
2511 func_init.func_flgs = flags;
2512 func_init.pf_id = BP_FUNC(bp);
2513 func_init.func_id = BP_FUNC(bp);
2514 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2515 func_init.spq_map = bp->spq_mapping;
2516 func_init.spq_prod = bp->spq_prod_idx;
2518 bnx2x_func_init(bp, &func_init);
2520 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2523 Congestion management values depend on the link rate
2524 There is no active link so initial link rate is set to 10 Gbps.
2525 When the link comes up The congestion management values are
2526 re-calculated according to the actual link rate.
2528 bp->link_vars.line_speed = SPEED_10000;
2529 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2531 /* Only the PMF sets the HW */
2533 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2535 /* no rx until link is up */
2536 bp->rx_mode = BNX2X_RX_MODE_NONE;
2537 bnx2x_set_storm_rx_mode(bp);
2539 /* init Event Queue */
2540 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2541 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2542 eq_data.producer = bp->eq_prod;
2543 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2544 eq_data.sb_id = DEF_SB_ID;
2545 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2549 static void bnx2x_e1h_disable(struct bnx2x *bp)
2551 int port = BP_PORT(bp);
2553 netif_tx_disable(bp->dev);
2555 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2557 netif_carrier_off(bp->dev);
2560 static void bnx2x_e1h_enable(struct bnx2x *bp)
2562 int port = BP_PORT(bp);
2564 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2566 /* Tx queue should be only reenabled */
2567 netif_tx_wake_all_queues(bp->dev);
2570 * Should not call netif_carrier_on since it will be called if the link
2571 * is up when checking for link state
2575 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2577 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2579 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2582 * This is the only place besides the function initialization
2583 * where the bp->flags can change so it is done without any
2586 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2587 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2588 bp->flags |= MF_FUNC_DIS;
2590 bnx2x_e1h_disable(bp);
2592 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2593 bp->flags &= ~MF_FUNC_DIS;
2595 bnx2x_e1h_enable(bp);
2597 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2599 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2601 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2602 bnx2x_link_sync_notify(bp);
2603 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2604 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2607 /* Report results to MCP */
2609 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2611 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2614 /* must be called under the spq lock */
2615 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2617 struct eth_spe *next_spe = bp->spq_prod_bd;
2619 if (bp->spq_prod_bd == bp->spq_last_bd) {
2620 bp->spq_prod_bd = bp->spq;
2621 bp->spq_prod_idx = 0;
2622 DP(NETIF_MSG_TIMER, "end of spq\n");
2630 /* must be called under the spq lock */
2631 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2633 int func = BP_FUNC(bp);
2635 /* Make sure that BD data is updated before writing the producer */
2638 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2643 /* the slow path queue is odd since completions arrive on the fastpath ring */
2644 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2645 u32 data_hi, u32 data_lo, int common)
2647 struct eth_spe *spe;
2650 #ifdef BNX2X_STOP_ON_ERROR
2651 if (unlikely(bp->panic))
2655 spin_lock_bh(&bp->spq_lock);
2657 if (!atomic_read(&bp->spq_left)) {
2658 BNX2X_ERR("BUG! SPQ ring full!\n");
2659 spin_unlock_bh(&bp->spq_lock);
2664 spe = bnx2x_sp_get_next(bp);
2666 /* CID needs port number to be encoded int it */
2667 spe->hdr.conn_and_cmd_data =
2668 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2673 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2674 * TRAFFIC_STOP, TRAFFIC_START
2676 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2677 & SPE_HDR_CONN_TYPE;
2679 /* ETH ramrods: SETUP, HALT */
2680 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2681 & SPE_HDR_CONN_TYPE;
2683 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2684 SPE_HDR_FUNCTION_ID);
2686 spe->hdr.type = cpu_to_le16(type);
2688 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2689 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2691 /* stats ramrod has it's own slot on the spq */
2692 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2693 /* It's ok if the actual decrement is issued towards the memory
2694 * somewhere between the spin_lock and spin_unlock. Thus no
2695 * more explict memory barrier is needed.
2697 atomic_dec(&bp->spq_left);
2699 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2700 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2701 "type(0x%x) left %x\n",
2702 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2703 (u32)(U64_LO(bp->spq_mapping) +
2704 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2705 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
2707 bnx2x_sp_prod_update(bp);
2708 spin_unlock_bh(&bp->spq_lock);
2712 /* acquire split MCP access lock register */
2713 static int bnx2x_acquire_alr(struct bnx2x *bp)
2719 for (j = 0; j < 1000; j++) {
2721 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2722 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2723 if (val & (1L << 31))
2728 if (!(val & (1L << 31))) {
2729 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2736 /* release split MCP access lock register */
2737 static void bnx2x_release_alr(struct bnx2x *bp)
2739 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2742 #define BNX2X_DEF_SB_ATT_IDX 0x0001
2743 #define BNX2X_DEF_SB_IDX 0x0002
2745 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2747 struct host_sp_status_block *def_sb = bp->def_status_blk;
2750 barrier(); /* status block is written to by the chip */
2751 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2752 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2753 rc |= BNX2X_DEF_SB_ATT_IDX;
2756 if (bp->def_idx != def_sb->sp_sb.running_index) {
2757 bp->def_idx = def_sb->sp_sb.running_index;
2758 rc |= BNX2X_DEF_SB_IDX;
2761 /* Do not reorder: indecies reading should complete before handling */
2767 * slow path service functions
2770 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2772 int port = BP_PORT(bp);
2773 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2774 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2775 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2776 NIG_REG_MASK_INTERRUPT_PORT0;
2781 if (bp->attn_state & asserted)
2782 BNX2X_ERR("IGU ERROR\n");
2784 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2785 aeu_mask = REG_RD(bp, aeu_addr);
2787 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2788 aeu_mask, asserted);
2789 aeu_mask &= ~(asserted & 0x3ff);
2790 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2792 REG_WR(bp, aeu_addr, aeu_mask);
2793 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2795 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2796 bp->attn_state |= asserted;
2797 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2799 if (asserted & ATTN_HARD_WIRED_MASK) {
2800 if (asserted & ATTN_NIG_FOR_FUNC) {
2802 bnx2x_acquire_phy_lock(bp);
2804 /* save nig interrupt mask */
2805 nig_mask = REG_RD(bp, nig_int_mask_addr);
2806 REG_WR(bp, nig_int_mask_addr, 0);
2808 bnx2x_link_attn(bp);
2810 /* handle unicore attn? */
2812 if (asserted & ATTN_SW_TIMER_4_FUNC)
2813 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2815 if (asserted & GPIO_2_FUNC)
2816 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2818 if (asserted & GPIO_3_FUNC)
2819 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2821 if (asserted & GPIO_4_FUNC)
2822 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2825 if (asserted & ATTN_GENERAL_ATTN_1) {
2826 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2827 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2829 if (asserted & ATTN_GENERAL_ATTN_2) {
2830 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2831 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2833 if (asserted & ATTN_GENERAL_ATTN_3) {
2834 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2835 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2838 if (asserted & ATTN_GENERAL_ATTN_4) {
2839 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2840 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2842 if (asserted & ATTN_GENERAL_ATTN_5) {
2843 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2844 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2846 if (asserted & ATTN_GENERAL_ATTN_6) {
2847 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2848 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2852 } /* if hardwired */
2854 if (bp->common.int_block == INT_BLOCK_HC)
2855 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2856 COMMAND_REG_ATTN_BITS_SET);
2858 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2860 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2861 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2862 REG_WR(bp, reg_addr, asserted);
2864 /* now set back the mask */
2865 if (asserted & ATTN_NIG_FOR_FUNC) {
2866 REG_WR(bp, nig_int_mask_addr, nig_mask);
2867 bnx2x_release_phy_lock(bp);
2871 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2873 int port = BP_PORT(bp);
2875 /* mark the failure */
2878 dev_info.port_hw_config[port].external_phy_config);
2880 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2881 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2882 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2885 /* log the failure */
2886 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2887 " the driver to shutdown the card to prevent permanent"
2888 " damage. Please contact OEM Support for assistance\n");
2891 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2893 int port = BP_PORT(bp);
2897 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2898 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2900 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2902 val = REG_RD(bp, reg_offset);
2903 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2904 REG_WR(bp, reg_offset, val);
2906 BNX2X_ERR("SPIO5 hw attention\n");
2908 /* Fan failure attention */
2909 bnx2x_hw_reset_phy(&bp->link_params);
2910 bnx2x_fan_failure(bp);
2913 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2914 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2915 bnx2x_acquire_phy_lock(bp);
2916 bnx2x_handle_module_detect_int(&bp->link_params);
2917 bnx2x_release_phy_lock(bp);
2920 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2922 val = REG_RD(bp, reg_offset);
2923 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2924 REG_WR(bp, reg_offset, val);
2926 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2927 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2932 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2936 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2938 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2939 BNX2X_ERR("DB hw attention 0x%x\n", val);
2940 /* DORQ discard attention */
2942 BNX2X_ERR("FATAL error from DORQ\n");
2945 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2947 int port = BP_PORT(bp);
2950 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2951 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2953 val = REG_RD(bp, reg_offset);
2954 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2955 REG_WR(bp, reg_offset, val);
2957 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2958 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2963 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2967 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2969 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2970 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2971 /* CFC error attention */
2973 BNX2X_ERR("FATAL error from CFC\n");
2976 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2978 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2979 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2980 /* RQ_USDMDP_FIFO_OVERFLOW */
2982 BNX2X_ERR("FATAL error from PXP\n");
2983 if (CHIP_IS_E2(bp)) {
2984 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
2985 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
2989 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2991 int port = BP_PORT(bp);
2994 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2995 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2997 val = REG_RD(bp, reg_offset);
2998 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2999 REG_WR(bp, reg_offset, val);
3001 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3002 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3007 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3011 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3013 if (attn & BNX2X_PMF_LINK_ASSERT) {
3014 int func = BP_FUNC(bp);
3016 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3017 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3018 func_mf_config[BP_ABS_FUNC(bp)].config);
3020 func_mb[BP_FW_MB_IDX(bp)].drv_status);
3021 if (val & DRV_STATUS_DCC_EVENT_MASK)
3023 (val & DRV_STATUS_DCC_EVENT_MASK));
3024 bnx2x__link_status_update(bp);
3025 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3026 bnx2x_pmf_update(bp);
3028 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3030 BNX2X_ERR("MC assert!\n");
3031 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3032 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3033 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3034 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3037 } else if (attn & BNX2X_MCP_ASSERT) {
3039 BNX2X_ERR("MCP assert!\n");
3040 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3044 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3047 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3048 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3049 if (attn & BNX2X_GRC_TIMEOUT) {
3050 val = CHIP_IS_E1(bp) ? 0 :
3051 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
3052 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3054 if (attn & BNX2X_GRC_RSV) {
3055 val = CHIP_IS_E1(bp) ? 0 :
3056 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
3057 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3059 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3063 #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3064 #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3065 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3066 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3067 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3068 #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3071 * should be run under rtnl lock
3073 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3075 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3076 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3077 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3083 * should be run under rtnl lock
3085 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3087 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3089 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3095 * should be run under rtnl lock
3097 bool bnx2x_reset_is_done(struct bnx2x *bp)
3099 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3100 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3101 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3105 * should be run under rtnl lock
3107 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3109 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3111 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3113 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3114 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3120 * should be run under rtnl lock
3122 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3124 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3126 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3128 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3129 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3137 * should be run under rtnl lock
3139 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3141 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3144 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3146 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3147 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3150 static inline void _print_next_block(int idx, const char *blk)
3157 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3161 for (i = 0; sig; i++) {
3162 cur_bit = ((u32)0x1 << i);
3163 if (sig & cur_bit) {
3165 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3166 _print_next_block(par_num++, "BRB");
3168 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3169 _print_next_block(par_num++, "PARSER");
3171 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3172 _print_next_block(par_num++, "TSDM");
3174 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3175 _print_next_block(par_num++, "SEARCHER");
3177 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3178 _print_next_block(par_num++, "TSEMI");
3190 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3194 for (i = 0; sig; i++) {
3195 cur_bit = ((u32)0x1 << i);
3196 if (sig & cur_bit) {
3198 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3199 _print_next_block(par_num++, "PBCLIENT");
3201 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3202 _print_next_block(par_num++, "QM");
3204 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3205 _print_next_block(par_num++, "XSDM");
3207 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3208 _print_next_block(par_num++, "XSEMI");
3210 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3211 _print_next_block(par_num++, "DOORBELLQ");
3213 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3214 _print_next_block(par_num++, "VAUX PCI CORE");
3216 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3217 _print_next_block(par_num++, "DEBUG");
3219 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3220 _print_next_block(par_num++, "USDM");
3222 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3223 _print_next_block(par_num++, "USEMI");
3225 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3226 _print_next_block(par_num++, "UPB");
3228 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3229 _print_next_block(par_num++, "CSDM");
3241 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3245 for (i = 0; sig; i++) {
3246 cur_bit = ((u32)0x1 << i);
3247 if (sig & cur_bit) {
3249 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3250 _print_next_block(par_num++, "CSEMI");
3252 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3253 _print_next_block(par_num++, "PXP");
3255 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3256 _print_next_block(par_num++,
3257 "PXPPCICLOCKCLIENT");
3259 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3260 _print_next_block(par_num++, "CFC");
3262 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3263 _print_next_block(par_num++, "CDU");
3265 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3266 _print_next_block(par_num++, "IGU");
3268 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3269 _print_next_block(par_num++, "MISC");
3281 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3285 for (i = 0; sig; i++) {
3286 cur_bit = ((u32)0x1 << i);
3287 if (sig & cur_bit) {
3289 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3290 _print_next_block(par_num++, "MCP ROM");
3292 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3293 _print_next_block(par_num++, "MCP UMP RX");
3295 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3296 _print_next_block(par_num++, "MCP UMP TX");
3298 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3299 _print_next_block(par_num++, "MCP SCPAD");
3311 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3314 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3315 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3317 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3318 "[0]:0x%08x [1]:0x%08x "
3319 "[2]:0x%08x [3]:0x%08x\n",
3320 sig0 & HW_PRTY_ASSERT_SET_0,
3321 sig1 & HW_PRTY_ASSERT_SET_1,
3322 sig2 & HW_PRTY_ASSERT_SET_2,
3323 sig3 & HW_PRTY_ASSERT_SET_3);
3324 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3326 par_num = bnx2x_print_blocks_with_parity0(
3327 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3328 par_num = bnx2x_print_blocks_with_parity1(
3329 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3330 par_num = bnx2x_print_blocks_with_parity2(
3331 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3332 par_num = bnx2x_print_blocks_with_parity3(
3333 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3340 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3342 struct attn_route attn;
3343 int port = BP_PORT(bp);
3345 attn.sig[0] = REG_RD(bp,
3346 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3348 attn.sig[1] = REG_RD(bp,
3349 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3351 attn.sig[2] = REG_RD(bp,
3352 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3354 attn.sig[3] = REG_RD(bp,
3355 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3358 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3363 static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3366 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3368 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3369 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3370 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3371 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3373 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3374 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3375 "INCORRECT_RCV_BEHAVIOR\n");
3376 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3377 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3378 "WAS_ERROR_ATTN\n");
3379 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3380 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3381 "VF_LENGTH_VIOLATION_ATTN\n");
3383 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3384 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3385 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3387 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3388 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3389 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3390 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3391 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3392 "TCPL_ERROR_ATTN\n");
3393 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3394 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3395 "TCPL_IN_TWO_RCBS_ATTN\n");
3396 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3397 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3398 "CSSNOOP_FIFO_OVERFLOW\n");
3400 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3401 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3402 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3403 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3404 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3405 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3406 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3407 "_ATC_TCPL_TO_NOT_PEND\n");
3408 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3409 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3410 "ATC_GPA_MULTIPLE_HITS\n");
3411 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3412 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3413 "ATC_RCPL_TO_EMPTY_CNT\n");
3414 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3415 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3416 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3417 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3418 "ATC_IREQ_LESS_THAN_STU\n");
3421 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3422 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3423 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3424 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3425 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3430 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3432 struct attn_route attn, *group_mask;
3433 int port = BP_PORT(bp);
3439 /* need to take HW lock because MCP or other port might also
3440 try to handle this event */
3441 bnx2x_acquire_alr(bp);
3443 if (bnx2x_chk_parity_attn(bp)) {
3444 bp->recovery_state = BNX2X_RECOVERY_INIT;
3445 bnx2x_set_reset_in_progress(bp);
3446 schedule_delayed_work(&bp->reset_task, 0);
3447 /* Disable HW interrupts */
3448 bnx2x_int_disable(bp);
3449 bnx2x_release_alr(bp);
3450 /* In case of parity errors don't handle attentions so that
3451 * other function would "see" parity errors.
3456 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3457 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3458 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3459 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3462 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3466 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3467 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
3469 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3470 if (deasserted & (1 << index)) {
3471 group_mask = &bp->attn_group[index];
3473 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3476 group_mask->sig[0], group_mask->sig[1],
3477 group_mask->sig[2], group_mask->sig[3],
3478 group_mask->sig[4]);
3480 bnx2x_attn_int_deasserted4(bp,
3481 attn.sig[4] & group_mask->sig[4]);
3482 bnx2x_attn_int_deasserted3(bp,
3483 attn.sig[3] & group_mask->sig[3]);
3484 bnx2x_attn_int_deasserted1(bp,
3485 attn.sig[1] & group_mask->sig[1]);
3486 bnx2x_attn_int_deasserted2(bp,
3487 attn.sig[2] & group_mask->sig[2]);
3488 bnx2x_attn_int_deasserted0(bp,
3489 attn.sig[0] & group_mask->sig[0]);
3493 bnx2x_release_alr(bp);
3495 if (bp->common.int_block == INT_BLOCK_HC)
3496 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3497 COMMAND_REG_ATTN_BITS_CLR);
3499 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
3502 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3503 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3504 REG_WR(bp, reg_addr, val);
3506 if (~bp->attn_state & deasserted)
3507 BNX2X_ERR("IGU ERROR\n");
3509 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3510 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3512 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3513 aeu_mask = REG_RD(bp, reg_addr);
3515 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3516 aeu_mask, deasserted);
3517 aeu_mask |= (deasserted & 0x3ff);
3518 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3520 REG_WR(bp, reg_addr, aeu_mask);
3521 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3523 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3524 bp->attn_state &= ~deasserted;
3525 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3528 static void bnx2x_attn_int(struct bnx2x *bp)
3530 /* read local copy of bits */
3531 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3533 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3535 u32 attn_state = bp->attn_state;
3537 /* look for changed bits */
3538 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3539 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3542 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3543 attn_bits, attn_ack, asserted, deasserted);
3545 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3546 BNX2X_ERR("BAD attention state\n");
3548 /* handle bits that were raised */
3550 bnx2x_attn_int_asserted(bp, asserted);
3553 bnx2x_attn_int_deasserted(bp, deasserted);
3556 static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3558 /* No memory barriers */
3559 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3560 mmiowb(); /* keep prod updates ordered */
3564 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3565 union event_ring_elem *elem)
3567 if (!bp->cnic_eth_dev.starting_cid ||
3568 cid < bp->cnic_eth_dev.starting_cid)
3571 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3573 if (unlikely(elem->message.data.cfc_del_event.error)) {
3574 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3576 bnx2x_panic_dump(bp);
3578 bnx2x_cnic_cfc_comp(bp, cid);
3583 static void bnx2x_eq_int(struct bnx2x *bp)
3585 u16 hw_cons, sw_cons, sw_prod;
3586 union event_ring_elem *elem;
3591 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3593 /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256.
3594 * when we get the the next-page we nned to adjust so the loop
3595 * condition below will be met. The next element is the size of a
3596 * regular element and hence incrementing by 1
3598 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3601 /* This function may never run in parralel with itself for a
3602 * specific bp, thus there is no need in "paired" read memory
3605 sw_cons = bp->eq_cons;
3606 sw_prod = bp->eq_prod;
3608 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
3609 hw_cons, sw_cons, atomic_read(&bp->spq_left));
3611 for (; sw_cons != hw_cons;
3612 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3615 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3617 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3618 opcode = elem->message.opcode;
3621 /* handle eq element */
3623 case EVENT_RING_OPCODE_STAT_QUERY:
3624 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3625 /* nothing to do with stats comp */
3628 case EVENT_RING_OPCODE_CFC_DEL:
3629 /* handle according to cid range */
3631 * we may want to verify here that the bp state is
3634 DP(NETIF_MSG_IFDOWN,
3635 "got delete ramrod for MULTI[%d]\n", cid);
3637 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3640 bnx2x_fp(bp, cid, state) =
3641 BNX2X_FP_STATE_CLOSED;
3646 switch (opcode | bp->state) {
3647 case (EVENT_RING_OPCODE_FUNCTION_START |
3648 BNX2X_STATE_OPENING_WAIT4_PORT):
3649 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3650 bp->state = BNX2X_STATE_FUNC_STARTED;
3653 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3654 BNX2X_STATE_CLOSING_WAIT4_HALT):
3655 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3656 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3659 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3660 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3661 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3662 bp->set_mac_pending = 0;
3665 case (EVENT_RING_OPCODE_SET_MAC |
3666 BNX2X_STATE_CLOSING_WAIT4_HALT):
3667 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3668 bp->set_mac_pending = 0;
3671 /* unknown event log error and continue */
3672 BNX2X_ERR("Unknown EQ event %d\n",
3673 elem->message.opcode);
3679 smp_mb__before_atomic_inc();
3680 atomic_add(spqe_cnt, &bp->spq_left);
3682 bp->eq_cons = sw_cons;
3683 bp->eq_prod = sw_prod;
3684 /* Make sure that above mem writes were issued towards the memory */
3687 /* update producer */
3688 bnx2x_update_eq_prod(bp, bp->eq_prod);
3691 static void bnx2x_sp_task(struct work_struct *work)
3693 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3696 /* Return here if interrupt is disabled */
3697 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3698 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3702 status = bnx2x_update_dsb_idx(bp);
3703 /* if (status == 0) */
3704 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3706 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3709 if (status & BNX2X_DEF_SB_ATT_IDX) {
3711 status &= ~BNX2X_DEF_SB_ATT_IDX;
3714 /* SP events: STAT_QUERY and others */
3715 if (status & BNX2X_DEF_SB_IDX) {
3717 /* Handle EQ completions */
3720 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3721 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3723 status &= ~BNX2X_DEF_SB_IDX;
3726 if (unlikely(status))
3727 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3730 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3731 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
3734 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3736 struct net_device *dev = dev_instance;
3737 struct bnx2x *bp = netdev_priv(dev);
3739 /* Return here if interrupt is disabled */
3740 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3741 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3745 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3746 IGU_INT_DISABLE, 0);
3748 #ifdef BNX2X_STOP_ON_ERROR
3749 if (unlikely(bp->panic))
3755 struct cnic_ops *c_ops;
3758 c_ops = rcu_dereference(bp->cnic_ops);
3760 c_ops->cnic_handler(bp->cnic_data, NULL);
3764 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3769 /* end of slow path */
3771 static void bnx2x_timer(unsigned long data)
3773 struct bnx2x *bp = (struct bnx2x *) data;
3775 if (!netif_running(bp->dev))
3778 if (atomic_read(&bp->intr_sem) != 0)
3782 struct bnx2x_fastpath *fp = &bp->fp[0];
3786 rc = bnx2x_rx_int(fp, 1000);
3789 if (!BP_NOMCP(bp)) {
3790 int mb_idx = BP_FW_MB_IDX(bp);
3794 ++bp->fw_drv_pulse_wr_seq;
3795 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3796 /* TBD - add SYSTEM_TIME */
3797 drv_pulse = bp->fw_drv_pulse_wr_seq;
3798 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
3800 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
3801 MCP_PULSE_SEQ_MASK);
3802 /* The delta between driver pulse and mcp response
3803 * should be 1 (before mcp response) or 0 (after mcp response)
3805 if ((drv_pulse != mcp_pulse) &&
3806 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3807 /* someone lost a heartbeat... */
3808 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3809 drv_pulse, mcp_pulse);
3813 if (bp->state == BNX2X_STATE_OPEN)
3814 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3817 mod_timer(&bp->timer, jiffies + bp->current_interval);
3820 /* end of Statistics */
3825 * nic init service functions
3828 static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
3831 if (!(len%4) && !(addr%4))
3832 for (i = 0; i < len; i += 4)
3833 REG_WR(bp, addr + i, fill);
3835 for (i = 0; i < len; i++)
3836 REG_WR8(bp, addr + i, fill);
3840 /* helper: writes FP SP data to FW - data_size in dwords */
3841 static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3847 for (index = 0; index < data_size; index++)
3848 REG_WR(bp, BAR_CSTRORM_INTMEM +
3849 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3851 *(sb_data_p + index));
3854 static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3858 struct hc_status_block_data_e2 sb_data_e2;
3859 struct hc_status_block_data_e1x sb_data_e1x;
3861 /* disable the function first */
3862 if (CHIP_IS_E2(bp)) {
3863 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3864 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3865 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3866 sb_data_e2.common.p_func.vf_valid = false;
3867 sb_data_p = (u32 *)&sb_data_e2;
3868 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3870 memset(&sb_data_e1x, 0,
3871 sizeof(struct hc_status_block_data_e1x));
3872 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3873 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3874 sb_data_e1x.common.p_func.vf_valid = false;
3875 sb_data_p = (u32 *)&sb_data_e1x;
3876 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3878 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3880 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3881 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3882 CSTORM_STATUS_BLOCK_SIZE);
3883 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3884 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3885 CSTORM_SYNC_BLOCK_SIZE);
3888 /* helper: writes SP SB data to FW */
3889 static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3890 struct hc_sp_status_block_data *sp_sb_data)
3892 int func = BP_FUNC(bp);
3894 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3895 REG_WR(bp, BAR_CSTRORM_INTMEM +
3896 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3898 *((u32 *)sp_sb_data + i));
3901 static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3903 int func = BP_FUNC(bp);
3904 struct hc_sp_status_block_data sp_sb_data;
3905 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3907 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3908 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3909 sp_sb_data.p_func.vf_valid = false;
3911 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3913 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3914 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3915 CSTORM_SP_STATUS_BLOCK_SIZE);
3916 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3917 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3918 CSTORM_SP_SYNC_BLOCK_SIZE);
3924 void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3925 int igu_sb_id, int igu_seg_id)
3927 hc_sm->igu_sb_id = igu_sb_id;
3928 hc_sm->igu_seg_id = igu_seg_id;
3929 hc_sm->timer_value = 0xFF;
3930 hc_sm->time_to_expire = 0xFFFFFFFF;
3933 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
3934 u8 vf_valid, int fw_sb_id, int igu_sb_id)
3938 struct hc_status_block_data_e2 sb_data_e2;
3939 struct hc_status_block_data_e1x sb_data_e1x;
3940 struct hc_status_block_sm *hc_sm_p;
3941 struct hc_index_data *hc_index_p;
3945 if (CHIP_INT_MODE_IS_BC(bp))
3946 igu_seg_id = HC_SEG_ACCESS_NORM;
3948 igu_seg_id = IGU_SEG_ACCESS_NORM;
3950 bnx2x_zero_fp_sb(bp, fw_sb_id);
3952 if (CHIP_IS_E2(bp)) {
3953 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3954 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
3955 sb_data_e2.common.p_func.vf_id = vfid;
3956 sb_data_e2.common.p_func.vf_valid = vf_valid;
3957 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
3958 sb_data_e2.common.same_igu_sb_1b = true;
3959 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
3960 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
3961 hc_sm_p = sb_data_e2.common.state_machine;
3962 hc_index_p = sb_data_e2.index_data;
3963 sb_data_p = (u32 *)&sb_data_e2;
3964 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3966 memset(&sb_data_e1x, 0,
3967 sizeof(struct hc_status_block_data_e1x));
3968 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3969 sb_data_e1x.common.p_func.vf_id = 0xff;
3970 sb_data_e1x.common.p_func.vf_valid = false;
3971 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
3972 sb_data_e1x.common.same_igu_sb_1b = true;
3973 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3974 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3975 hc_sm_p = sb_data_e1x.common.state_machine;
3976 hc_index_p = sb_data_e1x.index_data;
3977 sb_data_p = (u32 *)&sb_data_e1x;
3978 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3981 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3982 igu_sb_id, igu_seg_id);
3983 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
3984 igu_sb_id, igu_seg_id);
3986 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3988 /* write indecies to HW */
3989 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3992 static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3993 u8 sb_index, u8 disable, u16 usec)
3995 int port = BP_PORT(bp);
3996 u8 ticks = usec / BNX2X_BTR;
3998 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4000 disable = disable ? 1 : (usec ? 0 : 1);
4001 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4004 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4005 u16 tx_usec, u16 rx_usec)
4007 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4009 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4013 static void bnx2x_init_def_sb(struct bnx2x *bp)
4015 struct host_sp_status_block *def_sb = bp->def_status_blk;
4016 dma_addr_t mapping = bp->def_status_blk_mapping;
4017 int igu_sp_sb_index;
4019 int port = BP_PORT(bp);
4020 int func = BP_FUNC(bp);
4024 struct hc_sp_status_block_data sp_sb_data;
4025 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4027 if (CHIP_INT_MODE_IS_BC(bp)) {
4028 igu_sp_sb_index = DEF_SB_IGU_ID;
4029 igu_seg_id = HC_SEG_ACCESS_DEF;
4031 igu_sp_sb_index = bp->igu_dsb_id;
4032 igu_seg_id = IGU_SEG_ACCESS_DEF;
4036 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4037 atten_status_block);
4038 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
4042 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4043 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4044 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4046 /* take care of sig[0]..sig[4] */
4047 for (sindex = 0; sindex < 4; sindex++)
4048 bp->attn_group[index].sig[sindex] =
4049 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
4053 * enable5 is separate from the rest of the registers,
4054 * and therefore the address skip is 4
4055 * and not 16 between the different groups
4057 bp->attn_group[index].sig[4] = REG_RD(bp,
4058 reg_offset + 0x10 + 0x4*index);
4060 bp->attn_group[index].sig[4] = 0;
4063 if (bp->common.int_block == INT_BLOCK_HC) {
4064 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4065 HC_REG_ATTN_MSG0_ADDR_L);
4067 REG_WR(bp, reg_offset, U64_LO(section));
4068 REG_WR(bp, reg_offset + 4, U64_HI(section));
4069 } else if (CHIP_IS_E2(bp)) {
4070 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4071 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4074 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4077 bnx2x_zero_sp_sb(bp);
4079 sp_sb_data.host_sb_addr.lo = U64_LO(section);
4080 sp_sb_data.host_sb_addr.hi = U64_HI(section);
4081 sp_sb_data.igu_sb_id = igu_sp_sb_index;
4082 sp_sb_data.igu_seg_id = igu_seg_id;
4083 sp_sb_data.p_func.pf_id = func;
4084 sp_sb_data.p_func.vnic_id = BP_VN(bp);
4085 sp_sb_data.p_func.vf_id = 0xff;
4087 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4089 bp->stats_pending = 0;
4090 bp->set_mac_pending = 0;
4092 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
4095 void bnx2x_update_coalesce(struct bnx2x *bp)
4099 for_each_queue(bp, i)
4100 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4101 bp->rx_ticks, bp->tx_ticks);
4104 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4106 spin_lock_init(&bp->spq_lock);
4107 atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
4109 bp->spq_prod_idx = 0;
4110 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4111 bp->spq_prod_bd = bp->spq;
4112 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4115 static void bnx2x_init_eq_ring(struct bnx2x *bp)
4118 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4119 union event_ring_elem *elem =
4120 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
4122 elem->next_page.addr.hi =
4123 cpu_to_le32(U64_HI(bp->eq_mapping +
4124 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4125 elem->next_page.addr.lo =
4126 cpu_to_le32(U64_LO(bp->eq_mapping +
4127 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
4130 bp->eq_prod = NUM_EQ_DESC;
4131 bp->eq_cons_sb = BNX2X_EQ_INDEX;
4134 static void bnx2x_init_ind_table(struct bnx2x *bp)
4136 int func = BP_FUNC(bp);
4139 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4143 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4144 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4145 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4146 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4147 bp->fp->cl_id + (i % bp->num_queues));
4150 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4152 int mode = bp->rx_mode;
4155 /* All but management unicast packets should pass to the host as well */
4157 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4158 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4159 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4160 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
4163 case BNX2X_RX_MODE_NONE: /* no Rx */
4164 cl_id = BP_L_ID(bp);
4165 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4168 case BNX2X_RX_MODE_NORMAL:
4169 cl_id = BP_L_ID(bp);
4170 bnx2x_rxq_set_mac_filters(bp, cl_id,
4171 BNX2X_ACCEPT_UNICAST |
4172 BNX2X_ACCEPT_BROADCAST |
4173 BNX2X_ACCEPT_MULTICAST);
4176 case BNX2X_RX_MODE_ALLMULTI:
4177 cl_id = BP_L_ID(bp);
4178 bnx2x_rxq_set_mac_filters(bp, cl_id,
4179 BNX2X_ACCEPT_UNICAST |
4180 BNX2X_ACCEPT_BROADCAST |
4181 BNX2X_ACCEPT_ALL_MULTICAST);
4184 case BNX2X_RX_MODE_PROMISC:
4185 cl_id = BP_L_ID(bp);
4186 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
4188 /* pass management unicast packets as well */
4189 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
4193 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4198 BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
4199 NIG_REG_LLH0_BRB1_DRV_MASK,
4202 DP(NETIF_MSG_IFUP, "rx mode %d\n"
4203 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4204 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
4205 bp->mac_filters.ucast_drop_all,
4206 bp->mac_filters.mcast_drop_all,
4207 bp->mac_filters.bcast_drop_all,
4208 bp->mac_filters.ucast_accept_all,
4209 bp->mac_filters.mcast_accept_all,
4210 bp->mac_filters.bcast_accept_all
4213 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
4216 static void bnx2x_init_internal_common(struct bnx2x *bp)
4220 if (!CHIP_IS_E1(bp)) {
4222 /* xstorm needs to know whether to add ovlan to packets or not,
4223 * in switch-independent we'll write 0 to here... */
4224 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4226 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4228 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4230 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4234 /* Zero this manually as its initialization is
4235 currently missing in the initTool */
4236 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4237 REG_WR(bp, BAR_USTRORM_INTMEM +
4238 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4239 if (CHIP_IS_E2(bp)) {
4240 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4241 CHIP_INT_MODE_IS_BC(bp) ?
4242 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4246 static void bnx2x_init_internal_port(struct bnx2x *bp)
4251 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4253 switch (load_code) {
4254 case FW_MSG_CODE_DRV_LOAD_COMMON:
4255 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4256 bnx2x_init_internal_common(bp);
4259 case FW_MSG_CODE_DRV_LOAD_PORT:
4260 bnx2x_init_internal_port(bp);
4263 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4264 /* internal memory per function is
4265 initialized inside bnx2x_pf_init */
4269 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4274 static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4276 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4278 fp->state = BNX2X_FP_STATE_CLOSED;
4280 fp->index = fp->cid = fp_idx;
4281 fp->cl_id = BP_L_ID(bp) + fp_idx;
4282 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4283 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4284 /* qZone id equals to FW (per path) client id */
4285 fp->cl_qzone_id = fp->cl_id +
4286 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4287 ETH_MAX_RX_CLIENTS_E1H);
4289 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4290 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
4291 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4292 /* Setup SB indicies */
4293 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4294 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4296 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
4297 "cl_id %d fw_sb %d igu_sb %d\n",
4298 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4300 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4301 fp->fw_sb_id, fp->igu_sb_id);
4303 bnx2x_update_fpsb_idx(fp);
4306 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4310 for_each_queue(bp, i)
4311 bnx2x_init_fp_sb(bp, i);
4314 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4315 BNX2X_VF_ID_INVALID, false,
4316 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4320 /* ensure status block indices were read */
4323 bnx2x_init_def_sb(bp);
4324 bnx2x_update_dsb_idx(bp);
4325 bnx2x_init_rx_rings(bp);
4326 bnx2x_init_tx_rings(bp);
4327 bnx2x_init_sp_ring(bp);
4328 bnx2x_init_eq_ring(bp);
4329 bnx2x_init_internal(bp, load_code);
4331 bnx2x_init_ind_table(bp);
4332 bnx2x_stats_init(bp);
4334 /* At this point, we are ready for interrupts */
4335 atomic_set(&bp->intr_sem, 0);
4337 /* flush all before enabling interrupts */
4341 bnx2x_int_enable(bp);
4343 /* Check for SPIO5 */
4344 bnx2x_attn_int_deasserted0(bp,
4345 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4346 AEU_INPUTS_ATTN_BITS_SPIO5);
4349 /* end of nic init */
4352 * gzip service functions
4355 static int bnx2x_gunzip_init(struct bnx2x *bp)
4357 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4358 &bp->gunzip_mapping, GFP_KERNEL);
4359 if (bp->gunzip_buf == NULL)
4362 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4363 if (bp->strm == NULL)
4366 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4368 if (bp->strm->workspace == NULL)
4378 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4379 bp->gunzip_mapping);
4380 bp->gunzip_buf = NULL;
4383 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4384 " un-compression\n");
4388 static void bnx2x_gunzip_end(struct bnx2x *bp)
4390 kfree(bp->strm->workspace);
4394 if (bp->gunzip_buf) {
4395 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4396 bp->gunzip_mapping);
4397 bp->gunzip_buf = NULL;
4401 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
4405 /* check gzip header */
4406 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4407 BNX2X_ERR("Bad gzip header\n");
4415 if (zbuf[3] & FNAME)
4416 while ((zbuf[n++] != 0) && (n < len));
4418 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
4419 bp->strm->avail_in = len - n;
4420 bp->strm->next_out = bp->gunzip_buf;
4421 bp->strm->avail_out = FW_BUF_SIZE;
4423 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4427 rc = zlib_inflate(bp->strm, Z_FINISH);
4428 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4429 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4432 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4433 if (bp->gunzip_outlen & 0x3)
4434 netdev_err(bp->dev, "Firmware decompression error:"
4435 " gunzip_outlen (%d) not aligned\n",
4437 bp->gunzip_outlen >>= 2;
4439 zlib_inflateEnd(bp->strm);
4441 if (rc == Z_STREAM_END)
4447 /* nic load/unload */
4450 * General service functions
4453 /* send a NIG loopback debug packet */
4454 static void bnx2x_lb_pckt(struct bnx2x *bp)
4458 /* Ethernet source and destination addresses */
4459 wb_write[0] = 0x55555555;
4460 wb_write[1] = 0x55555555;
4461 wb_write[2] = 0x20; /* SOP */
4462 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4464 /* NON-IP protocol */
4465 wb_write[0] = 0x09000000;
4466 wb_write[1] = 0x55555555;
4467 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4468 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4471 /* some of the internal memories
4472 * are not directly readable from the driver
4473 * to test them we send debug packets
4475 static int bnx2x_int_mem_test(struct bnx2x *bp)
4481 if (CHIP_REV_IS_FPGA(bp))
4483 else if (CHIP_REV_IS_EMUL(bp))
4488 /* Disable inputs of parser neighbor blocks */
4489 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4490 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4491 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4492 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4494 /* Write 0 to parser credits for CFC search request */
4495 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4497 /* send Ethernet packet */
4500 /* TODO do i reset NIG statistic? */
4501 /* Wait until NIG register shows 1 packet of size 0x10 */
4502 count = 1000 * factor;
4505 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4506 val = *bnx2x_sp(bp, wb_data[0]);
4514 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4518 /* Wait until PRS register shows 1 packet */
4519 count = 1000 * factor;
4521 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4529 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4533 /* Reset and init BRB, PRS */
4534 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4536 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4538 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4539 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4541 DP(NETIF_MSG_HW, "part2\n");
4543 /* Disable inputs of parser neighbor blocks */
4544 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4545 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4546 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4547 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4549 /* Write 0 to parser credits for CFC search request */
4550 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4552 /* send 10 Ethernet packets */
4553 for (i = 0; i < 10; i++)
4556 /* Wait until NIG register shows 10 + 1
4557 packets of size 11*0x10 = 0xb0 */
4558 count = 1000 * factor;
4561 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4562 val = *bnx2x_sp(bp, wb_data[0]);
4570 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4574 /* Wait until PRS register shows 2 packets */
4575 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4577 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4579 /* Write 1 to parser credits for CFC search request */
4580 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4582 /* Wait until PRS register shows 3 packets */
4583 msleep(10 * factor);
4584 /* Wait until NIG register shows 1 packet of size 0x10 */
4585 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4587 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4589 /* clear NIG EOP FIFO */
4590 for (i = 0; i < 11; i++)
4591 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4592 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4594 BNX2X_ERR("clear of NIG failed\n");
4598 /* Reset and init BRB, PRS, NIG */
4599 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4601 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4603 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4604 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4607 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4610 /* Enable inputs of parser neighbor blocks */
4611 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4612 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4613 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
4614 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
4616 DP(NETIF_MSG_HW, "done\n");
4621 static void enable_blocks_attention(struct bnx2x *bp)
4623 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4625 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4627 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
4628 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4629 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4631 * mask read length error interrupts in brb for parser
4632 * (parsing unit and 'checksum and crc' unit)
4633 * these errors are legal (PU reads fixed length and CAC can cause
4634 * read length error on truncated packets)
4636 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
4637 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4638 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4639 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4640 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4641 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
4642 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4643 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
4644 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4645 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4646 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
4647 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4648 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
4649 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4650 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4651 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4652 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
4653 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4654 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4656 if (CHIP_REV_IS_FPGA(bp))
4657 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4658 else if (CHIP_IS_E2(bp))
4659 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4660 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4661 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4662 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4663 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4664 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
4666 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
4667 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4668 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4669 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
4670 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4671 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
4672 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4673 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
4674 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4675 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
4678 static const struct {
4681 } bnx2x_parity_mask[] = {
4682 {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
4683 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
4684 {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
4685 {HC_REG_HC_PRTY_MASK, 0x7},
4686 {MISC_REG_MISC_PRTY_MASK, 0x1},
4687 {QM_REG_QM_PRTY_MASK, 0x0},
4688 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
4689 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
4690 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
4691 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
4692 {CDU_REG_CDU_PRTY_MASK, 0x0},
4693 {CFC_REG_CFC_PRTY_MASK, 0x0},
4694 {DBG_REG_DBG_PRTY_MASK, 0x0},
4695 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
4696 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
4697 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
4698 {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */
4699 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
4700 {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */
4701 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
4702 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
4703 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
4704 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
4705 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
4706 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
4707 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
4708 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
4709 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
4712 static void enable_blocks_parity(struct bnx2x *bp)
4716 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
4717 REG_WR(bp, bnx2x_parity_mask[i].addr,
4718 bnx2x_parity_mask[i].mask);
4722 static void bnx2x_reset_common(struct bnx2x *bp)
4725 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4727 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4730 static void bnx2x_init_pxp(struct bnx2x *bp)
4733 int r_order, w_order;
4735 pci_read_config_word(bp->pdev,
4736 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4737 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4738 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4740 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4742 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4746 bnx2x_init_pxp_arb(bp, r_order, w_order);
4749 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4759 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4760 SHARED_HW_CFG_FAN_FAILURE_MASK;
4762 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4766 * The fan failure mechanism is usually related to the PHY type since
4767 * the power consumption of the board is affected by the PHY. Currently,
4768 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4770 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4771 for (port = PORT_0; port < PORT_MAX; port++) {
4773 bnx2x_fan_failure_det_req(
4775 bp->common.shmem_base,
4776 bp->common.shmem2_base,
4780 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4782 if (is_required == 0)
4785 /* Fan failure is indicated by SPIO 5 */
4786 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4787 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4789 /* set to active low mode */
4790 val = REG_RD(bp, MISC_REG_SPIO_INT);
4791 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
4792 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
4793 REG_WR(bp, MISC_REG_SPIO_INT, val);
4795 /* enable interrupt to signal the IGU */
4796 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4797 val |= (1 << MISC_REGISTERS_SPIO_5);
4798 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4801 static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4807 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4810 switch (BP_ABS_FUNC(bp)) {
4812 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4815 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4818 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4821 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4824 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4827 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4830 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4833 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4839 REG_WR(bp, offset, pretend_func_num);
4841 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4844 static void bnx2x_pf_disable(struct bnx2x *bp)
4846 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4847 val &= ~IGU_PF_CONF_FUNC_EN;
4849 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4850 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4851 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4854 static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4858 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
4860 bnx2x_reset_common(bp);
4861 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4862 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4864 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
4865 if (!CHIP_IS_E1(bp))
4866 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
4868 if (CHIP_IS_E2(bp)) {
4872 * 4-port mode or 2-port mode we need to turn of master-enable
4873 * for everyone, after that, turn it back on for self.
4874 * so, we disregard multi-function or not, and always disable
4875 * for all functions on the given path, this means 0,2,4,6 for
4876 * path 0 and 1,3,5,7 for path 1
4878 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4879 if (fid == BP_ABS_FUNC(bp)) {
4881 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4886 bnx2x_pretend_func(bp, fid);
4887 /* clear pf enable */
4888 bnx2x_pf_disable(bp);
4889 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4893 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
4894 if (CHIP_IS_E1(bp)) {
4895 /* enable HW interrupt from PXP on USDM overflow
4896 bit 16 on INT_MASK_0 */
4897 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4900 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
4904 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4905 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4906 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4907 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4908 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
4909 /* make sure this value is 0 */
4910 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
4912 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4913 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4914 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4915 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4916 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
4919 bnx2x_ilt_init_page_size(bp, INITOP_SET);
4921 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4922 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
4924 /* let the HW do it's magic ... */
4926 /* finish PXP init */
4927 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
4929 BNX2X_ERR("PXP2 CFG failed\n");
4932 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
4934 BNX2X_ERR("PXP2 RD_INIT failed\n");
4938 /* Timers bug workaround E2 only. We need to set the entire ILT to
4939 * have entries with value "0" and valid bit on.
4940 * This needs to be done by the first PF that is loaded in a path
4941 * (i.e. common phase)
4943 if (CHIP_IS_E2(bp)) {
4944 struct ilt_client_info ilt_cli;
4945 struct bnx2x_ilt ilt;
4946 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
4947 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
4949 /* initalize dummy TM client */
4951 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
4952 ilt_cli.client_num = ILT_CLIENT_TM;
4954 /* Step 1: set zeroes to all ilt page entries with valid bit on
4955 * Step 2: set the timers first/last ilt entry to point
4956 * to the entire range to prevent ILT range error for 3rd/4th
4957 * vnic (this code assumes existance of the vnic)
4959 * both steps performed by call to bnx2x_ilt_client_init_op()
4960 * with dummy TM client
4962 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
4963 * and his brother are split registers
4965 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
4966 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
4967 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4969 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
4970 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
4971 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
4975 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
4976 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
4978 if (CHIP_IS_E2(bp)) {
4979 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
4980 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
4981 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
4983 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
4985 /* let the HW do it's magic ... */
4988 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
4989 } while (factor-- && (val != 1));
4992 BNX2X_ERR("ATC_INIT failed\n");
4997 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
4999 /* clean the DMAE memory */
5001 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5003 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5004 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5005 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5006 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5008 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5009 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5010 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5011 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5013 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5015 if (CHIP_MODE_IS_4_PORT(bp))
5016 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
5018 /* QM queues pointers table */
5019 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
5021 /* soft reset pulse */
5022 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5023 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5026 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5029 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5030 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5032 if (!CHIP_REV_IS_SLOW(bp)) {
5033 /* enable hw interrupt from doorbell Q */
5034 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5037 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5038 if (CHIP_MODE_IS_4_PORT(bp)) {
5039 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5040 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5043 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5044 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5047 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5049 if (!CHIP_IS_E1(bp))
5050 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
5052 if (CHIP_IS_E2(bp)) {
5053 /* Bit-map indicating which L2 hdrs may appear after the
5054 basic Ethernet header */
5055 int has_ovlan = IS_MF(bp);
5056 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5057 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5060 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5061 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5062 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5063 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5065 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5066 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5067 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5068 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5070 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5071 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5072 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5073 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5075 if (CHIP_MODE_IS_4_PORT(bp))
5076 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5079 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5081 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5084 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5085 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5086 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5088 if (CHIP_IS_E2(bp)) {
5089 int has_ovlan = IS_MF(bp);
5090 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5091 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5094 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5095 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5096 REG_WR(bp, i, random32());
5098 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5100 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5101 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5102 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5103 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5104 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5105 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5106 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5107 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5108 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5109 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5111 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5113 if (sizeof(union cdu_context) != 1024)
5114 /* we currently assume that a context is 1024 bytes */
5115 dev_alert(&bp->pdev->dev, "please adjust the size "
5116 "of cdu_context(%ld)\n",
5117 (long)sizeof(union cdu_context));
5119 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5120 val = (4 << 24) + (0 << 12) + 1024;
5121 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5123 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5124 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5125 /* enable context validation interrupt from CFC */
5126 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5128 /* set the thresholds to prevent CFC/CDU race */
5129 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5131 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5133 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5134 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5136 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
5137 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5139 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5140 /* Reset PCIE errors for debug */
5141 REG_WR(bp, 0x2814, 0xffffffff);
5142 REG_WR(bp, 0x3820, 0xffffffff);
5144 if (CHIP_IS_E2(bp)) {
5145 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5146 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5147 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5148 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5149 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5150 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5151 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5152 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5153 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5154 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5155 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5158 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5159 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5160 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5161 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5163 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5164 if (!CHIP_IS_E1(bp)) {
5165 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5166 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
5168 if (CHIP_IS_E2(bp)) {
5169 /* Bit-map indicating which L2 hdrs may appear after the
5170 basic Ethernet header */
5171 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
5174 if (CHIP_REV_IS_SLOW(bp))
5177 /* finish CFC init */
5178 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5180 BNX2X_ERR("CFC LL_INIT failed\n");
5183 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5185 BNX2X_ERR("CFC AC_INIT failed\n");
5188 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5190 BNX2X_ERR("CFC CAM_INIT failed\n");
5193 REG_WR(bp, CFC_REG_DEBUG0, 0);
5195 if (CHIP_IS_E1(bp)) {
5196 /* read NIG statistic
5197 to see if this is our first up since powerup */
5198 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5199 val = *bnx2x_sp(bp, wb_data[0]);
5201 /* do internal memory self test */
5202 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5203 BNX2X_ERR("internal mem self test failed\n");
5208 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5209 bp->common.shmem_base,
5210 bp->common.shmem2_base);
5212 bnx2x_setup_fan_failure_detection(bp);
5214 /* clear PXP2 attentions */
5215 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5217 enable_blocks_attention(bp);
5218 if (CHIP_PARITY_SUPPORTED(bp))
5219 enable_blocks_parity(bp);
5221 if (!BP_NOMCP(bp)) {
5222 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5223 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5225 u32 shmem_base[2], shmem2_base[2];
5226 shmem_base[0] = bp->common.shmem_base;
5227 shmem2_base[0] = bp->common.shmem2_base;
5228 if (CHIP_IS_E2(bp)) {
5230 SHMEM2_RD(bp, other_shmem_base_addr);
5232 SHMEM2_RD(bp, other_shmem2_base_addr);
5234 bnx2x_acquire_phy_lock(bp);
5235 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5236 bp->common.chip_id);
5237 bnx2x_release_phy_lock(bp);
5240 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5245 static int bnx2x_init_hw_port(struct bnx2x *bp)
5247 int port = BP_PORT(bp);
5248 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5252 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
5254 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5256 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5257 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5259 /* Timers bug workaround: disables the pf_master bit in pglue at
5260 * common phase, we need to enable it here before any dmae access are
5261 * attempted. Therefore we manually added the enable-master to the
5262 * port phase (it also happens in the function phase)
5265 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5267 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5268 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5269 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
5270 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5272 /* QM cid (connection) count */
5273 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
5276 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5277 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5278 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
5281 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5283 if (CHIP_MODE_IS_4_PORT(bp))
5284 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
5286 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5287 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5288 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5289 /* no pause for emulation and FPGA */
5294 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5295 else if (bp->dev->mtu > 4096) {
5296 if (bp->flags & ONE_PORT_FLAG)
5300 /* (24*1024 + val*4)/256 */
5301 low = 96 + (val/64) +
5302 ((val % 64) ? 1 : 0);
5305 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5306 high = low + 56; /* 14*1024/256 */
5308 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5309 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5312 if (CHIP_MODE_IS_4_PORT(bp)) {
5313 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5314 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5315 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5316 BRB1_REG_MAC_GUARANTIED_0), 40);
5319 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5321 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5322 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5323 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5324 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5326 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5327 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5328 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5329 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5330 if (CHIP_MODE_IS_4_PORT(bp))
5331 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
5333 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5334 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5336 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5338 if (!CHIP_IS_E2(bp)) {
5339 /* configure PBF to work without PAUSE mtu 9000 */
5340 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5342 /* update threshold */
5343 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5344 /* update init credit */
5345 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5348 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5350 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5354 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
5356 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5357 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5359 if (CHIP_IS_E1(bp)) {
5360 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5361 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5363 bnx2x_init_block(bp, HC_BLOCK, init_stage);
5365 bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5367 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5368 /* init aeu_mask_attn_func_0/1:
5369 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5370 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5371 * bits 4-7 are used for "per vn group attention" */
5372 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5373 (IS_MF(bp) ? 0xF7 : 0x7));
5375 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5376 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5377 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5378 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5379 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5381 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5383 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5385 if (!CHIP_IS_E1(bp)) {
5386 /* 0x2 disable mf_ov, 0x1 enable */
5387 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5388 (IS_MF(bp) ? 0x1 : 0x2));
5390 if (CHIP_IS_E2(bp)) {
5392 switch (bp->mf_mode) {
5393 case MULTI_FUNCTION_SD:
5396 case MULTI_FUNCTION_SI:
5401 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5402 NIG_REG_LLH0_CLS_TYPE), val);
5405 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5406 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5407 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5411 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5412 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5413 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5414 bp->common.shmem_base,
5415 bp->common.shmem2_base);
5416 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
5417 bp->common.shmem2_base, port)) {
5418 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5419 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5420 val = REG_RD(bp, reg_addr);
5421 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5422 REG_WR(bp, reg_addr, val);
5424 bnx2x__link_reset(bp);
5429 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5434 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5436 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5438 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5441 static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5443 bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5446 static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5448 u32 i, base = FUNC_ILT_BASE(func);
5449 for (i = base; i < base + ILT_PER_FUNC; i++)
5450 bnx2x_ilt_wr(bp, i, 0);
5453 static int bnx2x_init_hw_func(struct bnx2x *bp)
5455 int port = BP_PORT(bp);
5456 int func = BP_FUNC(bp);
5457 struct bnx2x_ilt *ilt = BP_ILT(bp);
5462 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
5464 /* set MSI reconfigure capability */
5465 if (bp->common.int_block == INT_BLOCK_HC) {
5466 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5467 val = REG_RD(bp, addr);
5468 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5469 REG_WR(bp, addr, val);
5473 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
5475 for (i = 0; i < L2_ILT_LINES(bp); i++) {
5476 ilt->lines[cdu_ilt_start + i].page =
5477 bp->context.vcxt + (ILT_PAGE_CIDS * i);
5478 ilt->lines[cdu_ilt_start + i].page_mapping =
5479 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5480 /* cdu ilt pages are allocated manually so there's no need to
5483 bnx2x_ilt_init_op(bp, INITOP_SET);
5486 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
5488 /* T1 hash bits value determines the T1 number of entries */
5489 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
5494 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5495 #endif /* BCM_CNIC */
5497 if (CHIP_IS_E2(bp)) {
5498 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5500 /* Turn on a single ISR mode in IGU if driver is going to use
5503 if (!(bp->flags & USING_MSIX_FLAG))
5504 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5506 * Timers workaround bug: function init part.
5507 * Need to wait 20msec after initializing ILT,
5508 * needed to make sure there are no requests in
5509 * one of the PXP internal queues with "old" ILT addresses
5513 * Master enable - Due to WB DMAE writes performed before this
5514 * register is re-initialized as part of the regular function
5517 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5518 /* Enable the function in IGU */
5519 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5524 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5527 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5529 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5530 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5531 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5532 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5533 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5534 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5535 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5536 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5537 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5539 if (CHIP_IS_E2(bp)) {
5540 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5542 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5546 if (CHIP_MODE_IS_4_PORT(bp))
5547 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5550 REG_WR(bp, QM_REG_PF_EN, 1);
5552 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
5554 if (CHIP_MODE_IS_4_PORT(bp))
5555 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5557 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5558 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5559 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5560 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5561 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5562 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5563 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5564 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5565 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5566 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5567 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
5569 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5571 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5573 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
5576 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5579 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5580 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
5583 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5585 /* HC init per function */
5586 if (bp->common.int_block == INT_BLOCK_HC) {
5587 if (CHIP_IS_E1H(bp)) {
5588 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5590 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5591 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5593 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5596 int num_segs, sb_idx, prod_offset;
5598 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5600 if (CHIP_IS_E2(bp)) {
5601 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5602 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5605 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5607 if (CHIP_IS_E2(bp)) {
5611 * E2 mode: address 0-135 match to the mapping memory;
5612 * 136 - PF0 default prod; 137 - PF1 default prod;
5613 * 138 - PF2 default prod; 139 - PF3 default prod;
5614 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5615 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5618 * E1.5 mode - In backward compatible mode;
5619 * for non default SB; each even line in the memory
5620 * holds the U producer and each odd line hold
5621 * the C producer. The first 128 producers are for
5622 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5623 * producers are for the DSB for each PF.
5624 * Each PF has five segments: (the order inside each
5625 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5626 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5627 * 144-147 attn prods;
5629 /* non-default-status-blocks */
5630 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5631 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5632 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5633 prod_offset = (bp->igu_base_sb + sb_idx) *
5636 for (i = 0; i < num_segs; i++) {
5637 addr = IGU_REG_PROD_CONS_MEMORY +
5638 (prod_offset + i) * 4;
5639 REG_WR(bp, addr, 0);
5641 /* send consumer update with value 0 */
5642 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5643 USTORM_ID, 0, IGU_INT_NOP, 1);
5644 bnx2x_igu_clear_sb(bp,
5645 bp->igu_base_sb + sb_idx);
5648 /* default-status-blocks */
5649 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5650 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5652 if (CHIP_MODE_IS_4_PORT(bp))
5653 dsb_idx = BP_FUNC(bp);
5655 dsb_idx = BP_E1HVN(bp);
5657 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5658 IGU_BC_BASE_DSB_PROD + dsb_idx :
5659 IGU_NORM_BASE_DSB_PROD + dsb_idx);
5661 for (i = 0; i < (num_segs * E1HVN_MAX);
5663 addr = IGU_REG_PROD_CONS_MEMORY +
5664 (prod_offset + i)*4;
5665 REG_WR(bp, addr, 0);
5667 /* send consumer update with 0 */
5668 if (CHIP_INT_MODE_IS_BC(bp)) {
5669 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5670 USTORM_ID, 0, IGU_INT_NOP, 1);
5671 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5672 CSTORM_ID, 0, IGU_INT_NOP, 1);
5673 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5674 XSTORM_ID, 0, IGU_INT_NOP, 1);
5675 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5676 TSTORM_ID, 0, IGU_INT_NOP, 1);
5677 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5678 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5680 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5681 USTORM_ID, 0, IGU_INT_NOP, 1);
5682 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5683 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5685 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5687 /* !!! these should become driver const once
5688 rf-tool supports split-68 const */
5689 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5690 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5691 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5692 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5693 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5694 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5698 /* Reset PCIE errors for debug */
5699 REG_WR(bp, 0x2114, 0xffffffff);
5700 REG_WR(bp, 0x2120, 0xffffffff);
5702 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5703 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5704 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5705 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5706 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5707 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5709 bnx2x_phy_probe(&bp->link_params);
5714 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5718 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5719 BP_ABS_FUNC(bp), load_code);
5722 mutex_init(&bp->dmae_mutex);
5723 rc = bnx2x_gunzip_init(bp);
5727 switch (load_code) {
5728 case FW_MSG_CODE_DRV_LOAD_COMMON:
5729 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5730 rc = bnx2x_init_hw_common(bp, load_code);
5735 case FW_MSG_CODE_DRV_LOAD_PORT:
5736 rc = bnx2x_init_hw_port(bp);
5741 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5742 rc = bnx2x_init_hw_func(bp);
5748 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5752 if (!BP_NOMCP(bp)) {
5753 int mb_idx = BP_FW_MB_IDX(bp);
5755 bp->fw_drv_pulse_wr_seq =
5756 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
5757 DRV_PULSE_SEQ_MASK);
5758 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5762 bnx2x_gunzip_end(bp);
5767 void bnx2x_free_mem(struct bnx2x *bp)
5770 #define BNX2X_PCI_FREE(x, y, size) \
5773 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
5779 #define BNX2X_FREE(x) \
5791 for_each_queue(bp, i) {
5794 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5795 bnx2x_fp(bp, i, status_blk_mapping),
5796 sizeof(struct host_hc_status_block_e2));
5798 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5799 bnx2x_fp(bp, i, status_blk_mapping),
5800 sizeof(struct host_hc_status_block_e1x));
5803 for_each_queue(bp, i) {
5805 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5806 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5807 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5808 bnx2x_fp(bp, i, rx_desc_mapping),
5809 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5811 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5812 bnx2x_fp(bp, i, rx_comp_mapping),
5813 sizeof(struct eth_fast_path_rx_cqe) *
5817 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5818 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5819 bnx2x_fp(bp, i, rx_sge_mapping),
5820 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5823 for_each_queue(bp, i) {
5825 /* fastpath tx rings: tx_buf tx_desc */
5826 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5827 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5828 bnx2x_fp(bp, i, tx_desc_mapping),
5829 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5831 /* end of fastpath */
5833 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5834 sizeof(struct host_sp_status_block));
5836 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5837 sizeof(struct bnx2x_slowpath));
5839 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5842 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5844 BNX2X_FREE(bp->ilt->lines);
5848 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5849 sizeof(struct host_hc_status_block_e2));
5851 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5852 sizeof(struct host_hc_status_block_e1x));
5854 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
5857 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5859 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5860 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5862 #undef BNX2X_PCI_FREE
5866 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
5868 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
5869 if (CHIP_IS_E2(bp)) {
5870 bnx2x_fp(bp, index, sb_index_values) =
5871 (__le16 *)status_blk.e2_sb->sb.index_values;
5872 bnx2x_fp(bp, index, sb_running_index) =
5873 (__le16 *)status_blk.e2_sb->sb.running_index;
5875 bnx2x_fp(bp, index, sb_index_values) =
5876 (__le16 *)status_blk.e1x_sb->sb.index_values;
5877 bnx2x_fp(bp, index, sb_running_index) =
5878 (__le16 *)status_blk.e1x_sb->sb.running_index;
5882 int bnx2x_alloc_mem(struct bnx2x *bp)
5884 #define BNX2X_PCI_ALLOC(x, y, size) \
5886 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
5888 goto alloc_mem_err; \
5889 memset(x, 0, size); \
5892 #define BNX2X_ALLOC(x, size) \
5894 x = kzalloc(size, GFP_KERNEL); \
5896 goto alloc_mem_err; \
5903 for_each_queue(bp, i) {
5904 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
5905 bnx2x_fp(bp, i, bp) = bp;
5908 BNX2X_PCI_ALLOC(sb->e2_sb,
5909 &bnx2x_fp(bp, i, status_blk_mapping),
5910 sizeof(struct host_hc_status_block_e2));
5912 BNX2X_PCI_ALLOC(sb->e1x_sb,
5913 &bnx2x_fp(bp, i, status_blk_mapping),
5914 sizeof(struct host_hc_status_block_e1x));
5916 set_sb_shortcuts(bp, i);
5919 for_each_queue(bp, i) {
5921 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5922 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5923 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5924 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5925 &bnx2x_fp(bp, i, rx_desc_mapping),
5926 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5928 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5929 &bnx2x_fp(bp, i, rx_comp_mapping),
5930 sizeof(struct eth_fast_path_rx_cqe) *
5934 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5935 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5936 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5937 &bnx2x_fp(bp, i, rx_sge_mapping),
5938 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5941 for_each_queue(bp, i) {
5943 /* fastpath tx rings: tx_buf tx_desc */
5944 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5945 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5946 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5947 &bnx2x_fp(bp, i, tx_desc_mapping),
5948 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5950 /* end of fastpath */
5954 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
5955 sizeof(struct host_hc_status_block_e2));
5957 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
5958 sizeof(struct host_hc_status_block_e1x));
5960 /* allocate searcher T2 table */
5961 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
5965 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5966 sizeof(struct host_sp_status_block));
5968 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5969 sizeof(struct bnx2x_slowpath));
5971 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
5973 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
5976 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
5978 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
5981 /* Slow path ring */
5982 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5985 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
5986 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5993 #undef BNX2X_PCI_ALLOC
5998 * Init service functions
6000 int bnx2x_func_start(struct bnx2x *bp)
6002 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
6004 /* Wait for completion */
6005 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
6006 WAIT_RAMROD_COMMON);
6009 int bnx2x_func_stop(struct bnx2x *bp)
6011 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
6013 /* Wait for completion */
6014 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6015 0, &(bp->state), WAIT_RAMROD_COMMON);
6019 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
6021 * @param bp driver descriptor
6022 * @param set set or clear an entry (1 or 0)
6023 * @param mac pointer to a buffer containing a MAC
6024 * @param cl_bit_vec bit vector of clients to register a MAC for
6025 * @param cam_offset offset in a CAM to use
6026 * @param is_bcast is the set MAC a broadcast address (for E1 only)
6028 static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
6029 u32 cl_bit_vec, u8 cam_offset,
6032 struct mac_configuration_cmd *config =
6033 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6034 int ramrod_flags = WAIT_RAMROD_COMMON;
6036 bp->set_mac_pending = 1;
6039 config->hdr.length = 1;
6040 config->hdr.offset = cam_offset;
6041 config->hdr.client_id = 0xff;
6042 config->hdr.reserved1 = 0;
6045 config->config_table[0].msb_mac_addr =
6046 swab16(*(u16 *)&mac[0]);
6047 config->config_table[0].middle_mac_addr =
6048 swab16(*(u16 *)&mac[2]);
6049 config->config_table[0].lsb_mac_addr =
6050 swab16(*(u16 *)&mac[4]);
6051 config->config_table[0].clients_bit_vector =
6052 cpu_to_le32(cl_bit_vec);
6053 config->config_table[0].vlan_id = 0;
6054 config->config_table[0].pf_id = BP_FUNC(bp);
6056 SET_FLAG(config->config_table[0].flags,
6057 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6058 T_ETH_MAC_COMMAND_SET);
6060 SET_FLAG(config->config_table[0].flags,
6061 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6062 T_ETH_MAC_COMMAND_INVALIDATE);
6065 SET_FLAG(config->config_table[0].flags,
6066 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6068 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
6069 (set ? "setting" : "clearing"),
6070 config->config_table[0].msb_mac_addr,
6071 config->config_table[0].middle_mac_addr,
6072 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
6074 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6075 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6076 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6078 /* Wait for a completion */
6079 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
6082 int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6083 int *state_p, int flags)
6085 /* can take a while if any port is running */
6087 u8 poll = flags & WAIT_RAMROD_POLL;
6088 u8 common = flags & WAIT_RAMROD_COMMON;
6090 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6091 poll ? "polling" : "waiting", state, idx);
6099 bnx2x_rx_int(bp->fp, 10);
6100 /* if index is different from 0
6101 * the reply for some commands will
6102 * be on the non default queue
6105 bnx2x_rx_int(&bp->fp[idx], 10);
6109 mb(); /* state is changed by bnx2x_sp_event() */
6110 if (*state_p == state) {
6111 #ifdef BNX2X_STOP_ON_ERROR
6112 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6124 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6125 poll ? "polling" : "waiting", state, idx);
6126 #ifdef BNX2X_STOP_ON_ERROR
6133 u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6135 if (CHIP_IS_E1H(bp))
6136 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6137 else if (CHIP_MODE_IS_4_PORT(bp))
6138 return BP_FUNC(bp) * 32 + rel_offset;
6140 return BP_VN(bp) * 32 + rel_offset;
6143 void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6145 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6146 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6148 /* networking MAC */
6149 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6150 (1 << bp->fp->cl_id), cam_offset , 0);
6152 if (CHIP_IS_E1(bp)) {
6154 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6155 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6158 static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6161 struct net_device *dev = bp->dev;
6162 struct netdev_hw_addr *ha;
6163 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6164 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6166 netdev_for_each_mc_addr(ha, dev) {
6168 config_cmd->config_table[i].msb_mac_addr =
6169 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6170 config_cmd->config_table[i].middle_mac_addr =
6171 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6172 config_cmd->config_table[i].lsb_mac_addr =
6173 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
6175 config_cmd->config_table[i].vlan_id = 0;
6176 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6177 config_cmd->config_table[i].clients_bit_vector =
6178 cpu_to_le32(1 << BP_L_ID(bp));
6180 SET_FLAG(config_cmd->config_table[i].flags,
6181 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6182 T_ETH_MAC_COMMAND_SET);
6185 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6186 config_cmd->config_table[i].msb_mac_addr,
6187 config_cmd->config_table[i].middle_mac_addr,
6188 config_cmd->config_table[i].lsb_mac_addr);
6191 old = config_cmd->hdr.length;
6193 for (; i < old; i++) {
6194 if (CAM_IS_INVALID(config_cmd->
6196 /* already invalidated */
6200 SET_FLAG(config_cmd->config_table[i].flags,
6201 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6202 T_ETH_MAC_COMMAND_INVALIDATE);
6206 config_cmd->hdr.length = i;
6207 config_cmd->hdr.offset = offset;
6208 config_cmd->hdr.client_id = 0xff;
6209 config_cmd->hdr.reserved1 = 0;
6211 bp->set_mac_pending = 1;
6214 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6215 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6217 static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
6220 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6221 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6222 int ramrod_flags = WAIT_RAMROD_COMMON;
6224 bp->set_mac_pending = 1;
6227 for (i = 0; i < config_cmd->hdr.length; i++)
6228 SET_FLAG(config_cmd->config_table[i].flags,
6229 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6230 T_ETH_MAC_COMMAND_INVALIDATE);
6232 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6233 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6235 /* Wait for a completion */
6236 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6243 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
6244 * MAC(s). This function will wait until the ramdord completion
6247 * @param bp driver handle
6248 * @param set set or clear the CAM entry
6250 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
6252 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6254 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6255 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
6256 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
6257 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
6259 /* Send a SET_MAC ramrod */
6260 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6266 static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6267 struct bnx2x_client_init_params *params,
6269 struct client_init_ramrod_data *data)
6271 /* Clear the buffer */
6272 memset(data, 0, sizeof(*data));
6275 data->general.client_id = params->rxq_params.cl_id;
6276 data->general.statistics_counter_id = params->rxq_params.stat_id;
6277 data->general.statistics_en_flg =
6278 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6279 data->general.activate_flg = activate;
6280 data->general.sp_client_id = params->rxq_params.spcl_id;
6283 data->rx.tpa_en_flg =
6284 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6285 data->rx.vmqueue_mode_en_flg = 0;
6286 data->rx.cache_line_alignment_log_size =
6287 params->rxq_params.cache_line_log;
6288 data->rx.enable_dynamic_hc =
6289 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6290 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6291 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6292 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6294 /* We don't set drop flags */
6295 data->rx.drop_ip_cs_err_flg = 0;
6296 data->rx.drop_tcp_cs_err_flg = 0;
6297 data->rx.drop_ttl0_flg = 0;
6298 data->rx.drop_udp_cs_err_flg = 0;
6300 data->rx.inner_vlan_removal_enable_flg =
6301 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6302 data->rx.outer_vlan_removal_enable_flg =
6303 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6304 data->rx.status_block_id = params->rxq_params.fw_sb_id;
6305 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6306 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6307 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6308 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6309 data->rx.bd_page_base.lo =
6310 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6311 data->rx.bd_page_base.hi =
6312 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6313 data->rx.sge_page_base.lo =
6314 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6315 data->rx.sge_page_base.hi =
6316 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6317 data->rx.cqe_page_base.lo =
6318 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6319 data->rx.cqe_page_base.hi =
6320 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6321 data->rx.is_leading_rss =
6322 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6323 data->rx.is_approx_mcast = data->rx.is_leading_rss;
6326 data->tx.enforce_security_flg = 0; /* VF specific */
6327 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6328 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6329 data->tx.mtu = 0; /* VF specific */
6330 data->tx.tx_bd_page_base.lo =
6331 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6332 data->tx.tx_bd_page_base.hi =
6333 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6335 /* flow control data */
6336 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6337 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6338 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6339 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6340 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6341 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6342 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6344 data->fc.safc_group_num = params->txq_params.cos;
6345 data->fc.safc_group_en_flg =
6346 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6347 data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
6350 static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6352 /* ustorm cxt validation */
6353 cxt->ustorm_ag_context.cdu_usage =
6354 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6355 ETH_CONNECTION_TYPE);
6356 /* xcontext validation */
6357 cxt->xstorm_ag_context.cdu_reserved =
6358 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6359 ETH_CONNECTION_TYPE);
6362 int bnx2x_setup_fw_client(struct bnx2x *bp,
6363 struct bnx2x_client_init_params *params,
6365 struct client_init_ramrod_data *data,
6366 dma_addr_t data_mapping)
6369 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6370 int ramrod_flags = 0, rc;
6372 /* HC and context validation values */
6373 hc_usec = params->txq_params.hc_rate ?
6374 1000000 / params->txq_params.hc_rate : 0;
6375 bnx2x_update_coalesce_sb_index(bp,
6376 params->txq_params.fw_sb_id,
6377 params->txq_params.sb_cq_index,
6378 !(params->txq_params.flags & QUEUE_FLG_HC),
6381 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6383 hc_usec = params->rxq_params.hc_rate ?
6384 1000000 / params->rxq_params.hc_rate : 0;
6385 bnx2x_update_coalesce_sb_index(bp,
6386 params->rxq_params.fw_sb_id,
6387 params->rxq_params.sb_cq_index,
6388 !(params->rxq_params.flags & QUEUE_FLG_HC),
6391 bnx2x_set_ctx_validation(params->rxq_params.cxt,
6392 params->rxq_params.cid);
6395 if (params->txq_params.flags & QUEUE_FLG_STATS)
6396 storm_memset_xstats_zero(bp, BP_PORT(bp),
6397 params->txq_params.stat_id);
6399 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6400 storm_memset_ustats_zero(bp, BP_PORT(bp),
6401 params->rxq_params.stat_id);
6402 storm_memset_tstats_zero(bp, BP_PORT(bp),
6403 params->rxq_params.stat_id);
6406 /* Fill the ramrod data */
6407 bnx2x_fill_cl_init_data(bp, params, activate, data);
6411 * bnx2x_sp_post() takes a spin_lock thus no other explict memory
6412 * barrier except from mmiowb() is needed to impose a
6413 * proper ordering of memory operations.
6418 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6419 U64_HI(data_mapping), U64_LO(data_mapping), 0);
6421 /* Wait for completion */
6422 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6423 params->ramrod_params.index,
6424 params->ramrod_params.pstate,
6430 * Configure interrupt mode according to current configuration.
6431 * In case of MSI-X it will also try to enable MSI-X.
6437 static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
6441 switch (bp->int_mode) {
6443 bnx2x_enable_msi(bp);
6444 /* falling through... */
6447 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
6450 /* Set number of queues according to bp->multi_mode value */
6451 bnx2x_set_num_queues(bp);
6453 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6456 /* if we can't use MSI-X we only need one fp,
6457 * so try to enable MSI-X with the requested number of fp's
6458 * and fallback to MSI or legacy INTx with one fp
6460 rc = bnx2x_enable_msix(bp);
6462 /* failed to enable MSI-X */
6465 "Multi requested but failed to "
6466 "enable MSI-X (%d), "
6467 "set number of queues to %d\n",
6472 if (!(bp->flags & DISABLE_MSI_FLAG))
6473 bnx2x_enable_msi(bp);
6482 /* must be called prioir to any HW initializations */
6483 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6485 return L2_ILT_LINES(bp);
6488 void bnx2x_ilt_set_info(struct bnx2x *bp)
6490 struct ilt_client_info *ilt_client;
6491 struct bnx2x_ilt *ilt = BP_ILT(bp);
6494 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6495 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6498 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6499 ilt_client->client_num = ILT_CLIENT_CDU;
6500 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6501 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6502 ilt_client->start = line;
6503 line += L2_ILT_LINES(bp);
6505 line += CNIC_ILT_LINES;
6507 ilt_client->end = line - 1;
6509 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6510 "flags 0x%x, hw psz %d\n",
6513 ilt_client->page_size,
6515 ilog2(ilt_client->page_size >> 12));
6518 if (QM_INIT(bp->qm_cid_count)) {
6519 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6520 ilt_client->client_num = ILT_CLIENT_QM;
6521 ilt_client->page_size = QM_ILT_PAGE_SZ;
6522 ilt_client->flags = 0;
6523 ilt_client->start = line;
6525 /* 4 bytes for each cid */
6526 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6529 ilt_client->end = line - 1;
6531 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6532 "flags 0x%x, hw psz %d\n",
6535 ilt_client->page_size,
6537 ilog2(ilt_client->page_size >> 12));
6541 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6543 ilt_client->client_num = ILT_CLIENT_SRC;
6544 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6545 ilt_client->flags = 0;
6546 ilt_client->start = line;
6547 line += SRC_ILT_LINES;
6548 ilt_client->end = line - 1;
6550 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6551 "flags 0x%x, hw psz %d\n",
6554 ilt_client->page_size,
6556 ilog2(ilt_client->page_size >> 12));
6559 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6563 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6565 ilt_client->client_num = ILT_CLIENT_TM;
6566 ilt_client->page_size = TM_ILT_PAGE_SZ;
6567 ilt_client->flags = 0;
6568 ilt_client->start = line;
6569 line += TM_ILT_LINES;
6570 ilt_client->end = line - 1;
6572 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6573 "flags 0x%x, hw psz %d\n",
6576 ilt_client->page_size,
6578 ilog2(ilt_client->page_size >> 12));
6581 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6585 int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6588 struct bnx2x_client_init_params params = { {0} };
6591 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6594 params.ramrod_params.pstate = &fp->state;
6595 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6596 params.ramrod_params.index = fp->index;
6597 params.ramrod_params.cid = fp->cid;
6600 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6602 bnx2x_pf_rx_cl_prep(bp, fp, ¶ms.pause, ¶ms.rxq_params);
6604 bnx2x_pf_tx_cl_prep(bp, fp, ¶ms.txq_params);
6606 rc = bnx2x_setup_fw_client(bp, ¶ms, 1,
6607 bnx2x_sp(bp, client_init_data),
6608 bnx2x_sp_mapping(bp, client_init_data));
6612 int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
6616 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
6618 /* halt the connection */
6619 *p->pstate = BNX2X_FP_STATE_HALTING;
6620 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6623 /* Wait for completion */
6624 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6625 p->pstate, poll_flag);
6626 if (rc) /* timeout */
6629 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6630 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6632 /* Wait for completion */
6633 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6634 p->pstate, poll_flag);
6635 if (rc) /* timeout */
6639 /* delete cfc entry */
6640 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
6642 /* Wait for completion */
6643 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6644 p->pstate, WAIT_RAMROD_COMMON);
6648 static int bnx2x_stop_client(struct bnx2x *bp, int index)
6650 struct bnx2x_client_ramrod_params client_stop = {0};
6651 struct bnx2x_fastpath *fp = &bp->fp[index];
6653 client_stop.index = index;
6654 client_stop.cid = fp->cid;
6655 client_stop.cl_id = fp->cl_id;
6656 client_stop.pstate = &(fp->state);
6657 client_stop.poll = 0;
6659 return bnx2x_stop_fw_client(bp, &client_stop);
6663 static void bnx2x_reset_func(struct bnx2x *bp)
6665 int port = BP_PORT(bp);
6666 int func = BP_FUNC(bp);
6668 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
6670 offsetof(struct hc_status_block_data_e2, common) :
6671 offsetof(struct hc_status_block_data_e1x, common));
6672 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6673 int pfid_offset = offsetof(struct pci_entity, pf_id);
6675 /* Disable the function in the FW */
6676 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6677 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6678 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6679 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6682 for_each_queue(bp, i) {
6683 struct bnx2x_fastpath *fp = &bp->fp[i];
6685 BAR_CSTRORM_INTMEM +
6686 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6687 + pfunc_offset_fp + pfid_offset,
6688 HC_FUNCTION_DISABLED);
6693 BAR_CSTRORM_INTMEM +
6694 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6695 pfunc_offset_sp + pfid_offset,
6696 HC_FUNCTION_DISABLED);
6699 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6700 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
6704 if (bp->common.int_block == INT_BLOCK_HC) {
6705 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6706 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6708 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6709 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6713 /* Disable Timer scan */
6714 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6716 * Wait for at least 10ms and up to 2 second for the timers scan to
6719 for (i = 0; i < 200; i++) {
6721 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6726 bnx2x_clear_func_ilt(bp, func);
6728 /* Timers workaround bug for E2: if this is vnic-3,
6729 * we need to set the entire ilt range for this timers.
6731 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6732 struct ilt_client_info ilt_cli;
6733 /* use dummy TM client */
6734 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6736 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6737 ilt_cli.client_num = ILT_CLIENT_TM;
6739 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6742 /* this assumes that reset_port() called before reset_func()*/
6744 bnx2x_pf_disable(bp);
6749 static void bnx2x_reset_port(struct bnx2x *bp)
6751 int port = BP_PORT(bp);
6754 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6756 /* Do not rcv packets to BRB */
6757 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6758 /* Do not direct rcv packets that are not for MCP to the BRB */
6759 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6760 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6763 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6766 /* Check for BRB port occupancy */
6767 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6769 DP(NETIF_MSG_IFDOWN,
6770 "BRB1 is not empty %d blocks are occupied\n", val);
6772 /* TODO: Close Doorbell port? */
6775 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6777 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6778 BP_ABS_FUNC(bp), reset_code);
6780 switch (reset_code) {
6781 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6782 bnx2x_reset_port(bp);
6783 bnx2x_reset_func(bp);
6784 bnx2x_reset_common(bp);
6787 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6788 bnx2x_reset_port(bp);
6789 bnx2x_reset_func(bp);
6792 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6793 bnx2x_reset_func(bp);
6797 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6802 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
6804 int port = BP_PORT(bp);
6808 /* Wait until tx fastpath tasks complete */
6809 for_each_queue(bp, i) {
6810 struct bnx2x_fastpath *fp = &bp->fp[i];
6813 while (bnx2x_has_tx_work_unload(fp)) {
6816 BNX2X_ERR("timeout waiting for queue[%d]\n",
6818 #ifdef BNX2X_STOP_ON_ERROR
6829 /* Give HW time to discard old tx messages */
6832 if (CHIP_IS_E1(bp)) {
6833 /* invalidate mc list,
6834 * wait and poll (interrupts are off)
6836 bnx2x_invlidate_e1_mc_list(bp);
6837 bnx2x_set_eth_mac(bp, 0);
6840 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6842 bnx2x_set_eth_mac(bp, 0);
6844 for (i = 0; i < MC_HASH_SIZE; i++)
6845 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6849 /* Clear iSCSI L2 MAC */
6850 mutex_lock(&bp->cnic_mutex);
6851 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
6852 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
6853 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
6855 mutex_unlock(&bp->cnic_mutex);
6858 if (unload_mode == UNLOAD_NORMAL)
6859 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6861 else if (bp->flags & NO_WOL_FLAG)
6862 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6865 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6866 u8 *mac_addr = bp->dev->dev_addr;
6868 /* The mac address is written to entries 1-4 to
6869 preserve entry 0 which is used by the PMF */
6870 u8 entry = (BP_E1HVN(bp) + 1)*8;
6872 val = (mac_addr[0] << 8) | mac_addr[1];
6873 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6875 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6876 (mac_addr[4] << 8) | mac_addr[5];
6877 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6879 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6882 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6884 /* Close multi and leading connections
6885 Completions for ramrods are collected in a synchronous way */
6886 for_each_queue(bp, i)
6888 if (bnx2x_stop_client(bp, i))
6889 #ifdef BNX2X_STOP_ON_ERROR
6895 rc = bnx2x_func_stop(bp);
6897 BNX2X_ERR("Function stop failed!\n");
6898 #ifdef BNX2X_STOP_ON_ERROR
6904 #ifndef BNX2X_STOP_ON_ERROR
6908 reset_code = bnx2x_fw_command(bp, reset_code, 0);
6910 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
6911 "%d, %d, %d\n", BP_PATH(bp),
6912 load_count[BP_PATH(bp)][0],
6913 load_count[BP_PATH(bp)][1],
6914 load_count[BP_PATH(bp)][2]);
6915 load_count[BP_PATH(bp)][0]--;
6916 load_count[BP_PATH(bp)][1 + port]--;
6917 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
6918 "%d, %d, %d\n", BP_PATH(bp),
6919 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6920 load_count[BP_PATH(bp)][2]);
6921 if (load_count[BP_PATH(bp)][0] == 0)
6922 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6923 else if (load_count[BP_PATH(bp)][1 + port] == 0)
6924 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6926 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6929 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6930 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6931 bnx2x__link_reset(bp);
6933 /* Disable HW interrupts, NAPI */
6934 bnx2x_netif_stop(bp, 1);
6939 /* Reset the chip */
6940 bnx2x_reset_chip(bp, reset_code);
6942 /* Report UNLOAD_DONE to MCP */
6944 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
6948 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
6952 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6954 if (CHIP_IS_E1(bp)) {
6955 int port = BP_PORT(bp);
6956 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6957 MISC_REG_AEU_MASK_ATTN_FUNC_0;
6959 val = REG_RD(bp, addr);
6961 REG_WR(bp, addr, val);
6962 } else if (CHIP_IS_E1H(bp)) {
6963 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6964 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6965 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6966 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6970 /* Close gates #2, #3 and #4: */
6971 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
6975 /* Gates #2 and #4a are closed/opened for "not E1" only */
6976 if (!CHIP_IS_E1(bp)) {
6978 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
6979 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
6980 close ? (val | 0x1) : (val & (~(u32)1)));
6982 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
6983 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
6984 close ? (val | 0x1) : (val & (~(u32)1)));
6988 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
6989 val = REG_RD(bp, addr);
6990 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
6992 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
6993 close ? "closing" : "opening");
6997 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
6999 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7001 /* Do some magic... */
7002 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7003 *magic_val = val & SHARED_MF_CLP_MAGIC;
7004 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7007 /* Restore the value of the `magic' bit.
7009 * @param pdev Device handle.
7010 * @param magic_val Old value of the `magic' bit.
7012 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7014 /* Restore the `magic' bit value... */
7015 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7016 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7017 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7021 * Prepares for MCP reset: takes care of CLP configurations.
7024 * @param magic_val Old value of 'magic' bit.
7026 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7029 u32 validity_offset;
7031 DP(NETIF_MSG_HW, "Starting\n");
7033 /* Set `magic' bit in order to save MF config */
7034 if (!CHIP_IS_E1(bp))
7035 bnx2x_clp_reset_prep(bp, magic_val);
7037 /* Get shmem offset */
7038 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7039 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7041 /* Clear validity map flags */
7043 REG_WR(bp, shmem + validity_offset, 0);
7046 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7047 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
7049 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7050 * depending on the HW type.
7054 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7056 /* special handling for emulation and FPGA,
7057 wait 10 times longer */
7058 if (CHIP_REV_IS_SLOW(bp))
7059 msleep(MCP_ONE_TIMEOUT*10);
7061 msleep(MCP_ONE_TIMEOUT);
7064 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7066 u32 shmem, cnt, validity_offset, val;
7071 /* Get shmem offset */
7072 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7074 BNX2X_ERR("Shmem 0 return failure\n");
7079 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7081 /* Wait for MCP to come up */
7082 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7083 /* TBD: its best to check validity map of last port.
7084 * currently checks on port 0.
7086 val = REG_RD(bp, shmem + validity_offset);
7087 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7088 shmem + validity_offset, val);
7090 /* check that shared memory is valid. */
7091 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7092 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7095 bnx2x_mcp_wait_one(bp);
7098 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7100 /* Check that shared memory is valid. This indicates that MCP is up. */
7101 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7102 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7103 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
7109 /* Restore the `magic' bit value */
7110 if (!CHIP_IS_E1(bp))
7111 bnx2x_clp_reset_done(bp, magic_val);
7116 static void bnx2x_pxp_prep(struct bnx2x *bp)
7118 if (!CHIP_IS_E1(bp)) {
7119 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7120 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7121 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7127 * Reset the whole chip except for:
7129 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7132 * - MISC (including AEU)
7136 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7138 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7141 MISC_REGISTERS_RESET_REG_1_RST_HC |
7142 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7143 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7146 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7147 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7148 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7149 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7150 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7151 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7152 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7153 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7155 reset_mask1 = 0xffffffff;
7158 reset_mask2 = 0xffff;
7160 reset_mask2 = 0x1ffff;
7162 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7163 reset_mask1 & (~not_reset_mask1));
7164 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7165 reset_mask2 & (~not_reset_mask2));
7170 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7171 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7175 static int bnx2x_process_kill(struct bnx2x *bp)
7179 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7182 /* Empty the Tetris buffer, wait for 1s */
7184 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7185 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7186 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7187 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7188 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7189 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7190 ((port_is_idle_0 & 0x1) == 0x1) &&
7191 ((port_is_idle_1 & 0x1) == 0x1) &&
7192 (pgl_exp_rom2 == 0xffffffff))
7195 } while (cnt-- > 0);
7198 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
7200 " outstanding read requests after 1s!\n");
7201 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7202 " port_is_idle_0=0x%08x,"
7203 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7204 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7211 /* Close gates #2, #3 and #4 */
7212 bnx2x_set_234_gates(bp, true);
7214 /* TBD: Indicate that "process kill" is in progress to MCP */
7216 /* Clear "unprepared" bit */
7217 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7220 /* Make sure all is written to the chip before the reset */
7223 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7224 * PSWHST, GRC and PSWRD Tetris buffer.
7228 /* Prepare to chip reset: */
7230 bnx2x_reset_mcp_prep(bp, &val);
7236 /* reset the chip */
7237 bnx2x_process_kill_chip_reset(bp);
7240 /* Recover after reset: */
7242 if (bnx2x_reset_mcp_comp(bp, val))
7248 /* Open the gates #2, #3 and #4 */
7249 bnx2x_set_234_gates(bp, false);
7251 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7252 * reset state, re-enable attentions. */
7257 static int bnx2x_leader_reset(struct bnx2x *bp)
7260 /* Try to recover after the failure */
7261 if (bnx2x_process_kill(bp)) {
7262 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
7265 goto exit_leader_reset;
7268 /* Clear "reset is in progress" bit and update the driver state */
7269 bnx2x_set_reset_done(bp);
7270 bp->recovery_state = BNX2X_RECOVERY_DONE;
7274 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7279 /* Assumption: runs under rtnl lock. This together with the fact
7280 * that it's called only from bnx2x_reset_task() ensure that it
7281 * will never be called when netif_running(bp->dev) is false.
7283 static void bnx2x_parity_recover(struct bnx2x *bp)
7285 DP(NETIF_MSG_HW, "Handling parity\n");
7287 switch (bp->recovery_state) {
7288 case BNX2X_RECOVERY_INIT:
7289 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7290 /* Try to get a LEADER_LOCK HW lock */
7291 if (bnx2x_trylock_hw_lock(bp,
7292 HW_LOCK_RESOURCE_RESERVED_08))
7295 /* Stop the driver */
7296 /* If interface has been removed - break */
7297 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7300 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7301 /* Ensure "is_leader" and "recovery_state"
7302 * update values are seen on other CPUs
7307 case BNX2X_RECOVERY_WAIT:
7308 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7309 if (bp->is_leader) {
7310 u32 load_counter = bnx2x_get_load_cnt(bp);
7312 /* Wait until all other functions get
7315 schedule_delayed_work(&bp->reset_task,
7319 /* If all other functions got down -
7320 * try to bring the chip back to
7321 * normal. In any case it's an exit
7322 * point for a leader.
7324 if (bnx2x_leader_reset(bp) ||
7325 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7326 printk(KERN_ERR"%s: Recovery "
7327 "has failed. Power cycle is "
7328 "needed.\n", bp->dev->name);
7329 /* Disconnect this device */
7330 netif_device_detach(bp->dev);
7331 /* Block ifup for all function
7332 * of this ASIC until
7333 * "process kill" or power
7336 bnx2x_set_reset_in_progress(bp);
7337 /* Shut down the power */
7338 bnx2x_set_power_state(bp,
7345 } else { /* non-leader */
7346 if (!bnx2x_reset_is_done(bp)) {
7347 /* Try to get a LEADER_LOCK HW lock as
7348 * long as a former leader may have
7349 * been unloaded by the user or
7350 * released a leadership by another
7353 if (bnx2x_trylock_hw_lock(bp,
7354 HW_LOCK_RESOURCE_RESERVED_08)) {
7355 /* I'm a leader now! Restart a
7362 schedule_delayed_work(&bp->reset_task,
7366 } else { /* A leader has completed
7367 * the "process kill". It's an exit
7368 * point for a non-leader.
7370 bnx2x_nic_load(bp, LOAD_NORMAL);
7371 bp->recovery_state =
7372 BNX2X_RECOVERY_DONE;
7383 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7384 * scheduled on a general queue in order to prevent a dead lock.
7386 static void bnx2x_reset_task(struct work_struct *work)
7388 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
7390 #ifdef BNX2X_STOP_ON_ERROR
7391 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7392 " so reset not done to allow debug dump,\n"
7393 KERN_ERR " you will need to reboot when done\n");
7399 if (!netif_running(bp->dev))
7400 goto reset_task_exit;
7402 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7403 bnx2x_parity_recover(bp);
7405 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7406 bnx2x_nic_load(bp, LOAD_NORMAL);
7413 /* end of nic load/unload */
7416 * Init service functions
7419 u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
7421 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7422 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7423 return base + (BP_ABS_FUNC(bp)) * stride;
7426 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
7428 u32 reg = bnx2x_get_pretend_reg(bp);
7430 /* Flush all outstanding writes */
7433 /* Pretend to be function 0 */
7435 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
7437 /* From now we are in the "like-E1" mode */
7438 bnx2x_int_disable(bp);
7440 /* Flush all outstanding writes */
7443 /* Restore the original function */
7444 REG_WR(bp, reg, BP_ABS_FUNC(bp));
7448 static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
7451 bnx2x_int_disable(bp);
7453 bnx2x_undi_int_disable_e1h(bp);
7456 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7460 /* Check if there is any driver already loaded */
7461 val = REG_RD(bp, MISC_REG_UNPREPARED);
7463 /* Check if it is the UNDI driver
7464 * UNDI driver initializes CID offset for normal bell to 0x7
7466 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7467 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7469 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7470 /* save our pf_num */
7471 int orig_pf_num = bp->pf_num;
7475 /* clear the UNDI indication */
7476 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7478 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7480 /* try unload UNDI on port 0 */
7483 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7484 DRV_MSG_SEQ_NUMBER_MASK);
7485 reset_code = bnx2x_fw_command(bp, reset_code, 0);
7487 /* if UNDI is loaded on the other port */
7488 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7490 /* send "DONE" for previous unload */
7491 bnx2x_fw_command(bp,
7492 DRV_MSG_CODE_UNLOAD_DONE, 0);
7494 /* unload UNDI on port 1 */
7497 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7498 DRV_MSG_SEQ_NUMBER_MASK);
7499 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7501 bnx2x_fw_command(bp, reset_code, 0);
7504 /* now it's safe to release the lock */
7505 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7507 bnx2x_undi_int_disable(bp);
7509 /* close input traffic and wait for it */
7510 /* Do not rcv packets to BRB */
7512 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7513 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7514 /* Do not direct rcv packets that are not for MCP to
7517 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7518 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7521 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7522 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7525 /* save NIG port swap info */
7526 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7527 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7530 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7533 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7535 /* take the NIG out of reset and restore swap values */
7537 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7538 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7539 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7540 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7542 /* send unload done to the MCP */
7543 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7545 /* restore our func and fw_seq */
7546 bp->pf_num = orig_pf_num;
7548 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7549 DRV_MSG_SEQ_NUMBER_MASK);
7551 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7555 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7557 u32 val, val2, val3, val4, id;
7560 /* Get the chip revision id and number. */
7561 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7562 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7563 id = ((val & 0xffff) << 16);
7564 val = REG_RD(bp, MISC_REG_CHIP_REV);
7565 id |= ((val & 0xf) << 12);
7566 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7567 id |= ((val & 0xff) << 4);
7568 val = REG_RD(bp, MISC_REG_BOND_ID);
7570 bp->common.chip_id = id;
7572 /* Set doorbell size */
7573 bp->db_size = (1 << BNX2X_DB_SHIFT);
7575 if (CHIP_IS_E2(bp)) {
7576 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7578 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7580 val = (val >> 1) & 1;
7581 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7583 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7586 if (CHIP_MODE_IS_4_PORT(bp))
7587 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7589 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7591 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7592 bp->pfid = bp->pf_num; /* 0..7 */
7596 * set base FW non-default (fast path) status block id, this value is
7597 * used to initialize the fw_sb_id saved on the fp/queue structure to
7598 * determine the id used by the FW.
7600 if (CHIP_IS_E1x(bp))
7601 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7603 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7605 bp->link_params.chip_id = bp->common.chip_id;
7606 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7608 val = (REG_RD(bp, 0x2874) & 0x55);
7609 if ((bp->common.chip_id & 0x1) ||
7610 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7611 bp->flags |= ONE_PORT_FLAG;
7612 BNX2X_DEV_INFO("single port device\n");
7615 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7616 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7617 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7618 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7619 bp->common.flash_size, bp->common.flash_size);
7621 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7622 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7623 MISC_REG_GENERIC_CR_1 :
7624 MISC_REG_GENERIC_CR_0));
7625 bp->link_params.shmem_base = bp->common.shmem_base;
7626 bp->link_params.shmem2_base = bp->common.shmem2_base;
7627 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7628 bp->common.shmem_base, bp->common.shmem2_base);
7630 if (!bp->common.shmem_base) {
7631 BNX2X_DEV_INFO("MCP not active\n");
7632 bp->flags |= NO_MCP_FLAG;
7636 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7637 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7638 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7639 BNX2X_ERR("BAD MCP validity signature\n");
7641 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7642 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7644 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7645 SHARED_HW_CFG_LED_MODE_MASK) >>
7646 SHARED_HW_CFG_LED_MODE_SHIFT);
7648 bp->link_params.feature_config_flags = 0;
7649 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7650 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7651 bp->link_params.feature_config_flags |=
7652 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7654 bp->link_params.feature_config_flags &=
7655 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7657 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7658 bp->common.bc_ver = val;
7659 BNX2X_DEV_INFO("bc_ver %X\n", val);
7660 if (val < BNX2X_BC_VER) {
7661 /* for now only warn
7662 * later we might need to enforce this */
7663 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7664 "please upgrade BC\n", BNX2X_BC_VER, val);
7666 bp->link_params.feature_config_flags |=
7667 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
7668 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7670 bp->link_params.feature_config_flags |=
7671 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7672 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
7674 if (BP_E1HVN(bp) == 0) {
7675 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7676 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7678 /* no WOL capability for E1HVN != 0 */
7679 bp->flags |= NO_WOL_FLAG;
7681 BNX2X_DEV_INFO("%sWoL capable\n",
7682 (bp->flags & NO_WOL_FLAG) ? "not " : "");
7684 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7685 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7686 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7687 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7689 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7690 val, val2, val3, val4);
7693 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7694 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7696 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7698 int pfid = BP_FUNC(bp);
7699 int vn = BP_E1HVN(bp);
7704 bp->igu_base_sb = 0xff;
7706 if (CHIP_INT_MODE_IS_BC(bp)) {
7707 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7710 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7713 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7714 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7719 /* IGU in normal mode - read CAM */
7720 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7722 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7723 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7726 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7727 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7729 if (IGU_VEC(val) == 0)
7730 /* default status block */
7731 bp->igu_dsb_id = igu_sb_id;
7733 if (bp->igu_base_sb == 0xff)
7734 bp->igu_base_sb = igu_sb_id;
7739 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
7740 if (bp->igu_sb_cnt == 0)
7741 BNX2X_ERR("CAM configuration error\n");
7744 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7747 int cfg_size = 0, idx, port = BP_PORT(bp);
7749 /* Aggregation of supported attributes of all external phys */
7750 bp->port.supported[0] = 0;
7751 bp->port.supported[1] = 0;
7752 switch (bp->link_params.num_phys) {
7754 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
7758 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
7762 if (bp->link_params.multi_phy_config &
7763 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7764 bp->port.supported[1] =
7765 bp->link_params.phy[EXT_PHY1].supported;
7766 bp->port.supported[0] =
7767 bp->link_params.phy[EXT_PHY2].supported;
7769 bp->port.supported[0] =
7770 bp->link_params.phy[EXT_PHY1].supported;
7771 bp->port.supported[1] =
7772 bp->link_params.phy[EXT_PHY2].supported;
7778 if (!(bp->port.supported[0] || bp->port.supported[1])) {
7779 BNX2X_ERR("NVRAM config error. BAD phy config."
7780 "PHY1 config 0x%x, PHY2 config 0x%x\n",
7782 dev_info.port_hw_config[port].external_phy_config),
7784 dev_info.port_hw_config[port].external_phy_config2));
7788 switch (switch_cfg) {
7790 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7792 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7795 case SWITCH_CFG_10G:
7796 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7798 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7802 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7803 bp->port.link_config[0]);
7806 /* mask what we support according to speed_cap_mask per configuration */
7807 for (idx = 0; idx < cfg_size; idx++) {
7808 if (!(bp->link_params.speed_cap_mask[idx] &
7809 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7810 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
7812 if (!(bp->link_params.speed_cap_mask[idx] &
7813 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7814 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
7816 if (!(bp->link_params.speed_cap_mask[idx] &
7817 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7818 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
7820 if (!(bp->link_params.speed_cap_mask[idx] &
7821 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7822 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
7824 if (!(bp->link_params.speed_cap_mask[idx] &
7825 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7826 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
7827 SUPPORTED_1000baseT_Full);
7829 if (!(bp->link_params.speed_cap_mask[idx] &
7830 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7831 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
7833 if (!(bp->link_params.speed_cap_mask[idx] &
7834 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7835 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
7839 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
7840 bp->port.supported[1]);
7843 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7845 u32 link_config, idx, cfg_size = 0;
7846 bp->port.advertising[0] = 0;
7847 bp->port.advertising[1] = 0;
7848 switch (bp->link_params.num_phys) {
7857 for (idx = 0; idx < cfg_size; idx++) {
7858 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
7859 link_config = bp->port.link_config[idx];
7860 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7861 case PORT_FEATURE_LINK_SPEED_AUTO:
7862 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
7863 bp->link_params.req_line_speed[idx] =
7865 bp->port.advertising[idx] |=
7866 bp->port.supported[idx];
7868 /* force 10G, no AN */
7869 bp->link_params.req_line_speed[idx] =
7871 bp->port.advertising[idx] |=
7872 (ADVERTISED_10000baseT_Full |
7878 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7879 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
7880 bp->link_params.req_line_speed[idx] =
7882 bp->port.advertising[idx] |=
7883 (ADVERTISED_10baseT_Full |
7886 BNX2X_ERROR("NVRAM config error. "
7887 "Invalid link_config 0x%x"
7888 " speed_cap_mask 0x%x\n",
7890 bp->link_params.speed_cap_mask[idx]);
7895 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7896 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
7897 bp->link_params.req_line_speed[idx] =
7899 bp->link_params.req_duplex[idx] =
7901 bp->port.advertising[idx] |=
7902 (ADVERTISED_10baseT_Half |
7905 BNX2X_ERROR("NVRAM config error. "
7906 "Invalid link_config 0x%x"
7907 " speed_cap_mask 0x%x\n",
7909 bp->link_params.speed_cap_mask[idx]);
7914 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7915 if (bp->port.supported[idx] &
7916 SUPPORTED_100baseT_Full) {
7917 bp->link_params.req_line_speed[idx] =
7919 bp->port.advertising[idx] |=
7920 (ADVERTISED_100baseT_Full |
7923 BNX2X_ERROR("NVRAM config error. "
7924 "Invalid link_config 0x%x"
7925 " speed_cap_mask 0x%x\n",
7927 bp->link_params.speed_cap_mask[idx]);
7932 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7933 if (bp->port.supported[idx] &
7934 SUPPORTED_100baseT_Half) {
7935 bp->link_params.req_line_speed[idx] =
7937 bp->link_params.req_duplex[idx] =
7939 bp->port.advertising[idx] |=
7940 (ADVERTISED_100baseT_Half |
7943 BNX2X_ERROR("NVRAM config error. "
7944 "Invalid link_config 0x%x"
7945 " speed_cap_mask 0x%x\n",
7947 bp->link_params.speed_cap_mask[idx]);
7952 case PORT_FEATURE_LINK_SPEED_1G:
7953 if (bp->port.supported[idx] &
7954 SUPPORTED_1000baseT_Full) {
7955 bp->link_params.req_line_speed[idx] =
7957 bp->port.advertising[idx] |=
7958 (ADVERTISED_1000baseT_Full |
7961 BNX2X_ERROR("NVRAM config error. "
7962 "Invalid link_config 0x%x"
7963 " speed_cap_mask 0x%x\n",
7965 bp->link_params.speed_cap_mask[idx]);
7970 case PORT_FEATURE_LINK_SPEED_2_5G:
7971 if (bp->port.supported[idx] &
7972 SUPPORTED_2500baseX_Full) {
7973 bp->link_params.req_line_speed[idx] =
7975 bp->port.advertising[idx] |=
7976 (ADVERTISED_2500baseX_Full |
7979 BNX2X_ERROR("NVRAM config error. "
7980 "Invalid link_config 0x%x"
7981 " speed_cap_mask 0x%x\n",
7983 bp->link_params.speed_cap_mask[idx]);
7988 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7989 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7990 case PORT_FEATURE_LINK_SPEED_10G_KR:
7991 if (bp->port.supported[idx] &
7992 SUPPORTED_10000baseT_Full) {
7993 bp->link_params.req_line_speed[idx] =
7995 bp->port.advertising[idx] |=
7996 (ADVERTISED_10000baseT_Full |
7999 BNX2X_ERROR("NVRAM config error. "
8000 "Invalid link_config 0x%x"
8001 " speed_cap_mask 0x%x\n",
8003 bp->link_params.speed_cap_mask[idx]);
8009 BNX2X_ERROR("NVRAM config error. "
8010 "BAD link speed link_config 0x%x\n",
8012 bp->link_params.req_line_speed[idx] =
8014 bp->port.advertising[idx] =
8015 bp->port.supported[idx];
8019 bp->link_params.req_flow_ctrl[idx] = (link_config &
8020 PORT_FEATURE_FLOW_CONTROL_MASK);
8021 if ((bp->link_params.req_flow_ctrl[idx] ==
8022 BNX2X_FLOW_CTRL_AUTO) &&
8023 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8024 bp->link_params.req_flow_ctrl[idx] =
8025 BNX2X_FLOW_CTRL_NONE;
8028 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8029 " 0x%x advertising 0x%x\n",
8030 bp->link_params.req_line_speed[idx],
8031 bp->link_params.req_duplex[idx],
8032 bp->link_params.req_flow_ctrl[idx],
8033 bp->port.advertising[idx]);
8037 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8039 mac_hi = cpu_to_be16(mac_hi);
8040 mac_lo = cpu_to_be32(mac_lo);
8041 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8042 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8045 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8047 int port = BP_PORT(bp);
8050 u32 ext_phy_type, ext_phy_config;;
8052 bp->link_params.bp = bp;
8053 bp->link_params.port = port;
8055 bp->link_params.lane_config =
8056 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8058 bp->link_params.speed_cap_mask[0] =
8060 dev_info.port_hw_config[port].speed_capability_mask);
8061 bp->link_params.speed_cap_mask[1] =
8063 dev_info.port_hw_config[port].speed_capability_mask2);
8064 bp->port.link_config[0] =
8065 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8067 bp->port.link_config[1] =
8068 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
8070 bp->link_params.multi_phy_config =
8071 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
8072 /* If the device is capable of WoL, set the default state according
8075 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8076 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8077 (config & PORT_FEATURE_WOL_ENABLED));
8079 BNX2X_DEV_INFO("lane_config 0x%08x "
8080 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
8081 bp->link_params.lane_config,
8082 bp->link_params.speed_cap_mask[0],
8083 bp->port.link_config[0]);
8085 bp->link_params.switch_cfg = (bp->port.link_config[0] &
8086 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8087 bnx2x_phy_probe(&bp->link_params);
8088 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8090 bnx2x_link_settings_requested(bp);
8093 * If connected directly, work with the internal PHY, otherwise, work
8094 * with the external PHY
8098 dev_info.port_hw_config[port].external_phy_config);
8099 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
8100 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8101 bp->mdio.prtad = bp->port.phy_addr;
8103 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8104 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8106 XGXS_EXT_PHY_ADDR(ext_phy_config);
8108 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8109 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8110 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8111 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8112 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8115 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8116 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8117 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8121 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8123 int func = BP_ABS_FUNC(bp);
8128 bnx2x_get_common_hwinfo(bp);
8130 if (CHIP_IS_E1x(bp)) {
8131 bp->common.int_block = INT_BLOCK_HC;
8133 bp->igu_dsb_id = DEF_SB_IGU_ID;
8134 bp->igu_base_sb = 0;
8135 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
8137 bp->common.int_block = INT_BLOCK_IGU;
8138 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8139 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8140 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8141 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8143 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
8145 bnx2x_get_igu_cam_info(bp);
8148 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8149 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8152 * Initialize MF configuration
8158 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8159 if (SHMEM2_HAS(bp, mf_cfg_addr))
8160 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8162 bp->common.mf_cfg_base = bp->common.shmem_base +
8163 offsetof(struct shmem_region, func_mb) +
8164 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
8166 MF_CFG_RD(bp, func_mf_config[func].config);
8168 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
8169 FUNC_MF_CFG_E1HOV_TAG_MASK);
8170 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8172 BNX2X_DEV_INFO("%s function mode\n",
8173 IS_MF(bp) ? "multi" : "single");
8176 val = (MF_CFG_RD(bp, func_mf_config[func].
8178 FUNC_MF_CFG_E1HOV_TAG_MASK);
8179 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8181 BNX2X_DEV_INFO("MF OV for func %d is %d "
8183 func, bp->mf_ov, bp->mf_ov);
8185 BNX2X_ERROR("No valid MF OV for func %d,"
8186 " aborting\n", func);
8191 BNX2X_ERROR("VN %d in single function mode,"
8192 " aborting\n", BP_E1HVN(bp));
8198 /* adjust igu_sb_cnt to MF for E1x */
8199 if (CHIP_IS_E1x(bp) && IS_MF(bp))
8200 bp->igu_sb_cnt /= E1HVN_MAX;
8203 * adjust E2 sb count: to be removed when FW will support
8204 * more then 16 L2 clients
8206 #define MAX_L2_CLIENTS 16
8208 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8209 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8211 if (!BP_NOMCP(bp)) {
8212 bnx2x_get_port_hwinfo(bp);
8215 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8216 DRV_MSG_SEQ_NUMBER_MASK);
8217 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8221 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8222 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8223 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8224 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8225 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8226 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8227 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8228 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8229 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8230 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8231 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8233 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8241 /* only supposed to happen on emulation/FPGA */
8242 BNX2X_ERROR("warning: random MAC workaround active\n");
8243 random_ether_addr(bp->dev->dev_addr);
8244 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8250 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8252 int cnt, i, block_end, rodi;
8253 char vpd_data[BNX2X_VPD_LEN+1];
8254 char str_id_reg[VENDOR_ID_LEN+1];
8255 char str_id_cap[VENDOR_ID_LEN+1];
8258 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8259 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8261 if (cnt < BNX2X_VPD_LEN)
8264 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8265 PCI_VPD_LRDT_RO_DATA);
8270 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8271 pci_vpd_lrdt_size(&vpd_data[i]);
8273 i += PCI_VPD_LRDT_TAG_SIZE;
8275 if (block_end > BNX2X_VPD_LEN)
8278 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8279 PCI_VPD_RO_KEYWORD_MFR_ID);
8283 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8285 if (len != VENDOR_ID_LEN)
8288 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8290 /* vendor specific info */
8291 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8292 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8293 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8294 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8296 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8297 PCI_VPD_RO_KEYWORD_VENDOR0);
8299 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8301 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8303 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8304 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8305 bp->fw_ver[len] = ' ';
8314 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8320 /* Disable interrupt handling until HW is initialized */
8321 atomic_set(&bp->intr_sem, 1);
8322 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8324 mutex_init(&bp->port.phy_mutex);
8325 mutex_init(&bp->fw_mb_mutex);
8326 spin_lock_init(&bp->stats_lock);
8328 mutex_init(&bp->cnic_mutex);
8331 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8332 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8334 rc = bnx2x_get_hwinfo(bp);
8337 rc = bnx2x_alloc_mem_bp(bp);
8339 bnx2x_read_fwinfo(bp);
8343 /* need to reset chip if undi was active */
8345 bnx2x_undi_unload(bp);
8347 if (CHIP_REV_IS_FPGA(bp))
8348 dev_err(&bp->pdev->dev, "FPGA detected\n");
8350 if (BP_NOMCP(bp) && (func == 0))
8351 dev_err(&bp->pdev->dev, "MCP disabled, "
8352 "must load devices in order!\n");
8354 /* Set multi queue mode */
8355 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8356 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8357 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
8358 "requested is not MSI-X\n");
8359 multi_mode = ETH_RSS_MODE_DISABLED;
8361 bp->multi_mode = multi_mode;
8362 bp->int_mode = int_mode;
8364 bp->dev->features |= NETIF_F_GRO;
8368 bp->flags &= ~TPA_ENABLE_FLAG;
8369 bp->dev->features &= ~NETIF_F_LRO;
8371 bp->flags |= TPA_ENABLE_FLAG;
8372 bp->dev->features |= NETIF_F_LRO;
8374 bp->disable_tpa = disable_tpa;
8377 bp->dropless_fc = 0;
8379 bp->dropless_fc = dropless_fc;
8383 bp->tx_ring_size = MAX_TX_AVAIL;
8387 /* make sure that the numbers are in the right granularity */
8388 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8389 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
8391 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8392 bp->current_interval = (poll ? poll : timer_interval);
8394 init_timer(&bp->timer);
8395 bp->timer.expires = jiffies + bp->current_interval;
8396 bp->timer.data = (unsigned long) bp;
8397 bp->timer.function = bnx2x_timer;
8403 /****************************************************************************
8404 * General service functions
8405 ****************************************************************************/
8407 /* called with rtnl_lock */
8408 static int bnx2x_open(struct net_device *dev)
8410 struct bnx2x *bp = netdev_priv(dev);
8412 netif_carrier_off(dev);
8414 bnx2x_set_power_state(bp, PCI_D0);
8416 if (!bnx2x_reset_is_done(bp)) {
8418 /* Reset MCP mail box sequence if there is on going
8423 /* If it's the first function to load and reset done
8424 * is still not cleared it may mean that. We don't
8425 * check the attention state here because it may have
8426 * already been cleared by a "common" reset but we
8427 * shell proceed with "process kill" anyway.
8429 if ((bnx2x_get_load_cnt(bp) == 0) &&
8430 bnx2x_trylock_hw_lock(bp,
8431 HW_LOCK_RESOURCE_RESERVED_08) &&
8432 (!bnx2x_leader_reset(bp))) {
8433 DP(NETIF_MSG_HW, "Recovered in open\n");
8437 bnx2x_set_power_state(bp, PCI_D3hot);
8439 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
8440 " completed yet. Try again later. If u still see this"
8441 " message after a few retries then power cycle is"
8442 " required.\n", bp->dev->name);
8448 bp->recovery_state = BNX2X_RECOVERY_DONE;
8450 return bnx2x_nic_load(bp, LOAD_OPEN);
8453 /* called with rtnl_lock */
8454 static int bnx2x_close(struct net_device *dev)
8456 struct bnx2x *bp = netdev_priv(dev);
8458 /* Unload the driver, release IRQs */
8459 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
8460 bnx2x_set_power_state(bp, PCI_D3hot);
8465 /* called with netif_tx_lock from dev_mcast.c */
8466 void bnx2x_set_rx_mode(struct net_device *dev)
8468 struct bnx2x *bp = netdev_priv(dev);
8469 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8470 int port = BP_PORT(bp);
8472 if (bp->state != BNX2X_STATE_OPEN) {
8473 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8477 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8479 if (dev->flags & IFF_PROMISC)
8480 rx_mode = BNX2X_RX_MODE_PROMISC;
8481 else if ((dev->flags & IFF_ALLMULTI) ||
8482 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8484 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8485 else { /* some multicasts */
8486 if (CHIP_IS_E1(bp)) {
8488 * set mc list, do not wait as wait implies sleep
8489 * and set_rx_mode can be invoked from non-sleepable
8492 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8493 BNX2X_MAX_EMUL_MULTI*(1 + port) :
8494 BNX2X_MAX_MULTICAST*(1 + port));
8496 bnx2x_set_e1_mc_list(bp, offset);
8498 /* Accept one or more multicasts */
8499 struct netdev_hw_addr *ha;
8500 u32 mc_filter[MC_HASH_SIZE];
8501 u32 crc, bit, regidx;
8504 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8506 netdev_for_each_mc_addr(ha, dev) {
8507 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
8510 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8512 bit = (crc >> 24) & 0xff;
8515 mc_filter[regidx] |= (1 << bit);
8518 for (i = 0; i < MC_HASH_SIZE; i++)
8519 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8524 bp->rx_mode = rx_mode;
8525 bnx2x_set_storm_rx_mode(bp);
8528 /* called with rtnl_lock */
8529 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8530 int devad, u16 addr)
8532 struct bnx2x *bp = netdev_priv(netdev);
8536 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8537 prtad, devad, addr);
8539 /* The HW expects different devad if CL22 is used */
8540 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8542 bnx2x_acquire_phy_lock(bp);
8543 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
8544 bnx2x_release_phy_lock(bp);
8545 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
8552 /* called with rtnl_lock */
8553 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8554 u16 addr, u16 value)
8556 struct bnx2x *bp = netdev_priv(netdev);
8559 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8560 " value 0x%x\n", prtad, devad, addr, value);
8562 /* The HW expects different devad if CL22 is used */
8563 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8565 bnx2x_acquire_phy_lock(bp);
8566 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
8567 bnx2x_release_phy_lock(bp);
8571 /* called with rtnl_lock */
8572 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8574 struct bnx2x *bp = netdev_priv(dev);
8575 struct mii_ioctl_data *mdio = if_mii(ifr);
8577 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8578 mdio->phy_id, mdio->reg_num, mdio->val_in);
8580 if (!netif_running(dev))
8583 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
8586 #ifdef CONFIG_NET_POLL_CONTROLLER
8587 static void poll_bnx2x(struct net_device *dev)
8589 struct bnx2x *bp = netdev_priv(dev);
8591 disable_irq(bp->pdev->irq);
8592 bnx2x_interrupt(bp->pdev->irq, dev);
8593 enable_irq(bp->pdev->irq);
8597 static const struct net_device_ops bnx2x_netdev_ops = {
8598 .ndo_open = bnx2x_open,
8599 .ndo_stop = bnx2x_close,
8600 .ndo_start_xmit = bnx2x_start_xmit,
8601 .ndo_set_multicast_list = bnx2x_set_rx_mode,
8602 .ndo_set_mac_address = bnx2x_change_mac_addr,
8603 .ndo_validate_addr = eth_validate_addr,
8604 .ndo_do_ioctl = bnx2x_ioctl,
8605 .ndo_change_mtu = bnx2x_change_mtu,
8606 .ndo_tx_timeout = bnx2x_tx_timeout,
8608 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
8610 #ifdef CONFIG_NET_POLL_CONTROLLER
8611 .ndo_poll_controller = poll_bnx2x,
8615 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8616 struct net_device *dev)
8621 SET_NETDEV_DEV(dev, &pdev->dev);
8622 bp = netdev_priv(dev);
8627 bp->pf_num = PCI_FUNC(pdev->devfn);
8629 rc = pci_enable_device(pdev);
8631 dev_err(&bp->pdev->dev,
8632 "Cannot enable PCI device, aborting\n");
8636 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8637 dev_err(&bp->pdev->dev,
8638 "Cannot find PCI device base address, aborting\n");
8640 goto err_out_disable;
8643 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
8644 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
8645 " base address, aborting\n");
8647 goto err_out_disable;
8650 if (atomic_read(&pdev->enable_cnt) == 1) {
8651 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8653 dev_err(&bp->pdev->dev,
8654 "Cannot obtain PCI resources, aborting\n");
8655 goto err_out_disable;
8658 pci_set_master(pdev);
8659 pci_save_state(pdev);
8662 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8663 if (bp->pm_cap == 0) {
8664 dev_err(&bp->pdev->dev,
8665 "Cannot find power management capability, aborting\n");
8667 goto err_out_release;
8670 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
8671 if (bp->pcie_cap == 0) {
8672 dev_err(&bp->pdev->dev,
8673 "Cannot find PCI Express capability, aborting\n");
8675 goto err_out_release;
8678 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
8679 bp->flags |= USING_DAC_FLAG;
8680 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
8681 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
8682 " failed, aborting\n");
8684 goto err_out_release;
8687 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
8688 dev_err(&bp->pdev->dev,
8689 "System does not support DMA, aborting\n");
8691 goto err_out_release;
8694 dev->mem_start = pci_resource_start(pdev, 0);
8695 dev->base_addr = dev->mem_start;
8696 dev->mem_end = pci_resource_end(pdev, 0);
8698 dev->irq = pdev->irq;
8700 bp->regview = pci_ioremap_bar(pdev, 0);
8702 dev_err(&bp->pdev->dev,
8703 "Cannot map register space, aborting\n");
8705 goto err_out_release;
8708 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
8709 min_t(u64, BNX2X_DB_SIZE(bp),
8710 pci_resource_len(pdev, 2)));
8711 if (!bp->doorbells) {
8712 dev_err(&bp->pdev->dev,
8713 "Cannot map doorbell space, aborting\n");
8718 bnx2x_set_power_state(bp, PCI_D0);
8720 /* clean indirect addresses */
8721 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
8722 PCICFG_VENDOR_ID_OFFSET);
8723 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
8724 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
8725 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
8726 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
8728 /* Reset the load counter */
8729 bnx2x_clear_load_cnt(bp);
8731 dev->watchdog_timeo = TX_TIMEOUT;
8733 dev->netdev_ops = &bnx2x_netdev_ops;
8734 bnx2x_set_ethtool_ops(dev);
8735 dev->features |= NETIF_F_SG;
8736 dev->features |= NETIF_F_HW_CSUM;
8737 if (bp->flags & USING_DAC_FLAG)
8738 dev->features |= NETIF_F_HIGHDMA;
8739 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8740 dev->features |= NETIF_F_TSO6;
8742 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
8743 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
8745 dev->vlan_features |= NETIF_F_SG;
8746 dev->vlan_features |= NETIF_F_HW_CSUM;
8747 if (bp->flags & USING_DAC_FLAG)
8748 dev->vlan_features |= NETIF_F_HIGHDMA;
8749 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8750 dev->vlan_features |= NETIF_F_TSO6;
8753 /* get_port_hwinfo() will set prtad and mmds properly */
8754 bp->mdio.prtad = MDIO_PRTAD_NONE;
8756 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
8758 bp->mdio.mdio_read = bnx2x_mdio_read;
8759 bp->mdio.mdio_write = bnx2x_mdio_write;
8765 iounmap(bp->regview);
8768 if (bp->doorbells) {
8769 iounmap(bp->doorbells);
8770 bp->doorbells = NULL;
8774 if (atomic_read(&pdev->enable_cnt) == 1)
8775 pci_release_regions(pdev);
8778 pci_disable_device(pdev);
8779 pci_set_drvdata(pdev, NULL);
8785 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
8786 int *width, int *speed)
8788 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
8790 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
8792 /* return value of 1=2.5GHz 2=5GHz */
8793 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
8796 static int bnx2x_check_firmware(struct bnx2x *bp)
8798 const struct firmware *firmware = bp->firmware;
8799 struct bnx2x_fw_file_hdr *fw_hdr;
8800 struct bnx2x_fw_file_section *sections;
8801 u32 offset, len, num_ops;
8806 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
8809 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
8810 sections = (struct bnx2x_fw_file_section *)fw_hdr;
8812 /* Make sure none of the offsets and sizes make us read beyond
8813 * the end of the firmware data */
8814 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
8815 offset = be32_to_cpu(sections[i].offset);
8816 len = be32_to_cpu(sections[i].len);
8817 if (offset + len > firmware->size) {
8818 dev_err(&bp->pdev->dev,
8819 "Section %d length is out of bounds\n", i);
8824 /* Likewise for the init_ops offsets */
8825 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
8826 ops_offsets = (u16 *)(firmware->data + offset);
8827 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
8829 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
8830 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
8831 dev_err(&bp->pdev->dev,
8832 "Section offset %d is out of bounds\n", i);
8837 /* Check FW version */
8838 offset = be32_to_cpu(fw_hdr->fw_version.offset);
8839 fw_ver = firmware->data + offset;
8840 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
8841 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
8842 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
8843 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
8844 dev_err(&bp->pdev->dev,
8845 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
8846 fw_ver[0], fw_ver[1], fw_ver[2],
8847 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
8848 BCM_5710_FW_MINOR_VERSION,
8849 BCM_5710_FW_REVISION_VERSION,
8850 BCM_5710_FW_ENGINEERING_VERSION);
8857 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
8859 const __be32 *source = (const __be32 *)_source;
8860 u32 *target = (u32 *)_target;
8863 for (i = 0; i < n/4; i++)
8864 target[i] = be32_to_cpu(source[i]);
8868 Ops array is stored in the following format:
8869 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
8871 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
8873 const __be32 *source = (const __be32 *)_source;
8874 struct raw_op *target = (struct raw_op *)_target;
8877 for (i = 0, j = 0; i < n/8; i++, j += 2) {
8878 tmp = be32_to_cpu(source[j]);
8879 target[i].op = (tmp >> 24) & 0xff;
8880 target[i].offset = tmp & 0xffffff;
8881 target[i].raw_data = be32_to_cpu(source[j + 1]);
8886 * IRO array is stored in the following format:
8887 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
8889 static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
8891 const __be32 *source = (const __be32 *)_source;
8892 struct iro *target = (struct iro *)_target;
8895 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
8896 target[i].base = be32_to_cpu(source[j]);
8898 tmp = be32_to_cpu(source[j]);
8899 target[i].m1 = (tmp >> 16) & 0xffff;
8900 target[i].m2 = tmp & 0xffff;
8902 tmp = be32_to_cpu(source[j]);
8903 target[i].m3 = (tmp >> 16) & 0xffff;
8904 target[i].size = tmp & 0xffff;
8909 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
8911 const __be16 *source = (const __be16 *)_source;
8912 u16 *target = (u16 *)_target;
8915 for (i = 0; i < n/2; i++)
8916 target[i] = be16_to_cpu(source[i]);
8919 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
8921 u32 len = be32_to_cpu(fw_hdr->arr.len); \
8922 bp->arr = kmalloc(len, GFP_KERNEL); \
8924 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
8927 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
8928 (u8 *)bp->arr, len); \
8931 int bnx2x_init_firmware(struct bnx2x *bp)
8933 const char *fw_file_name;
8934 struct bnx2x_fw_file_hdr *fw_hdr;
8938 fw_file_name = FW_FILE_NAME_E1;
8939 else if (CHIP_IS_E1H(bp))
8940 fw_file_name = FW_FILE_NAME_E1H;
8941 else if (CHIP_IS_E2(bp))
8942 fw_file_name = FW_FILE_NAME_E2;
8944 BNX2X_ERR("Unsupported chip revision\n");
8948 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
8950 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
8952 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
8953 goto request_firmware_exit;
8956 rc = bnx2x_check_firmware(bp);
8958 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
8959 goto request_firmware_exit;
8962 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
8964 /* Initialize the pointers to the init arrays */
8966 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
8969 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
8972 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
8975 /* STORMs firmware */
8976 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8977 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
8978 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
8979 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
8980 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8981 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
8982 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
8983 be32_to_cpu(fw_hdr->usem_pram_data.offset);
8984 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8985 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
8986 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
8987 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
8988 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8989 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
8990 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
8991 be32_to_cpu(fw_hdr->csem_pram_data.offset);
8993 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
8998 kfree(bp->init_ops_offsets);
8999 init_offsets_alloc_err:
9000 kfree(bp->init_ops);
9002 kfree(bp->init_data);
9003 request_firmware_exit:
9004 release_firmware(bp->firmware);
9009 static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9011 int cid_count = L2_FP_COUNT(l2_cid_count);
9014 cid_count += CNIC_CID_MAX;
9016 return roundup(cid_count, QM_CID_ROUND);
9019 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9020 const struct pci_device_id *ent)
9022 struct net_device *dev = NULL;
9024 int pcie_width, pcie_speed;
9027 switch (ent->driver_data) {
9031 cid_count = FP_SB_MAX_E1x;
9036 cid_count = FP_SB_MAX_E2;
9040 pr_err("Unknown board_type (%ld), aborting\n",
9045 cid_count += CNIC_CONTEXT_USE;
9047 /* dev zeroed in init_etherdev */
9048 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
9050 dev_err(&pdev->dev, "Cannot allocate net device\n");
9054 bp = netdev_priv(dev);
9055 bp->msg_enable = debug;
9057 pci_set_drvdata(pdev, dev);
9059 bp->l2_cid_count = cid_count;
9061 rc = bnx2x_init_dev(pdev, dev);
9067 rc = bnx2x_init_bp(bp);
9071 /* calc qm_cid_count */
9072 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9074 rc = register_netdev(dev);
9076 dev_err(&pdev->dev, "Cannot register net device\n");
9080 /* Configure interupt mode: try to enable MSI-X/MSI if
9081 * needed, set bp->num_queues appropriately.
9083 bnx2x_set_int_mode(bp);
9085 /* Add all NAPI objects */
9086 bnx2x_add_all_napi(bp);
9088 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
9090 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9091 " IRQ %d, ", board_info[ent->driver_data].name,
9092 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
9094 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9095 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9096 "5GHz (Gen2)" : "2.5GHz",
9097 dev->base_addr, bp->pdev->irq);
9098 pr_cont("node addr %pM\n", dev->dev_addr);
9104 iounmap(bp->regview);
9107 iounmap(bp->doorbells);
9111 if (atomic_read(&pdev->enable_cnt) == 1)
9112 pci_release_regions(pdev);
9114 pci_disable_device(pdev);
9115 pci_set_drvdata(pdev, NULL);
9120 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9122 struct net_device *dev = pci_get_drvdata(pdev);
9126 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
9129 bp = netdev_priv(dev);
9131 unregister_netdev(dev);
9133 /* Delete all NAPI objects */
9134 bnx2x_del_all_napi(bp);
9136 /* Disable MSI/MSI-X */
9137 bnx2x_disable_msi(bp);
9139 /* Make sure RESET task is not scheduled before continuing */
9140 cancel_delayed_work_sync(&bp->reset_task);
9143 iounmap(bp->regview);
9146 iounmap(bp->doorbells);
9148 bnx2x_free_mem_bp(bp);
9152 if (atomic_read(&pdev->enable_cnt) == 1)
9153 pci_release_regions(pdev);
9155 pci_disable_device(pdev);
9156 pci_set_drvdata(pdev, NULL);
9159 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9163 bp->state = BNX2X_STATE_ERROR;
9165 bp->rx_mode = BNX2X_RX_MODE_NONE;
9167 bnx2x_netif_stop(bp, 0);
9168 netif_carrier_off(bp->dev);
9170 del_timer_sync(&bp->timer);
9171 bp->stats_state = STATS_STATE_DISABLED;
9172 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9177 /* Free SKBs, SGEs, TPA pool and driver internals */
9178 bnx2x_free_skbs(bp);
9180 for_each_queue(bp, i)
9181 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
9185 bp->state = BNX2X_STATE_CLOSED;
9190 static void bnx2x_eeh_recover(struct bnx2x *bp)
9194 mutex_init(&bp->port.phy_mutex);
9196 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9197 bp->link_params.shmem_base = bp->common.shmem_base;
9198 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9200 if (!bp->common.shmem_base ||
9201 (bp->common.shmem_base < 0xA0000) ||
9202 (bp->common.shmem_base >= 0xC0000)) {
9203 BNX2X_DEV_INFO("MCP not active\n");
9204 bp->flags |= NO_MCP_FLAG;
9208 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9209 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9210 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9211 BNX2X_ERR("BAD MCP validity signature\n");
9213 if (!BP_NOMCP(bp)) {
9215 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9216 DRV_MSG_SEQ_NUMBER_MASK);
9217 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9222 * bnx2x_io_error_detected - called when PCI error is detected
9223 * @pdev: Pointer to PCI device
9224 * @state: The current pci connection state
9226 * This function is called after a PCI bus error affecting
9227 * this device has been detected.
9229 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9230 pci_channel_state_t state)
9232 struct net_device *dev = pci_get_drvdata(pdev);
9233 struct bnx2x *bp = netdev_priv(dev);
9237 netif_device_detach(dev);
9239 if (state == pci_channel_io_perm_failure) {
9241 return PCI_ERS_RESULT_DISCONNECT;
9244 if (netif_running(dev))
9245 bnx2x_eeh_nic_unload(bp);
9247 pci_disable_device(pdev);
9251 /* Request a slot reset */
9252 return PCI_ERS_RESULT_NEED_RESET;
9256 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9257 * @pdev: Pointer to PCI device
9259 * Restart the card from scratch, as if from a cold-boot.
9261 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9263 struct net_device *dev = pci_get_drvdata(pdev);
9264 struct bnx2x *bp = netdev_priv(dev);
9268 if (pci_enable_device(pdev)) {
9270 "Cannot re-enable PCI device after reset\n");
9272 return PCI_ERS_RESULT_DISCONNECT;
9275 pci_set_master(pdev);
9276 pci_restore_state(pdev);
9278 if (netif_running(dev))
9279 bnx2x_set_power_state(bp, PCI_D0);
9283 return PCI_ERS_RESULT_RECOVERED;
9287 * bnx2x_io_resume - called when traffic can start flowing again
9288 * @pdev: Pointer to PCI device
9290 * This callback is called when the error recovery driver tells us that
9291 * its OK to resume normal operation.
9293 static void bnx2x_io_resume(struct pci_dev *pdev)
9295 struct net_device *dev = pci_get_drvdata(pdev);
9296 struct bnx2x *bp = netdev_priv(dev);
9298 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
9299 printk(KERN_ERR "Handling parity error recovery. "
9300 "Try again later\n");
9306 bnx2x_eeh_recover(bp);
9308 if (netif_running(dev))
9309 bnx2x_nic_load(bp, LOAD_NORMAL);
9311 netif_device_attach(dev);
9316 static struct pci_error_handlers bnx2x_err_handler = {
9317 .error_detected = bnx2x_io_error_detected,
9318 .slot_reset = bnx2x_io_slot_reset,
9319 .resume = bnx2x_io_resume,
9322 static struct pci_driver bnx2x_pci_driver = {
9323 .name = DRV_MODULE_NAME,
9324 .id_table = bnx2x_pci_tbl,
9325 .probe = bnx2x_init_one,
9326 .remove = __devexit_p(bnx2x_remove_one),
9327 .suspend = bnx2x_suspend,
9328 .resume = bnx2x_resume,
9329 .err_handler = &bnx2x_err_handler,
9332 static int __init bnx2x_init(void)
9336 pr_info("%s", version);
9338 bnx2x_wq = create_singlethread_workqueue("bnx2x");
9339 if (bnx2x_wq == NULL) {
9340 pr_err("Cannot create workqueue\n");
9344 ret = pci_register_driver(&bnx2x_pci_driver);
9346 pr_err("Cannot register driver\n");
9347 destroy_workqueue(bnx2x_wq);
9352 static void __exit bnx2x_cleanup(void)
9354 pci_unregister_driver(&bnx2x_pci_driver);
9356 destroy_workqueue(bnx2x_wq);
9359 module_init(bnx2x_init);
9360 module_exit(bnx2x_cleanup);
9364 /* count denotes the number of new completions we have seen */
9365 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9367 struct eth_spe *spe;
9369 #ifdef BNX2X_STOP_ON_ERROR
9370 if (unlikely(bp->panic))
9374 spin_lock_bh(&bp->spq_lock);
9375 BUG_ON(bp->cnic_spq_pending < count);
9376 bp->cnic_spq_pending -= count;
9379 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
9380 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
9381 & SPE_HDR_CONN_TYPE) >>
9382 SPE_HDR_CONN_TYPE_SHIFT;
9384 /* Set validation for iSCSI L2 client before sending SETUP
9387 if (type == ETH_CONNECTION_TYPE) {
9388 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
9389 hdr.conn_and_cmd_data) >>
9390 SPE_HDR_CMD_ID_SHIFT) & 0xff;
9392 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
9393 bnx2x_set_ctx_validation(&bp->context.
9394 vcxt[BNX2X_ISCSI_ETH_CID].eth,
9395 HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9398 /* There may be not more than 8 L2 and COMMON SPEs and not more
9399 * than 8 L5 SPEs in the air.
9401 if ((type == NONE_CONNECTION_TYPE) ||
9402 (type == ETH_CONNECTION_TYPE)) {
9403 if (!atomic_read(&bp->spq_left))
9406 atomic_dec(&bp->spq_left);
9407 } else if (type == ISCSI_CONNECTION_TYPE) {
9408 if (bp->cnic_spq_pending >=
9409 bp->cnic_eth_dev.max_kwqe_pending)
9412 bp->cnic_spq_pending++;
9414 BNX2X_ERR("Unknown SPE type: %d\n", type);
9419 spe = bnx2x_sp_get_next(bp);
9420 *spe = *bp->cnic_kwq_cons;
9422 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
9423 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
9425 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
9426 bp->cnic_kwq_cons = bp->cnic_kwq;
9428 bp->cnic_kwq_cons++;
9430 bnx2x_sp_prod_update(bp);
9431 spin_unlock_bh(&bp->spq_lock);
9434 static int bnx2x_cnic_sp_queue(struct net_device *dev,
9435 struct kwqe_16 *kwqes[], u32 count)
9437 struct bnx2x *bp = netdev_priv(dev);
9440 #ifdef BNX2X_STOP_ON_ERROR
9441 if (unlikely(bp->panic))
9445 spin_lock_bh(&bp->spq_lock);
9447 for (i = 0; i < count; i++) {
9448 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
9450 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
9453 *bp->cnic_kwq_prod = *spe;
9455 bp->cnic_kwq_pending++;
9457 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
9458 spe->hdr.conn_and_cmd_data, spe->hdr.type,
9459 spe->data.update_data_addr.hi,
9460 spe->data.update_data_addr.lo,
9461 bp->cnic_kwq_pending);
9463 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
9464 bp->cnic_kwq_prod = bp->cnic_kwq;
9466 bp->cnic_kwq_prod++;
9469 spin_unlock_bh(&bp->spq_lock);
9471 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
9472 bnx2x_cnic_sp_post(bp, 0);
9477 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9479 struct cnic_ops *c_ops;
9482 mutex_lock(&bp->cnic_mutex);
9483 c_ops = bp->cnic_ops;
9485 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9486 mutex_unlock(&bp->cnic_mutex);
9491 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9493 struct cnic_ops *c_ops;
9497 c_ops = rcu_dereference(bp->cnic_ops);
9499 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9506 * for commands that have no data
9508 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
9510 struct cnic_ctl_info ctl = {0};
9514 return bnx2x_cnic_ctl_send(bp, &ctl);
9517 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9519 struct cnic_ctl_info ctl;
9521 /* first we tell CNIC and only then we count this as a completion */
9522 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9523 ctl.data.comp.cid = cid;
9525 bnx2x_cnic_ctl_send_bh(bp, &ctl);
9526 bnx2x_cnic_sp_post(bp, 0);
9529 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
9531 struct bnx2x *bp = netdev_priv(dev);
9535 case DRV_CTL_CTXTBL_WR_CMD: {
9536 u32 index = ctl->data.io.offset;
9537 dma_addr_t addr = ctl->data.io.dma_addr;
9539 bnx2x_ilt_wr(bp, index, addr);
9543 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
9544 int count = ctl->data.credit.credit_count;
9546 bnx2x_cnic_sp_post(bp, count);
9550 /* rtnl_lock is held. */
9551 case DRV_CTL_START_L2_CMD: {
9552 u32 cli = ctl->data.ring.client_id;
9554 /* Set iSCSI MAC address */
9555 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
9560 /* Start accepting on iSCSI L2 ring. Accept all multicasts
9561 * because it's the only way for UIO Client to accept
9562 * multicasts (in non-promiscuous mode only one Client per
9563 * function will receive multicast packets (leading in our
9566 bnx2x_rxq_set_mac_filters(bp, cli,
9567 BNX2X_ACCEPT_UNICAST |
9568 BNX2X_ACCEPT_BROADCAST |
9569 BNX2X_ACCEPT_ALL_MULTICAST);
9570 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9575 /* rtnl_lock is held. */
9576 case DRV_CTL_STOP_L2_CMD: {
9577 u32 cli = ctl->data.ring.client_id;
9579 /* Stop accepting on iSCSI L2 ring */
9580 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
9581 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9586 /* Unset iSCSI L2 MAC */
9587 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
9590 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
9591 int count = ctl->data.credit.credit_count;
9593 smp_mb__before_atomic_inc();
9594 atomic_add(count, &bp->spq_left);
9595 smp_mb__after_atomic_inc();
9600 BNX2X_ERR("unknown command %x\n", ctl->cmd);
9607 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
9609 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9611 if (bp->flags & USING_MSIX_FLAG) {
9612 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
9613 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
9614 cp->irq_arr[0].vector = bp->msix_table[1].vector;
9616 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
9617 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
9620 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
9622 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
9624 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
9625 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
9626 cp->irq_arr[1].status_blk = bp->def_status_blk;
9627 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
9628 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
9633 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
9636 struct bnx2x *bp = netdev_priv(dev);
9637 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9642 if (atomic_read(&bp->intr_sem) != 0)
9645 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
9649 bp->cnic_kwq_cons = bp->cnic_kwq;
9650 bp->cnic_kwq_prod = bp->cnic_kwq;
9651 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
9653 bp->cnic_spq_pending = 0;
9654 bp->cnic_kwq_pending = 0;
9656 bp->cnic_data = data;
9659 cp->drv_state = CNIC_DRV_STATE_REGD;
9660 cp->iro_arr = bp->iro_arr;
9662 bnx2x_setup_cnic_irq_info(bp);
9664 rcu_assign_pointer(bp->cnic_ops, ops);
9669 static int bnx2x_unregister_cnic(struct net_device *dev)
9671 struct bnx2x *bp = netdev_priv(dev);
9672 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9674 mutex_lock(&bp->cnic_mutex);
9675 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
9676 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
9677 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
9680 rcu_assign_pointer(bp->cnic_ops, NULL);
9681 mutex_unlock(&bp->cnic_mutex);
9683 kfree(bp->cnic_kwq);
9684 bp->cnic_kwq = NULL;
9689 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
9691 struct bnx2x *bp = netdev_priv(dev);
9692 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9694 cp->drv_owner = THIS_MODULE;
9695 cp->chip_id = CHIP_ID(bp);
9696 cp->pdev = bp->pdev;
9697 cp->io_base = bp->regview;
9698 cp->io_base2 = bp->doorbells;
9699 cp->max_kwqe_pending = 8;
9700 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
9701 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
9702 bnx2x_cid_ilt_lines(bp);
9703 cp->ctx_tbl_len = CNIC_ILT_LINES;
9704 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
9705 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
9706 cp->drv_ctl = bnx2x_drv_ctl;
9707 cp->drv_register_cnic = bnx2x_register_cnic;
9708 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
9709 cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
9710 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
9712 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
9713 "starting cid %d\n",
9720 EXPORT_SYMBOL(bnx2x_cnic_probe);
9722 #endif /* BCM_CNIC */