1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
52 #include <linux/stringify.h>
56 #include "bnx2x_init.h"
57 #include "bnx2x_init_ops.h"
58 #include "bnx2x_cmn.h"
61 #include <linux/firmware.h>
62 #include "bnx2x_fw_file_hdr.h"
64 #define FW_FILE_VERSION \
65 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
66 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
67 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
68 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
69 #define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
70 #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
72 /* Time in jiffies before concluding the transmitter is hung */
73 #define TX_TIMEOUT (5*HZ)
75 static char version[] __devinitdata =
76 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
77 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
79 MODULE_AUTHOR("Eliezer Tamir");
80 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
81 MODULE_LICENSE("GPL");
82 MODULE_VERSION(DRV_MODULE_VERSION);
83 MODULE_FIRMWARE(FW_FILE_NAME_E1);
84 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
86 static int multi_mode = 1;
87 module_param(multi_mode, int, 0);
88 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
89 "(0 Disable; 1 Enable (default))");
91 static int num_queues;
92 module_param(num_queues, int, 0);
93 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
94 " (default is as a number of CPUs)");
96 static int disable_tpa;
97 module_param(disable_tpa, int, 0);
98 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
101 module_param(int_mode, int, 0);
102 MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
105 static int dropless_fc;
106 module_param(dropless_fc, int, 0);
107 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
110 module_param(poll, int, 0);
111 MODULE_PARM_DESC(poll, " Use polling (for debug)");
113 static int mrrs = -1;
114 module_param(mrrs, int, 0);
115 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
118 module_param(debug, int, 0);
119 MODULE_PARM_DESC(debug, " Default debug msglevel");
/* Single driver-wide workqueue used for slowpath (sp_task) work */
static struct workqueue_struct *bnx2x_wq;
/* Board identifiers; used as indices into board_info[] and bnx2x_pci_tbl[] */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
139 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
140 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
141 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
142 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
146 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
148 /****************************************************************************
149 * General service functions
150 ****************************************************************************/
152 static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
153 u32 addr, dma_addr_t mapping)
155 REG_WR(bp, addr, U64_LO(mapping));
156 REG_WR(bp, addr + 4, U64_HI(mapping));
159 static inline void __storm_memset_fill(struct bnx2x *bp,
160 u32 addr, size_t size, u32 val)
163 for (i = 0; i < size/4; i++)
164 REG_WR(bp, addr + (i * 4), val);
167 static inline void storm_memset_ustats_zero(struct bnx2x *bp,
168 u8 port, u16 stat_id)
170 size_t size = sizeof(struct ustorm_per_client_stats);
172 u32 addr = BAR_USTRORM_INTMEM +
173 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
175 __storm_memset_fill(bp, addr, size, 0);
178 static inline void storm_memset_tstats_zero(struct bnx2x *bp,
179 u8 port, u16 stat_id)
181 size_t size = sizeof(struct tstorm_per_client_stats);
183 u32 addr = BAR_TSTRORM_INTMEM +
184 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
186 __storm_memset_fill(bp, addr, size, 0);
189 static inline void storm_memset_xstats_zero(struct bnx2x *bp,
190 u8 port, u16 stat_id)
192 size_t size = sizeof(struct xstorm_per_client_stats);
194 u32 addr = BAR_XSTRORM_INTMEM +
195 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
197 __storm_memset_fill(bp, addr, size, 0);
201 static inline void storm_memset_spq_addr(struct bnx2x *bp,
202 dma_addr_t mapping, u16 abs_fid)
204 u32 addr = XSEM_REG_FAST_MEMORY +
205 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
207 __storm_memset_dma_mapping(bp, addr, mapping);
210 static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
212 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
215 static inline void storm_memset_func_cfg(struct bnx2x *bp,
216 struct tstorm_eth_function_common_config *tcfg,
219 size_t size = sizeof(struct tstorm_eth_function_common_config);
221 u32 addr = BAR_TSTRORM_INTMEM +
222 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
224 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
227 static inline void storm_memset_xstats_flags(struct bnx2x *bp,
228 struct stats_indication_flags *flags,
231 size_t size = sizeof(struct stats_indication_flags);
233 u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
235 __storm_memset_struct(bp, addr, size, (u32 *)flags);
238 static inline void storm_memset_tstats_flags(struct bnx2x *bp,
239 struct stats_indication_flags *flags,
242 size_t size = sizeof(struct stats_indication_flags);
244 u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
246 __storm_memset_struct(bp, addr, size, (u32 *)flags);
249 static inline void storm_memset_ustats_flags(struct bnx2x *bp,
250 struct stats_indication_flags *flags,
253 size_t size = sizeof(struct stats_indication_flags);
255 u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
257 __storm_memset_struct(bp, addr, size, (u32 *)flags);
260 static inline void storm_memset_cstats_flags(struct bnx2x *bp,
261 struct stats_indication_flags *flags,
264 size_t size = sizeof(struct stats_indication_flags);
266 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
268 __storm_memset_struct(bp, addr, size, (u32 *)flags);
271 static inline void storm_memset_xstats_addr(struct bnx2x *bp,
272 dma_addr_t mapping, u16 abs_fid)
274 u32 addr = BAR_XSTRORM_INTMEM +
275 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
277 __storm_memset_dma_mapping(bp, addr, mapping);
280 static inline void storm_memset_tstats_addr(struct bnx2x *bp,
281 dma_addr_t mapping, u16 abs_fid)
283 u32 addr = BAR_TSTRORM_INTMEM +
284 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
286 __storm_memset_dma_mapping(bp, addr, mapping);
289 static inline void storm_memset_ustats_addr(struct bnx2x *bp,
290 dma_addr_t mapping, u16 abs_fid)
292 u32 addr = BAR_USTRORM_INTMEM +
293 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
295 __storm_memset_dma_mapping(bp, addr, mapping);
298 static inline void storm_memset_cstats_addr(struct bnx2x *bp,
299 dma_addr_t mapping, u16 abs_fid)
301 u32 addr = BAR_CSTRORM_INTMEM +
302 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
304 __storm_memset_dma_mapping(bp, addr, mapping);
307 static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
310 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
312 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
314 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
316 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
320 static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
323 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
325 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
327 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
329 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
333 static inline void storm_memset_eq_data(struct bnx2x *bp,
334 struct event_ring_data *eq_data,
337 size_t size = sizeof(struct event_ring_data);
339 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
341 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
344 static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
347 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
348 REG_WR16(bp, addr, eq_prod);
351 static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
352 u16 fw_sb_id, u8 sb_index,
357 offsetof(struct hc_status_block_data_e1x, index_data);
358 u32 addr = BAR_CSTRORM_INTMEM +
359 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
361 sizeof(struct hc_index_data)*sb_index +
362 offsetof(struct hc_index_data, timeout);
363 REG_WR8(bp, addr, ticks);
364 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
365 port, fw_sb_id, sb_index, ticks);
367 static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
368 u16 fw_sb_id, u8 sb_index,
371 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
373 offsetof(struct hc_status_block_data_e1x, index_data);
374 u32 addr = BAR_CSTRORM_INTMEM +
375 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
377 sizeof(struct hc_index_data)*sb_index +
378 offsetof(struct hc_index_data, flags);
379 u16 flags = REG_RD16(bp, addr);
381 flags &= ~HC_INDEX_DATA_HC_ENABLED;
382 flags |= enable_flag;
383 REG_WR16(bp, addr, flags);
384 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
385 port, fw_sb_id, sb_index, disable);
389 * locking is done by mcp
391 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
393 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
394 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
395 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
396 PCICFG_VENDOR_ID_OFFSET);
399 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
403 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
404 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
405 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
406 PCICFG_VENDOR_ID_OFFSET);
411 const u32 dmae_reg_go_c[] = {
412 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
413 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
414 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
415 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
418 /* copy command into DMAE command memory and set DMAE command go */
419 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
424 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
425 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
426 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
428 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
429 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
431 REG_WR(bp, dmae_reg_go_c[idx], 1);
434 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
437 struct dmae_command dmae;
438 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
441 if (!bp->dmae_ready) {
442 u32 *data = bnx2x_sp(bp, wb_data[0]);
444 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
445 " using indirect\n", dst_addr, len32);
446 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
450 memset(&dmae, 0, sizeof(struct dmae_command));
452 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
453 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
454 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
456 DMAE_CMD_ENDIANITY_B_DW_SWAP |
458 DMAE_CMD_ENDIANITY_DW_SWAP |
460 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
461 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
462 dmae.src_addr_lo = U64_LO(dma_addr);
463 dmae.src_addr_hi = U64_HI(dma_addr);
464 dmae.dst_addr_lo = dst_addr >> 2;
465 dmae.dst_addr_hi = 0;
467 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
468 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
469 dmae.comp_val = DMAE_COMP_VAL;
471 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
472 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
473 "dst_addr [%x:%08x (%08x)]\n"
474 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
475 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
476 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
477 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
478 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
479 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
480 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
482 mutex_lock(&bp->dmae_mutex);
486 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
490 while (*wb_comp != DMAE_COMP_VAL) {
491 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
494 BNX2X_ERR("DMAE timeout!\n");
498 /* adjust delay for emulation/FPGA */
499 if (CHIP_REV_IS_SLOW(bp))
505 mutex_unlock(&bp->dmae_mutex);
508 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
510 struct dmae_command dmae;
511 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
514 if (!bp->dmae_ready) {
515 u32 *data = bnx2x_sp(bp, wb_data[0]);
518 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
519 " using indirect\n", src_addr, len32);
520 for (i = 0; i < len32; i++)
521 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
525 memset(&dmae, 0, sizeof(struct dmae_command));
527 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
528 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
529 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
531 DMAE_CMD_ENDIANITY_B_DW_SWAP |
533 DMAE_CMD_ENDIANITY_DW_SWAP |
535 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
536 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
537 dmae.src_addr_lo = src_addr >> 2;
538 dmae.src_addr_hi = 0;
539 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
540 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
542 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
543 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
544 dmae.comp_val = DMAE_COMP_VAL;
546 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
547 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
548 "dst_addr [%x:%08x (%08x)]\n"
549 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
550 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
551 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
552 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
554 mutex_lock(&bp->dmae_mutex);
556 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
559 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
563 while (*wb_comp != DMAE_COMP_VAL) {
566 BNX2X_ERR("DMAE timeout!\n");
570 /* adjust delay for emulation/FPGA */
571 if (CHIP_REV_IS_SLOW(bp))
576 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
577 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
578 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
580 mutex_unlock(&bp->dmae_mutex);
583 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
586 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
589 while (len > dmae_wr_max) {
590 bnx2x_write_dmae(bp, phys_addr + offset,
591 addr + offset, dmae_wr_max);
592 offset += dmae_wr_max * 4;
596 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
599 /* used only for slowpath so not inlined */
600 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
604 wb_write[0] = val_hi;
605 wb_write[1] = val_lo;
606 REG_WR_DMAE(bp, reg, wb_write, 2);
610 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
614 REG_RD_DMAE(bp, reg, wb_data, 2);
616 return HILO_U64(wb_data[0], wb_data[1]);
620 static int bnx2x_mc_assert(struct bnx2x *bp)
624 u32 row0, row1, row2, row3;
627 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
628 XSTORM_ASSERT_LIST_INDEX_OFFSET);
630 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
632 /* print the asserts */
633 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
635 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
636 XSTORM_ASSERT_LIST_OFFSET(i));
637 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
638 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
639 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
640 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
641 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
642 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
644 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
645 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
646 " 0x%08x 0x%08x 0x%08x\n",
647 i, row3, row2, row1, row0);
655 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
656 TSTORM_ASSERT_LIST_INDEX_OFFSET);
658 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
660 /* print the asserts */
661 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
663 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
664 TSTORM_ASSERT_LIST_OFFSET(i));
665 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
666 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
667 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
668 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
669 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
670 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
672 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
673 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
674 " 0x%08x 0x%08x 0x%08x\n",
675 i, row3, row2, row1, row0);
683 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
684 CSTORM_ASSERT_LIST_INDEX_OFFSET);
686 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
688 /* print the asserts */
689 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
691 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
692 CSTORM_ASSERT_LIST_OFFSET(i));
693 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
694 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
695 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
696 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
697 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
698 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
700 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
701 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
702 " 0x%08x 0x%08x 0x%08x\n",
703 i, row3, row2, row1, row0);
711 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
712 USTORM_ASSERT_LIST_INDEX_OFFSET);
714 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
716 /* print the asserts */
717 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
719 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
720 USTORM_ASSERT_LIST_OFFSET(i));
721 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
722 USTORM_ASSERT_LIST_OFFSET(i) + 4);
723 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
724 USTORM_ASSERT_LIST_OFFSET(i) + 8);
725 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
726 USTORM_ASSERT_LIST_OFFSET(i) + 12);
728 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
729 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
730 " 0x%08x 0x%08x 0x%08x\n",
731 i, row3, row2, row1, row0);
741 static void bnx2x_fw_dump(struct bnx2x *bp)
749 BNX2X_ERR("NO MCP - can not dump\n");
753 addr = bp->common.shmem_base - 0x0800 + 4;
754 mark = REG_RD(bp, addr);
755 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
756 pr_err("begin fw dump (mark 0x%x)\n", mark);
759 for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
760 for (word = 0; word < 8; word++)
761 data[word] = htonl(REG_RD(bp, offset + 4*word));
763 pr_cont("%s", (char *)data);
765 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
766 for (word = 0; word < 8; word++)
767 data[word] = htonl(REG_RD(bp, offset + 4*word));
769 pr_cont("%s", (char *)data);
771 pr_err("end of fw dump\n");
774 void bnx2x_panic_dump(struct bnx2x *bp)
778 struct hc_sp_status_block_data sp_sb_data;
779 int func = BP_FUNC(bp);
780 #ifdef BNX2X_STOP_ON_ERROR
781 u16 start = 0, end = 0;
784 bp->stats_state = STATS_STATE_DISABLED;
785 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
787 BNX2X_ERR("begin crash dump -----------------\n");
791 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
792 " spq_prod_idx(0x%x)\n",
793 bp->def_idx, bp->def_att_idx,
794 bp->attn_state, bp->spq_prod_idx);
795 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
796 bp->def_status_blk->atten_status_block.attn_bits,
797 bp->def_status_blk->atten_status_block.attn_bits_ack,
798 bp->def_status_blk->atten_status_block.status_block_id,
799 bp->def_status_blk->atten_status_block.attn_bits_index);
801 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
803 bp->def_status_blk->sp_sb.index_values[i],
804 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
806 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
807 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
808 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
811 pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
812 "pf_id(0x%x) vnic_id(0x%x) "
813 "vf_id(0x%x) vf_valid (0x%x)\n",
814 sp_sb_data.igu_sb_id,
815 sp_sb_data.igu_seg_id,
816 sp_sb_data.p_func.pf_id,
817 sp_sb_data.p_func.vnic_id,
818 sp_sb_data.p_func.vf_id,
819 sp_sb_data.p_func.vf_valid);
822 for_each_queue(bp, i) {
823 struct bnx2x_fastpath *fp = &bp->fp[i];
825 struct hc_status_block_data_e1x sb_data_e1x;
826 struct hc_status_block_sm *hc_sm_p =
827 sb_data_e1x.common.state_machine;
828 struct hc_index_data *hc_index_p =
829 sb_data_e1x.index_data;
834 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
835 " rx_comp_prod(0x%x)"
836 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
837 i, fp->rx_bd_prod, fp->rx_bd_cons,
839 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
840 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
841 " fp_hc_idx(0x%x)\n",
842 fp->rx_sge_prod, fp->last_max_sge,
843 le16_to_cpu(fp->fp_hc_idx));
846 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
847 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
848 " *tx_cons_sb(0x%x)\n",
849 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
850 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
852 loop = HC_SB_MAX_INDICES_E1X;
856 BNX2X_ERR(" run indexes (");
857 for (j = 0; j < HC_SB_MAX_SM; j++)
859 fp->sb_running_index[j],
860 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
862 BNX2X_ERR(" indexes (");
863 for (j = 0; j < loop; j++)
865 fp->sb_index_values[j],
866 (j == loop - 1) ? ")" : " ");
869 sizeof(struct hc_status_block_data_e1x);
870 data_size /= sizeof(u32);
871 sb_data_p = (u32 *)&sb_data_e1x;
872 /* copy sb data in here */
873 for (j = 0; j < data_size; j++)
874 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
875 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
878 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
879 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
880 sb_data_e1x.common.p_func.pf_id,
881 sb_data_e1x.common.p_func.vf_id,
882 sb_data_e1x.common.p_func.vf_valid,
883 sb_data_e1x.common.p_func.vnic_id,
884 sb_data_e1x.common.same_igu_sb_1b);
887 for (j = 0; j < HC_SB_MAX_SM; j++) {
888 pr_cont("SM[%d] __flags (0x%x) "
889 "igu_sb_id (0x%x) igu_seg_id(0x%x) "
890 "time_to_expire (0x%x) "
891 "timer_value(0x%x)\n", j,
893 hc_sm_p[j].igu_sb_id,
894 hc_sm_p[j].igu_seg_id,
895 hc_sm_p[j].time_to_expire,
896 hc_sm_p[j].timer_value);
900 for (j = 0; j < loop; j++) {
901 pr_cont("INDEX[%d] flags (0x%x) "
902 "timeout (0x%x)\n", j,
904 hc_index_p[j].timeout);
908 #ifdef BNX2X_STOP_ON_ERROR
911 for_each_queue(bp, i) {
912 struct bnx2x_fastpath *fp = &bp->fp[i];
914 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
915 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
916 for (j = start; j != end; j = RX_BD(j + 1)) {
917 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
918 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
920 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
921 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
924 start = RX_SGE(fp->rx_sge_prod);
925 end = RX_SGE(fp->last_max_sge);
926 for (j = start; j != end; j = RX_SGE(j + 1)) {
927 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
928 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
930 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
931 i, j, rx_sge[1], rx_sge[0], sw_page->page);
934 start = RCQ_BD(fp->rx_comp_cons - 10);
935 end = RCQ_BD(fp->rx_comp_cons + 503);
936 for (j = start; j != end; j = RCQ_BD(j + 1)) {
937 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
939 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
940 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
945 for_each_queue(bp, i) {
946 struct bnx2x_fastpath *fp = &bp->fp[i];
948 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
949 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
950 for (j = start; j != end; j = TX_BD(j + 1)) {
951 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
953 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
954 i, j, sw_bd->skb, sw_bd->first_bd);
957 start = TX_BD(fp->tx_bd_cons - 10);
958 end = TX_BD(fp->tx_bd_cons + 254);
959 for (j = start; j != end; j = TX_BD(j + 1)) {
960 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
962 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
963 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
969 BNX2X_ERR("end crash dump -----------------\n");
972 void bnx2x_int_enable(struct bnx2x *bp)
974 int port = BP_PORT(bp);
975 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
976 u32 val = REG_RD(bp, addr);
977 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
978 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
981 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
982 HC_CONFIG_0_REG_INT_LINE_EN_0);
983 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
984 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
986 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
987 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
988 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
989 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
991 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
992 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
993 HC_CONFIG_0_REG_INT_LINE_EN_0 |
994 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
996 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
999 REG_WR(bp, addr, val);
1001 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1004 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
1005 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1007 REG_WR(bp, addr, val);
1009 * Ensure that HC_CONFIG is written before leading/trailing edge config
1014 if (CHIP_IS_E1H(bp)) {
1015 /* init leading/trailing edge */
1017 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1019 /* enable nig and gpio3 attention */
1024 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1025 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1028 /* Make sure that interrupts are indeed enabled from here on */
1032 void bnx2x_int_disable(struct bnx2x *bp)
1034 int port = BP_PORT(bp);
1035 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1036 u32 val = REG_RD(bp, addr);
1038 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1039 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1040 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1041 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1043 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1046 /* flush all outstanding writes */
1049 REG_WR(bp, addr, val);
1050 if (REG_RD(bp, addr) != val)
1051 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1054 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1056 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1059 /* disable interrupt handling */
1060 atomic_inc(&bp->intr_sem);
1061 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1064 /* prevent the HW from sending interrupts */
1065 bnx2x_int_disable(bp);
1067 /* make sure all ISRs are done */
1069 synchronize_irq(bp->msix_table[0].vector);
1074 for_each_queue(bp, i)
1075 synchronize_irq(bp->msix_table[i + offset].vector);
1077 synchronize_irq(bp->pdev->irq);
1079 /* make sure sp_task is not running */
1080 cancel_delayed_work(&bp->sp_task);
1081 flush_workqueue(bnx2x_wq);
1087 * General service functions
1090 /* Return true if succeeded to acquire the lock */
1091 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1094 u32 resource_bit = (1 << resource);
1095 int func = BP_FUNC(bp);
1096 u32 hw_lock_control_reg;
1098 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1100 /* Validating that the resource is within range */
1101 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1103 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1104 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1109 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1111 hw_lock_control_reg =
1112 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1114 /* Try to acquire the lock */
1115 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1116 lock_status = REG_RD(bp, hw_lock_control_reg);
1117 if (lock_status & resource_bit)
1120 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1126 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1129 void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1130 union eth_rx_cqe *rr_cqe)
1132 struct bnx2x *bp = fp->bp;
1133 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1134 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1137 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1138 fp->index, cid, command, bp->state,
1139 rr_cqe->ramrod_cqe.ramrod_type);
1141 switch (command | fp->state) {
1142 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1143 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1144 fp->state = BNX2X_FP_STATE_OPEN;
1147 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1148 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
1149 fp->state = BNX2X_FP_STATE_HALTED;
1152 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1153 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1154 fp->state = BNX2X_FP_STATE_TERMINATED;
1158 BNX2X_ERR("unexpected MC reply (%d) "
1159 "fp[%d] state is %x\n",
1160 command, fp->index, fp->state);
1166 /* push the change in fp->state and towards the memory */
1172 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1174 struct bnx2x *bp = netdev_priv(dev_instance);
1175 u16 status = bnx2x_ack_int(bp);
1179 /* Return here if interrupt is shared and it's not for us */
1180 if (unlikely(status == 0)) {
1181 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1184 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1186 /* Return here if interrupt is disabled */
1187 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1188 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1192 #ifdef BNX2X_STOP_ON_ERROR
1193 if (unlikely(bp->panic))
1197 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1198 struct bnx2x_fastpath *fp = &bp->fp[i];
1200 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
1201 if (status & mask) {
1202 /* Handle Rx and Tx according to SB id */
1203 prefetch(fp->rx_cons_sb);
1204 prefetch(fp->tx_cons_sb);
1205 prefetch(&fp->sb_running_index[SM_RX_ID]);
1206 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1213 if (status & (mask | 0x1)) {
1214 struct cnic_ops *c_ops = NULL;
1217 c_ops = rcu_dereference(bp->cnic_ops);
1219 c_ops->cnic_handler(bp->cnic_data, NULL);
1226 if (unlikely(status & 0x1)) {
1227 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1234 if (unlikely(status))
1235 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1241 /* end of fast path */
1247 * General service functions
1250 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1253 u32 resource_bit = (1 << resource);
1254 int func = BP_FUNC(bp);
1255 u32 hw_lock_control_reg;
1258 /* Validating that the resource is within range */
1259 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1261 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1262 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1267 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1269 hw_lock_control_reg =
1270 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1273 /* Validating that the resource is not already taken */
1274 lock_status = REG_RD(bp, hw_lock_control_reg);
1275 if (lock_status & resource_bit) {
1276 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1277 lock_status, resource_bit);
1281 /* Try for 5 second every 5ms */
1282 for (cnt = 0; cnt < 1000; cnt++) {
1283 /* Try to acquire the lock */
1284 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1285 lock_status = REG_RD(bp, hw_lock_control_reg);
1286 if (lock_status & resource_bit)
1291 DP(NETIF_MSG_HW, "Timeout\n");
1295 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1298 u32 resource_bit = (1 << resource);
1299 int func = BP_FUNC(bp);
1300 u32 hw_lock_control_reg;
1302 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1304 /* Validating that the resource is within range */
1305 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1307 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1308 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1313 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1315 hw_lock_control_reg =
1316 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1319 /* Validating that the resource is currently taken */
1320 lock_status = REG_RD(bp, hw_lock_control_reg);
1321 if (!(lock_status & resource_bit)) {
1322 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1323 lock_status, resource_bit);
1327 REG_WR(bp, hw_lock_control_reg, resource_bit);
1332 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1334 /* The GPIO should be swapped if swap register is set and active */
1335 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1336 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1337 int gpio_shift = gpio_num +
1338 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1339 u32 gpio_mask = (1 << gpio_shift);
1343 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1344 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1348 /* read GPIO value */
1349 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1351 /* get the requested pin value */
1352 if ((gpio_reg & gpio_mask) == gpio_mask)
1357 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1362 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1364 /* The GPIO should be swapped if swap register is set and active */
1365 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1366 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1367 int gpio_shift = gpio_num +
1368 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1369 u32 gpio_mask = (1 << gpio_shift);
1372 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1373 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1377 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1378 /* read GPIO and mask except the float bits */
1379 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1382 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1383 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1384 gpio_num, gpio_shift);
1385 /* clear FLOAT and set CLR */
1386 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1387 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1390 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1391 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1392 gpio_num, gpio_shift);
1393 /* clear FLOAT and set SET */
1394 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1395 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1398 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1399 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1400 gpio_num, gpio_shift);
1402 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1409 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1410 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1415 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1417 /* The GPIO should be swapped if swap register is set and active */
1418 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1419 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1420 int gpio_shift = gpio_num +
1421 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1422 u32 gpio_mask = (1 << gpio_shift);
1425 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1426 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1430 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1432 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1435 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1436 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1437 "output low\n", gpio_num, gpio_shift);
1438 /* clear SET and set CLR */
1439 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1440 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1443 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1444 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1445 "output high\n", gpio_num, gpio_shift);
1446 /* clear CLR and set SET */
1447 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1448 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1455 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1456 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1461 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1463 u32 spio_mask = (1 << spio_num);
1466 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1467 (spio_num > MISC_REGISTERS_SPIO_7)) {
1468 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1472 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1473 /* read SPIO and mask except the float bits */
1474 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1477 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1478 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1479 /* clear FLOAT and set CLR */
1480 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1481 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1484 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1485 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1486 /* clear FLOAT and set SET */
1487 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1488 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1491 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1492 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1494 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1501 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1502 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1507 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1509 u32 sel_phy_idx = 0;
1510 if (bp->link_vars.link_up) {
1511 sel_phy_idx = EXT_PHY1;
1512 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1513 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1514 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1515 sel_phy_idx = EXT_PHY2;
1518 switch (bnx2x_phy_selection(&bp->link_params)) {
1519 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1520 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1521 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1522 sel_phy_idx = EXT_PHY1;
1524 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1525 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1526 sel_phy_idx = EXT_PHY2;
1531 * The selected actived PHY is always after swapping (in case PHY
1532 * swapping is enabled). So when swapping is enabled, we need to reverse
1536 if (bp->link_params.multi_phy_config &
1537 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1538 if (sel_phy_idx == EXT_PHY1)
1539 sel_phy_idx = EXT_PHY2;
1540 else if (sel_phy_idx == EXT_PHY2)
1541 sel_phy_idx = EXT_PHY1;
1543 return LINK_CONFIG_IDX(sel_phy_idx);
1546 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1548 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
1549 switch (bp->link_vars.ieee_fc &
1550 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1551 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1552 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1556 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1557 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
1561 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1562 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
1566 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1573 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1575 if (!BP_NOMCP(bp)) {
1577 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1578 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
1579 /* Initialize link parameters structure variables */
1580 /* It is recommended to turn off RX FC for jumbo frames
1581 for better performance */
1582 if (bp->dev->mtu > 5000)
1583 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1585 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1587 bnx2x_acquire_phy_lock(bp);
1589 if (load_mode == LOAD_DIAG) {
1590 bp->link_params.loopback_mode = LOOPBACK_XGXS;
1591 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1594 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1596 bnx2x_release_phy_lock(bp);
1598 bnx2x_calc_fc_adv(bp);
1600 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1601 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1602 bnx2x_link_report(bp);
1604 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
1607 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
1611 void bnx2x_link_set(struct bnx2x *bp)
1613 if (!BP_NOMCP(bp)) {
1614 bnx2x_acquire_phy_lock(bp);
1615 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1616 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1617 bnx2x_release_phy_lock(bp);
1619 bnx2x_calc_fc_adv(bp);
1621 BNX2X_ERR("Bootcode is missing - can not set link\n");
1624 static void bnx2x__link_reset(struct bnx2x *bp)
1626 if (!BP_NOMCP(bp)) {
1627 bnx2x_acquire_phy_lock(bp);
1628 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1629 bnx2x_release_phy_lock(bp);
1631 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1634 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
1638 if (!BP_NOMCP(bp)) {
1639 bnx2x_acquire_phy_lock(bp);
1640 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1642 bnx2x_release_phy_lock(bp);
1644 BNX2X_ERR("Bootcode is missing - can not test link\n");
1649 static void bnx2x_init_port_minmax(struct bnx2x *bp)
1651 u32 r_param = bp->link_vars.line_speed / 8;
1652 u32 fair_periodic_timeout_usec;
1655 memset(&(bp->cmng.rs_vars), 0,
1656 sizeof(struct rate_shaping_vars_per_port));
1657 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
1659 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1660 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1662 /* this is the threshold below which no timer arming will occur
1663 1.25 coefficient is for the threshold to be a little bigger
1664 than the real time, to compensate for timer in-accuracy */
1665 bp->cmng.rs_vars.rs_threshold =
1666 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1668 /* resolution of fairness timer */
1669 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1670 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1671 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
1673 /* this is the threshold below which we won't arm the timer anymore */
1674 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1676 /* we multiply by 1e3/8 to get bytes/msec.
1677 We don't want the credits to pass a credit
1678 of the t_fair*FAIR_MEM (algorithm resolution) */
1679 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1680 /* since each tick is 4 usec */
1681 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
1684 /* Calculates the sum of vn_min_rates.
1685 It's needed for further normalizing of the min_rates.
1687 sum of vn_min_rates.
1689 0 - if all the min_rates are 0.
1690 In the latter case fairness algorithm should be deactivated.
1691 If not all min_rates are zero then those that are zeroes will be set to 1.
1693 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1696 int port = BP_PORT(bp);
1699 bp->vn_weight_sum = 0;
1700 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1701 int func = 2*vn + port;
1702 u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config);
1703 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1704 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1706 /* Skip hidden vns */
1707 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1710 /* If min rate is zero - set it to 1 */
1712 vn_min_rate = DEF_MIN_RATE;
1716 bp->vn_weight_sum += vn_min_rate;
1719 /* ... only if all min rates are zeros - disable fairness */
1721 bp->cmng.flags.cmng_enables &=
1722 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1723 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1724 " fairness will be disabled\n");
1726 bp->cmng.flags.cmng_enables |=
1727 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1730 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1732 struct rate_shaping_vars_per_vn m_rs_vn;
1733 struct fairness_vars_per_vn m_fair_vn;
1734 u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config);
1735 u16 vn_min_rate, vn_max_rate;
1738 /* If function is hidden - set min and max to zeroes */
1739 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1744 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1745 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1746 /* If min rate is zero - set it to 1 */
1748 vn_min_rate = DEF_MIN_RATE;
1749 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1750 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1753 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
1754 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1756 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1757 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1759 /* global vn counter - maximal Mbps for this vn */
1760 m_rs_vn.vn_counter.rate = vn_max_rate;
1762 /* quota - number of bytes transmitted in this period */
1763 m_rs_vn.vn_counter.quota =
1764 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1766 if (bp->vn_weight_sum) {
1767 /* credit for each period of the fairness algorithm:
1768 number of bytes in T_FAIR (the vn share the port rate).
1769 vn_weight_sum should not be larger than 10000, thus
1770 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1772 m_fair_vn.vn_credit_delta =
1773 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1774 (8 * bp->vn_weight_sum))),
1775 (bp->cmng.fair_vars.fair_threshold * 2));
1776 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
1777 m_fair_vn.vn_credit_delta);
1780 /* Store it to internal memory */
1781 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1782 REG_WR(bp, BAR_XSTRORM_INTMEM +
1783 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1784 ((u32 *)(&m_rs_vn))[i]);
1786 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1787 REG_WR(bp, BAR_XSTRORM_INTMEM +
1788 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1789 ((u32 *)(&m_fair_vn))[i]);
1791 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
1793 if (CHIP_REV_IS_SLOW(bp))
1794 return CMNG_FNS_NONE;
1796 return CMNG_FNS_MINMAX;
1798 return CMNG_FNS_NONE;
1801 static void bnx2x_read_mf_cfg(struct bnx2x *bp)
1806 return; /* what should be the default bvalue in this case */
1808 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1809 int /*abs*/func = 2*vn + BP_PORT(bp);
1811 MF_CFG_RD(bp, func_mf_config[func].config);
1815 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
1818 if (cmng_type == CMNG_FNS_MINMAX) {
1821 /* clear cmng_enables */
1822 bp->cmng.flags.cmng_enables = 0;
1824 /* read mf conf from shmem */
1826 bnx2x_read_mf_cfg(bp);
1828 /* Init rate shaping and fairness contexts */
1829 bnx2x_init_port_minmax(bp);
1831 /* vn_weight_sum and enable fairness if not 0 */
1832 bnx2x_calc_vn_weight_sum(bp);
1834 /* calculate and set min-max rate for each vn */
1835 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1836 bnx2x_init_vn_minmax(bp, vn);
1838 /* always enable rate shaping and fairness */
1839 bp->cmng.flags.cmng_enables |=
1840 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
1841 if (!bp->vn_weight_sum)
1842 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1843 " fairness will be disabled\n");
1847 /* rate shaping and fairness are disabled */
1849 "rate shaping and fairness are disabled\n");
1852 static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
1854 int port = BP_PORT(bp);
1858 /* Set the attention towards other drivers on the same port */
1859 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1860 if (vn == BP_E1HVN(bp))
1863 func = ((vn << 1) | port);
1864 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1865 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1869 /* This function is called upon link interrupt */
1870 static void bnx2x_link_attn(struct bnx2x *bp)
1872 u32 prev_link_status = bp->link_vars.link_status;
1873 /* Make sure that we are synced with the current statistics */
1874 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1876 bnx2x_link_update(&bp->link_params, &bp->link_vars);
1878 if (bp->link_vars.link_up) {
1880 /* dropless flow control */
1881 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1882 int port = BP_PORT(bp);
1883 u32 pause_enabled = 0;
1885 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1888 REG_WR(bp, BAR_USTRORM_INTMEM +
1889 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1893 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
1894 struct host_port_stats *pstats;
1896 pstats = bnx2x_sp(bp, port_stats);
1897 /* reset old bmac stats */
1898 memset(&(pstats->mac_stx[0]), 0,
1899 sizeof(struct mac_stx));
1901 if (bp->state == BNX2X_STATE_OPEN)
1902 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1905 /* indicate link status only if link status actually changed */
1906 if (prev_link_status != bp->link_vars.link_status)
1907 bnx2x_link_report(bp);
1910 int port = BP_PORT(bp);
1914 /* Set the attention towards other drivers on the same port */
1915 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1916 if (vn == BP_E1HVN(bp))
1919 func = ((vn << 1) | port);
1920 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1921 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1924 if (bp->link_vars.link_up) {
1927 /* Init rate shaping and fairness contexts */
1928 bnx2x_init_port_minmax(bp);
1930 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1931 bnx2x_init_vn_minmax(bp, 2*vn + port);
1933 /* Store it to internal memory */
1935 i < sizeof(struct cmng_struct_per_port) / 4; i++)
1936 REG_WR(bp, BAR_XSTRORM_INTMEM +
1937 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1938 ((u32 *)(&bp->cmng))[i]);
1943 void bnx2x__link_status_update(struct bnx2x *bp)
1945 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
1948 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
1950 if (bp->link_vars.link_up)
1951 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1953 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1955 bnx2x_calc_vn_weight_sum(bp);
1957 /* indicate link status */
1958 bnx2x_link_report(bp);
1961 static void bnx2x_pmf_update(struct bnx2x *bp)
1963 int port = BP_PORT(bp);
1967 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1969 /* enable nig attention */
1970 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1971 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1972 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1974 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1982 * General service functions
1985 /* send the MCP a request, block until there is a reply */
1986 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
1988 int func = BP_FUNC(bp);
1989 u32 seq = ++bp->fw_seq;
1992 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1994 mutex_lock(&bp->fw_mb_mutex);
1995 SHMEM_WR(bp, func_mb[func].drv_mb_param, param);
1996 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
1997 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2000 /* let the FW do it's magic ... */
2003 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2005 /* Give the FW up to 5 second (500*10ms) */
2006 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2008 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2009 cnt*delay, rc, seq);
2011 /* is this a reply to our command? */
2012 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2013 rc &= FW_MSG_CODE_MASK;
2016 BNX2X_ERR("FW failed to respond!\n");
2020 mutex_unlock(&bp->fw_mb_mutex);
2025 /* must be called under rtnl_lock */
2026 void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2028 u32 mask = (1 << cl_id);
2030 /* initial seeting is BNX2X_ACCEPT_NONE */
2031 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2032 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2033 u8 unmatched_unicast = 0;
2035 if (filters & BNX2X_PROMISCUOUS_MODE) {
2036 /* promiscious - accept all, drop none */
2037 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2038 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2040 if (filters & BNX2X_ACCEPT_UNICAST) {
2041 /* accept matched ucast */
2044 if (filters & BNX2X_ACCEPT_MULTICAST) {
2045 /* accept matched mcast */
2048 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2049 /* accept all mcast */
2053 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2054 /* accept all mcast */
2058 if (filters & BNX2X_ACCEPT_BROADCAST) {
2059 /* accept (all) bcast */
2064 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2065 bp->mac_filters.ucast_drop_all | mask :
2066 bp->mac_filters.ucast_drop_all & ~mask;
2068 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2069 bp->mac_filters.mcast_drop_all | mask :
2070 bp->mac_filters.mcast_drop_all & ~mask;
2072 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2073 bp->mac_filters.bcast_drop_all | mask :
2074 bp->mac_filters.bcast_drop_all & ~mask;
2076 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2077 bp->mac_filters.ucast_accept_all | mask :
2078 bp->mac_filters.ucast_accept_all & ~mask;
2080 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2081 bp->mac_filters.mcast_accept_all | mask :
2082 bp->mac_filters.mcast_accept_all & ~mask;
2084 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2085 bp->mac_filters.bcast_accept_all | mask :
2086 bp->mac_filters.bcast_accept_all & ~mask;
2088 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2089 bp->mac_filters.unmatched_unicast | mask :
2090 bp->mac_filters.unmatched_unicast & ~mask;
2093 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2095 if (FUNC_CONFIG(p->func_flgs)) {
2096 struct tstorm_eth_function_common_config tcfg = {0};
2099 if (p->func_flgs & FUNC_FLG_TPA)
2100 tcfg.config_flags |=
2101 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2104 if (p->func_flgs & FUNC_FLG_RSS) {
2105 u16 rss_flgs = (p->rss->mode <<
2106 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2108 if (p->rss->cap & RSS_IPV4_CAP)
2109 rss_flgs |= RSS_IPV4_CAP_MASK;
2110 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2111 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2112 if (p->rss->cap & RSS_IPV6_CAP)
2113 rss_flgs |= RSS_IPV6_CAP_MASK;
2114 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2115 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2117 tcfg.config_flags |= rss_flgs;
2118 tcfg.rss_result_mask = p->rss->result_mask;
2122 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2125 /* Enable the function in the FW */
2126 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2127 storm_memset_func_en(bp, p->func_id, 1);
2130 if (p->func_flgs & FUNC_FLG_STATS) {
2131 struct stats_indication_flags stats_flags = {0};
2132 stats_flags.collect_eth = 1;
2134 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2135 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2137 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2138 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2140 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2141 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2143 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2144 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2148 if (p->func_flgs & FUNC_FLG_SPQ) {
2149 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2150 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2151 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2155 static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2156 struct bnx2x_fastpath *fp)
2160 /* calculate queue flags */
2161 flags |= QUEUE_FLG_CACHE_ALIGN;
2162 flags |= QUEUE_FLG_HC;
2163 flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
2166 flags |= QUEUE_FLG_VLAN;
2167 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2170 if (!fp->disable_tpa)
2171 flags |= QUEUE_FLG_TPA;
2173 flags |= QUEUE_FLG_STATS;
2178 static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2179 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2180 struct bnx2x_rxq_init_params *rxq_init)
2184 u16 tpa_agg_size = 0;
2186 /* calculate queue flags */
2187 u16 flags = bnx2x_get_cl_flags(bp, fp);
2189 if (!fp->disable_tpa) {
2190 pause->sge_th_hi = 250;
2191 pause->sge_th_lo = 150;
2192 tpa_agg_size = min_t(u32,
2193 (min_t(u32, 8, MAX_SKB_FRAGS) *
2194 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2195 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2197 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2198 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2199 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2203 /* pause - not for e1 */
2204 if (!CHIP_IS_E1(bp)) {
2205 pause->bd_th_hi = 350;
2206 pause->bd_th_lo = 250;
2207 pause->rcq_th_hi = 350;
2208 pause->rcq_th_lo = 250;
2209 pause->sge_th_hi = 0;
2210 pause->sge_th_lo = 0;
2215 rxq_init->flags = flags;
2216 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2217 rxq_init->dscr_map = fp->rx_desc_mapping;
2218 rxq_init->sge_map = fp->rx_sge_mapping;
2219 rxq_init->rcq_map = fp->rx_comp_mapping;
2220 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2221 rxq_init->mtu = bp->dev->mtu;
2222 rxq_init->buf_sz = bp->rx_buf_size;
2223 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2224 rxq_init->cl_id = fp->cl_id;
2225 rxq_init->spcl_id = fp->cl_id;
2226 rxq_init->stat_id = fp->cl_id;
2227 rxq_init->tpa_agg_sz = tpa_agg_size;
2228 rxq_init->sge_buf_sz = sge_sz;
2229 rxq_init->max_sges_pkt = max_sge;
2230 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2231 rxq_init->fw_sb_id = fp->fw_sb_id;
2233 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2235 rxq_init->cid = HW_CID(bp, fp->cid);
2237 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2240 static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2241 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2243 u16 flags = bnx2x_get_cl_flags(bp, fp);
2245 txq_init->flags = flags;
2246 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2247 txq_init->dscr_map = fp->tx_desc_mapping;
2248 txq_init->stat_id = fp->cl_id;
2249 txq_init->cid = HW_CID(bp, fp->cid);
2250 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2251 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2252 txq_init->fw_sb_id = fp->fw_sb_id;
2253 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2256 void bnx2x_pf_init(struct bnx2x *bp)
2258 struct bnx2x_func_init_params func_init = {0};
2259 struct bnx2x_rss_params rss = {0};
2260 struct event_ring_data eq_data = { {0} };
2263 /* pf specific setups */
2264 if (!CHIP_IS_E1(bp))
2265 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
2267 /* function setup flags */
2268 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2270 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2273 * Although RSS is meaningless when there is a single HW queue we
2274 * still need it enabled in order to have HW Rx hash generated.
2276 * if (is_eth_multi(bp))
2277 * flags |= FUNC_FLG_RSS;
2280 /* function setup */
2281 if (flags & FUNC_FLG_RSS) {
2282 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2283 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2284 rss.mode = bp->multi_mode;
2285 rss.result_mask = MULTI_MASK;
2286 func_init.rss = &rss;
2289 func_init.func_flgs = flags;
2290 func_init.pf_id = BP_FUNC(bp);
2291 func_init.func_id = BP_FUNC(bp);
2292 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2293 func_init.spq_map = bp->spq_mapping;
2294 func_init.spq_prod = bp->spq_prod_idx;
2296 bnx2x_func_init(bp, &func_init);
2298 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2301 Congestion management values depend on the link rate
2302 There is no active link so initial link rate is set to 10 Gbps.
2303 When the link comes up The congestion management values are
2304 re-calculated according to the actual link rate.
2306 bp->link_vars.line_speed = SPEED_10000;
2307 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2309 /* Only the PMF sets the HW */
2311 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2313 /* no rx until link is up */
2314 bp->rx_mode = BNX2X_RX_MODE_NONE;
2315 bnx2x_set_storm_rx_mode(bp);
2317 /* init Event Queue */
2318 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2319 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2320 eq_data.producer = bp->eq_prod;
2321 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2322 eq_data.sb_id = DEF_SB_ID;
2323 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2327 static void bnx2x_e1h_disable(struct bnx2x *bp)
2329 int port = BP_PORT(bp);
2331 netif_tx_disable(bp->dev);
2333 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2335 netif_carrier_off(bp->dev);
2338 static void bnx2x_e1h_enable(struct bnx2x *bp)
2340 int port = BP_PORT(bp);
2342 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2344 /* Tx queue should be only reenabled */
2345 netif_tx_wake_all_queues(bp->dev);
2348 * Should not call netif_carrier_on since it will be called if the link
2349 * is up when checking for link state
2353 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2355 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2357 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2360 * This is the only place besides the function initialization
2361 * where the bp->flags can change so it is done without any
2364 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2365 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2366 bp->flags |= MF_FUNC_DIS;
2368 bnx2x_e1h_disable(bp);
2370 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2371 bp->flags &= ~MF_FUNC_DIS;
2373 bnx2x_e1h_enable(bp);
2375 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2377 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2379 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2380 bnx2x_link_sync_notify(bp);
2381 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2382 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2385 /* Report results to MCP */
2387 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2389 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2392 /* must be called under the spq lock */
2393 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2395 struct eth_spe *next_spe = bp->spq_prod_bd;
2397 if (bp->spq_prod_bd == bp->spq_last_bd) {
2398 bp->spq_prod_bd = bp->spq;
2399 bp->spq_prod_idx = 0;
2400 DP(NETIF_MSG_TIMER, "end of spq\n");
2408 /* must be called under the spq lock */
2409 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2411 int func = BP_FUNC(bp);
2413 /* Make sure that BD data is updated before writing the producer */
2416 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2421 /* the slow path queue is odd since completions arrive on the fastpath ring */
/* Post a slow-path element (ramrod) on the SPQ.
 * @command:  ramrod command id
 * @cid:      connection id the ramrod applies to
 * @data_hi/@data_lo: physical address of the ramrod data
 * @common:   non-zero for "common" (non-ETH connection) ramrods
 * Fails if the SPQ ring is full; otherwise fills the next SPE and
 * updates the producer, all under the spq lock. */
2422 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2423 u32 data_hi, u32 data_lo, int common)
2425 struct eth_spe *spe;
2428 #ifdef BNX2X_STOP_ON_ERROR
2429 if (unlikely(bp->panic))
2433 spin_lock_bh(&bp->spq_lock);
2435 if (!bp->spq_left) {
2436 BNX2X_ERR("BUG! SPQ ring full!\n");
2437 spin_unlock_bh(&bp->spq_lock);
2442 spe = bnx2x_sp_get_next(bp);
2444 /* CID needs port number to be encoded into it */
2445 spe->hdr.conn_and_cmd_data =
2446 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2451 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2452 * TRAFFIC_STOP, TRAFFIC_START
2454 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2455 & SPE_HDR_CONN_TYPE;
2457 /* ETH ramrods: SETUP, HALT */
2458 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2459 & SPE_HDR_CONN_TYPE;
/* encode the function id into the SPE header type field */
2461 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2462 SPE_HDR_FUNCTION_ID);
2464 spe->hdr.type = cpu_to_le16(type);
2466 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2467 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2469 /* stats ramrod has its own slot on the spq */
2470 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2471 /* It's ok if the actual decrement is issued towards the memory
2472 * somewhere between the spin_lock and spin_unlock. Thus no
2473 * more explicit memory barrier is needed.
2477 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2478 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2479 "type(0x%x) left %x\n",
2480 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2481 (u32)(U64_LO(bp->spq_mapping) +
2482 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2483 HW_CID(bp, cid), data_hi, data_lo, type, bp->spq_left);
2485 bnx2x_sp_prod_update(bp);
2486 spin_unlock_bh(&bp->spq_lock);
2490 /* acquire split MCP access lock register */
/* Spin (up to 1000 iterations) writing/reading GRC reg 0x9c until the
 * hardware reports the lock granted via bit 31; logs an error on
 * timeout.  Serializes GRC access against the MCP and the other port. */
2491 static int bnx2x_acquire_alr(struct bnx2x *bp)
2497 for (j = 0; j < 1000; j++) {
2499 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2500 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
/* bit 31 set on read-back means the lock was granted */
2501 if (val & (1L << 31))
2506 if (!(val & (1L << 31))) {
2507 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2514 /* release split MCP access lock register */
/* Drop the ALR by clearing GRC reg 0x9c. */
2515 static void bnx2x_release_alr(struct bnx2x *bp)
2517 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2520 #define BNX2X_DEF_SB_ATT_IDX 0x0001
2521 #define BNX2X_DEF_SB_IDX 0x0002
/* Compare the cached attention/default status-block indices against the
 * chip-written copies and refresh the cache.  Returns a bitmask of
 * BNX2X_DEF_SB_ATT_IDX / BNX2X_DEF_SB_IDX for whichever changed. */
2523 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2525 struct host_sp_status_block *def_sb = bp->def_status_blk;
2528 barrier(); /* status block is written to by the chip */
2529 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2530 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2531 rc |= BNX2X_DEF_SB_ATT_IDX;
2534 if (bp->def_idx != def_sb->sp_sb.running_index) {
2535 bp->def_idx = def_sb->sp_sb.running_index;
2536 rc |= BNX2X_DEF_SB_IDX;
2539 /* Do not reorder: indices reading should complete before handling */
2545 * slow path service functions
/* Handle newly ASSERTED attention bits: mask the asserted bits in the
 * per-port AEU mask (under the HW attention lock), record them in
 * bp->attn_state, then service the hard-wired attentions (NIG/link,
 * GPIOs, general attentions 1-6) and finally ack via the HC command
 * register.  NIG interrupts are masked around bnx2x_link_attn() and
 * restored at the end. */
2548 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2550 int port = BP_PORT(bp);
2551 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2552 COMMAND_REG_ATTN_BITS_SET);
2553 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2554 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2555 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2556 NIG_REG_MASK_INTERRUPT_PORT0;
/* a bit both asserted and already recorded indicates an IGU problem */
2560 if (bp->attn_state & asserted)
2561 BNX2X_ERR("IGU ERROR\n");
2563 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2564 aeu_mask = REG_RD(bp, aeu_addr);
2566 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2567 aeu_mask, asserted);
2568 aeu_mask &= ~(asserted & 0x3ff);
2569 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2571 REG_WR(bp, aeu_addr, aeu_mask);
2572 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2574 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2575 bp->attn_state |= asserted;
2576 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2578 if (asserted & ATTN_HARD_WIRED_MASK) {
2579 if (asserted & ATTN_NIG_FOR_FUNC) {
2581 bnx2x_acquire_phy_lock(bp);
2583 /* save nig interrupt mask */
2584 nig_mask = REG_RD(bp, nig_int_mask_addr);
2585 REG_WR(bp, nig_int_mask_addr, 0);
2587 bnx2x_link_attn(bp);
2589 /* handle unicore attn? */
2591 if (asserted & ATTN_SW_TIMER_4_FUNC)
2592 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2594 if (asserted & GPIO_2_FUNC)
2595 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2597 if (asserted & GPIO_3_FUNC)
2598 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2600 if (asserted & GPIO_4_FUNC)
2601 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
/* clear the general-attention latch registers that fired */
2604 if (asserted & ATTN_GENERAL_ATTN_1) {
2605 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2606 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2608 if (asserted & ATTN_GENERAL_ATTN_2) {
2609 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2610 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2612 if (asserted & ATTN_GENERAL_ATTN_3) {
2613 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2614 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2617 if (asserted & ATTN_GENERAL_ATTN_4) {
2618 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2619 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2621 if (asserted & ATTN_GENERAL_ATTN_5) {
2622 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2623 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2625 if (asserted & ATTN_GENERAL_ATTN_6) {
2626 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2627 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2631 } /* if hardwired */
2633 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2635 REG_WR(bp, hc_addr, asserted);
2637 /* now set back the mask */
2638 if (asserted & ATTN_NIG_FOR_FUNC) {
2639 REG_WR(bp, nig_int_mask_addr, nig_mask);
2640 bnx2x_release_phy_lock(bp);
/* Handle a fan-failure event: persist the failure in shared memory by
 * changing the external PHY type to FAILURE (so it survives a driver
 * reload), then log a fatal message to the user. */
2644 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2646 int port = BP_PORT(bp);
2648 /* mark the failure */
2651 dev_info.port_hw_config[port].external_phy_config);
2653 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2654 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2655 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2658 /* log the failure */
2659 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2660 " the driver to shutdown the card to prevent permanent"
2661 " damage. Please contact OEM Support for assistance\n");
/* Service deasserted attention group 0: SPIO5 (fan failure - resets the
 * PHY and marks the failure), GPIO3 module-detect interrupts, and any
 * fatal HW block attentions in set 0 (masked off in the AEU enable
 * register before reporting). */
2664 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2666 int port = BP_PORT(bp);
2670 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2671 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2673 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
/* disable further SPIO5 attentions for this function */
2675 val = REG_RD(bp, reg_offset);
2676 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2677 REG_WR(bp, reg_offset, val);
2679 BNX2X_ERR("SPIO5 hw attention\n");
2681 /* Fan failure attention */
2682 bnx2x_hw_reset_phy(&bp->link_params);
2683 bnx2x_fan_failure(bp);
2686 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2687 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2688 bnx2x_acquire_phy_lock(bp);
2689 bnx2x_handle_module_detect_int(&bp->link_params);
2690 bnx2x_release_phy_lock(bp);
2693 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2695 val = REG_RD(bp, reg_offset);
2696 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2697 REG_WR(bp, reg_offset, val);
2699 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2700 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
/* Service deasserted attention group 1: doorbell queue (DORQ) HW
 * interrupts and fatal HW block attentions in set 1 (masked off in the
 * AEU enable register before reporting). */
2705 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2709 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
/* reading the clear register also acks the DORQ interrupt */
2711 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2712 BNX2X_ERR("DB hw attention 0x%x\n", val);
2713 /* DORQ discard attention */
2715 BNX2X_ERR("FATAL error from DORQ\n");
2718 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2720 int port = BP_PORT(bp);
2723 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2724 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2726 val = REG_RD(bp, reg_offset);
2727 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2728 REG_WR(bp, reg_offset, val);
2730 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2731 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
/* Service deasserted attention group 2: CFC and PXP HW interrupts and
 * fatal HW block attentions in set 2 (masked off in the AEU enable
 * register before reporting). */
2736 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2740 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
/* reading the clear register also acks the CFC interrupt */
2742 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2743 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2744 /* CFC error attention */
2746 BNX2X_ERR("FATAL error from CFC\n");
2749 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2751 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2752 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2753 /* RQ_USDMDP_FIFO_OVERFLOW */
2755 BNX2X_ERR("FATAL error from PXP\n");
2758 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2760 int port = BP_PORT(bp);
2763 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2764 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2766 val = REG_RD(bp, reg_offset);
2767 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2768 REG_WR(bp, reg_offset, val);
2770 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2771 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
/* Service deasserted attention group 3: general attentions (PMF link
 * event / DCC, MC assert, MCP assert) and latched attentions (GRC
 * timeout, GRC reserved), clearing the corresponding latch signals. */
2776 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2780 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2782 if (attn & BNX2X_PMF_LINK_ASSERT) {
2783 int func = BP_FUNC(bp);
2785 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2787 MF_CFG_RD(bp, func_mf_config[func].config);
2788 val = SHMEM_RD(bp, func_mb[func].drv_status);
2789 if (val & DRV_STATUS_DCC_EVENT_MASK)
2791 (val & DRV_STATUS_DCC_EVENT_MASK));
2792 bnx2x__link_status_update(bp);
/* became the port-management function (PMF) - take over */
2793 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
2794 bnx2x_pmf_update(bp);
2796 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2798 BNX2X_ERR("MC assert!\n");
2799 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2800 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2801 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2802 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2805 } else if (attn & BNX2X_MCP_ASSERT) {
2807 BNX2X_ERR("MCP assert!\n");
2808 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2812 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2815 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2816 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2817 if (attn & BNX2X_GRC_TIMEOUT) {
2818 val = CHIP_IS_E1H(bp) ?
2819 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2820 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2822 if (attn & BNX2X_GRC_RSV) {
2823 val = CHIP_IS_E1H(bp) ?
2824 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2825 BNX2X_ERR("GRC reserved 0x%08x\n", val);
/* clear all latched attention signals */
2827 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2831 #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
2832 #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
2833 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2834 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
2835 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
2836 #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
2838 * should be run under rtnl lock
/* Clear the RESET_IN_PROGRESS flag bit in the generic MISC register
 * shared between functions (see RESET_DONE_FLAG_SHIFT). */
2840 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2842 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2843 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2844 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2850 * should be run under rtnl lock
/* Set the RESET_IN_PROGRESS flag bit in the shared generic MISC
 * register.  NOTE(review): the line that sets the flag bit in 'val' is
 * not visible in this excerpt. */
2852 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2854 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2856 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2862 * should be run under rtnl lock
/* Return true when no global reset is in progress (reset flag bits in
 * the shared generic MISC register are clear). */
2864 bool bnx2x_reset_is_done(struct bnx2x *bp)
2866 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2867 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2868 return (val & RESET_DONE_FLAG_MASK) ? false : true;
2872 * should be run under rtnl lock
/* Increment the shared load counter (lower LOAD_COUNTER_BITS of the
 * generic MISC register), wrapping within LOAD_COUNTER_MASK and
 * preserving the reset-flag bits. */
2874 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
2876 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2878 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2880 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2881 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2887 * should be run under rtnl lock
/* Decrement the shared load counter, wrapping within LOAD_COUNTER_MASK
 * and preserving the reset-flag bits. */
2889 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
2891 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2893 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2895 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2896 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2904 * should be run under rtnl lock
/* Read the current value of the shared load counter. */
2906 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2908 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
/* Reset the shared load counter to zero, leaving the reset-flag bits
 * of the generic MISC register untouched. */
2911 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2913 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2914 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2917 static inline void _print_next_block(int idx, const char *blk)
/* Walk the set bits of parity signature word 0 and print the name of
 * each HW block that reported a parity error (BRB, PARSER, TSDM,
 * SEARCHER, TSEMI).  Returns the updated running block count. */
2924 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
/* iterate bit by bit; loop terminates when all set bits are consumed */
2928 for (i = 0; sig; i++) {
2929 cur_bit = ((u32)0x1 << i);
2930 if (sig & cur_bit) {
2932 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2933 _print_next_block(par_num++, "BRB");
2935 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2936 _print_next_block(par_num++, "PARSER");
2938 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2939 _print_next_block(par_num++, "TSDM");
2941 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2942 _print_next_block(par_num++, "SEARCHER");
2944 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2945 _print_next_block(par_num++, "TSEMI");
/* Walk the set bits of parity signature word 1 and print the name of
 * each HW block that reported a parity error.  Returns the updated
 * running block count. */
2957 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2961 for (i = 0; sig; i++) {
2962 cur_bit = ((u32)0x1 << i);
2963 if (sig & cur_bit) {
2965 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2966 _print_next_block(par_num++, "PBCLIENT");
2968 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2969 _print_next_block(par_num++, "QM");
2971 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2972 _print_next_block(par_num++, "XSDM");
2974 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2975 _print_next_block(par_num++, "XSEMI");
2977 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2978 _print_next_block(par_num++, "DOORBELLQ");
2980 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2981 _print_next_block(par_num++, "VAUX PCI CORE");
2983 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2984 _print_next_block(par_num++, "DEBUG");
2986 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2987 _print_next_block(par_num++, "USDM");
2989 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2990 _print_next_block(par_num++, "USEMI");
2992 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2993 _print_next_block(par_num++, "UPB");
2995 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2996 _print_next_block(par_num++, "CSDM");
/* Walk the set bits of parity signature word 2 and print the name of
 * each HW block that reported a parity error.  Returns the updated
 * running block count. */
3008 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3012 for (i = 0; sig; i++) {
3013 cur_bit = ((u32)0x1 << i);
3014 if (sig & cur_bit) {
3016 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3017 _print_next_block(par_num++, "CSEMI");
3019 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3020 _print_next_block(par_num++, "PXP");
3022 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3023 _print_next_block(par_num++,
3024 "PXPPCICLOCKCLIENT");
3026 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3027 _print_next_block(par_num++, "CFC");
3029 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3030 _print_next_block(par_num++, "CDU");
3032 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3033 _print_next_block(par_num++, "IGU");
3035 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3036 _print_next_block(par_num++, "MISC");
/* Walk the set bits of parity signature word 3 (MCP latched parity
 * sources) and print the name of each block that reported an error.
 * Returns the updated running block count. */
3048 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3052 for (i = 0; sig; i++) {
3053 cur_bit = ((u32)0x1 << i);
3054 if (sig & cur_bit) {
3056 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3057 _print_next_block(par_num++, "MCP ROM");
3059 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3060 _print_next_block(par_num++, "MCP UMP RX");
3062 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3063 _print_next_block(par_num++, "MCP UMP TX");
3065 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3066 _print_next_block(par_num++, "MCP SCPAD");
/* Check the four attention signature words against the per-word parity
 * masks; if any parity bits are set, log the full signatures and print
 * the list of affected HW blocks.  Returns true when a parity error
 * was detected. */
3078 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3081 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3082 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3084 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3085 "[0]:0x%08x [1]:0x%08x "
3086 "[2]:0x%08x [3]:0x%08x\n",
3087 sig0 & HW_PRTY_ASSERT_SET_0,
3088 sig1 & HW_PRTY_ASSERT_SET_1,
3089 sig2 & HW_PRTY_ASSERT_SET_2,
3090 sig3 & HW_PRTY_ASSERT_SET_3);
3091 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3093 par_num = bnx2x_print_blocks_with_parity0(
3094 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3095 par_num = bnx2x_print_blocks_with_parity1(
3096 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3097 par_num = bnx2x_print_blocks_with_parity2(
3098 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3099 par_num = bnx2x_print_blocks_with_parity3(
3100 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
/* Read the four after-invert AEU attention registers for this port and
 * run the parity check on them.  Returns true if a parity error is
 * currently latched. */
3107 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3109 struct attn_route attn;
3110 int port = BP_PORT(bp);
3112 attn.sig[0] = REG_RD(bp,
3113 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3115 attn.sig[1] = REG_RD(bp,
3116 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3118 attn.sig[2] = REG_RD(bp,
3119 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3121 attn.sig[3] = REG_RD(bp,
3122 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3125 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
/* Handle newly DEASSERTED attention bits.  Takes the ALR (shared with
 * the MCP/other port), first checks for parity errors - in which case
 * it kicks off the recovery flow and returns without handling the
 * attentions - otherwise dispatches each deasserted dynamic attention
 * group to the per-group handlers, acks the bits via the HC command
 * register, unmasks them in the AEU and updates bp->attn_state. */
3129 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3131 struct attn_route attn, *group_mask;
3132 int port = BP_PORT(bp);
3138 /* need to take HW lock because MCP or other port might also
3139 try to handle this event */
3140 bnx2x_acquire_alr(bp);
3142 if (bnx2x_chk_parity_attn(bp)) {
3143 bp->recovery_state = BNX2X_RECOVERY_INIT;
3144 bnx2x_set_reset_in_progress(bp);
3145 schedule_delayed_work(&bp->reset_task, 0);
3146 /* Disable HW interrupts */
3147 bnx2x_int_disable(bp);
3148 bnx2x_release_alr(bp);
3149 /* In case of parity errors don't handle attentions so that
3150 * other function would "see" parity errors.
3155 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3156 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3157 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3158 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3159 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3160 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3162 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3163 if (deasserted & (1 << index)) {
3164 group_mask = &bp->attn_group[index];
3166 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3167 index, group_mask->sig[0], group_mask->sig[1],
3168 group_mask->sig[2], group_mask->sig[3]);
3170 bnx2x_attn_int_deasserted3(bp,
3171 attn.sig[3] & group_mask->sig[3]);
3172 bnx2x_attn_int_deasserted1(bp,
3173 attn.sig[1] & group_mask->sig[1]);
3174 bnx2x_attn_int_deasserted2(bp,
3175 attn.sig[2] & group_mask->sig[2]);
3176 bnx2x_attn_int_deasserted0(bp,
3177 attn.sig[0] & group_mask->sig[0]);
3181 bnx2x_release_alr(bp);
3183 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3186 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3188 REG_WR(bp, reg_addr, val);
/* a deasserted bit that is not in attn_state indicates an IGU problem */
3190 if (~bp->attn_state & deasserted)
3191 BNX2X_ERR("IGU ERROR\n");
3193 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3194 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3196 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3197 aeu_mask = REG_RD(bp, reg_addr);
3199 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3200 aeu_mask, deasserted);
3201 aeu_mask |= (deasserted & 0x3ff);
3202 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3204 REG_WR(bp, reg_addr, aeu_mask);
3205 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3207 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3208 bp->attn_state &= ~deasserted;
3209 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
/* Top-level attention interrupt handler: diff the chip-reported
 * attention bits/acks against the cached attn_state to find newly
 * asserted and newly deasserted bits, then dispatch each set to its
 * handler. */
3212 static void bnx2x_attn_int(struct bnx2x *bp)
3214 /* read local copy of bits */
3215 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3217 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3219 u32 attn_state = bp->attn_state;
3221 /* look for changed bits */
3222 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3223 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3226 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3227 attn_bits, attn_ack, asserted, deasserted);
/* bits/ack agreeing while differing from the cached state is invalid */
3229 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3230 BNX2X_ERR("BAD attention state\n");
3232 /* handle bits that were raised */
3234 bnx2x_attn_int_asserted(bp, asserted);
3237 bnx2x_attn_int_deasserted(bp, deasserted);
/* Write the event-queue producer to storm memory; mmiowb() keeps the
 * MMIO producer update ordered behind prior writes. */
3240 static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3242 /* No memory barriers */
3243 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3244 mmiowb(); /* keep prod updates ordered */
/* Handle a CFC-delete event-ring element that belongs to a CNIC
 * connection (cid at or above cnic starting_cid).  Panics the debug
 * dump on an errored ramrod, then signals CFC-delete completion to
 * the CNIC layer. */
3248 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3249 union event_ring_elem *elem)
/* not a CNIC cid - let the caller handle it */
3251 if (!bp->cnic_eth_dev.starting_cid ||
3252 cid < bp->cnic_eth_dev.starting_cid)
3255 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3257 if (unlikely(elem->message.data.cfc_del_event.error)) {
3258 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3260 bnx2x_panic_dump(bp);
3262 bnx2x_cnic_cfc_comp(bp, cid);
/* Drain the slow-path event queue (EQ): walk elements from the cached
 * software consumer up to the hardware consumer, dispatching each
 * opcode (stats completion, CFC delete, function start/stop, set-MAC)
 * and finally publish the new producer to the firmware. */
3267 static void bnx2x_eq_int(struct bnx2x *bp)
3269 u16 hw_cons, sw_cons, sw_prod;
3270 union event_ring_elem *elem;
3275 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3277 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3278 * when we get the next page we need to adjust so the loop
3279 * condition below will be met. The next element is the size of a
3280 * regular element and hence incrementing by 1
3282 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3285 /* This function may never run in parallel with itself for a
3286 * specific bp, thus there is no need in "paired" read memory
3289 sw_cons = bp->eq_cons;
3290 sw_prod = bp->eq_prod;
3292 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
3293 hw_cons, sw_cons, bp->spq_left);
3295 for (; sw_cons != hw_cons;
3296 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3299 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3301 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3302 opcode = elem->message.opcode;
3305 /* handle eq element */
3307 case EVENT_RING_OPCODE_STAT_QUERY:
3308 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3309 /* nothing to do with stats comp */
3312 case EVENT_RING_OPCODE_CFC_DEL:
3313 /* handle according to cid range */
3315 * we may want to verify here that the bp state is
3318 DP(NETIF_MSG_IFDOWN,
3319 "got delete ramrod for MULTI[%d]\n", cid);
/* CNIC cids are completed by the CNIC layer, not here */
3321 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3324 bnx2x_fp(bp, cid, state) =
3325 BNX2X_FP_STATE_CLOSED;
/* remaining opcodes are qualified by the current bp->state */
3330 switch (opcode | bp->state) {
3331 case (EVENT_RING_OPCODE_FUNCTION_START |
3332 BNX2X_STATE_OPENING_WAIT4_PORT):
3333 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3334 bp->state = BNX2X_STATE_FUNC_STARTED;
3337 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3338 BNX2X_STATE_CLOSING_WAIT4_HALT):
3339 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3340 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3343 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3344 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3345 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3346 bp->set_mac_pending = 0;
3349 case (EVENT_RING_OPCODE_SET_MAC |
3350 BNX2X_STATE_CLOSING_WAIT4_HALT):
3351 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3352 bp->set_mac_pending = 0;
3355 /* unknown event log error and continue */
3356 BNX2X_ERR("Unknown EQ event %d\n",
3357 elem->message.opcode);
3365 bp->eq_cons = sw_cons;
3366 bp->eq_prod = sw_prod;
3367 /* Make sure that above mem writes were issued towards the memory */
3370 /* update producer */
3371 bnx2x_update_eq_prod(bp, bp->eq_prod);
/* Slow-path work item: refresh the default status-block indices, then
 * handle attention events and/or event-queue completions depending on
 * which index changed, acking the IGU for each handled index. */
3374 static void bnx2x_sp_task(struct work_struct *work)
3376 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3379 /* Return here if interrupt is disabled */
3380 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3381 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3385 status = bnx2x_update_dsb_idx(bp);
3386 /* if (status == 0) */
3387 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3389 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
/* attention index changed - service HW attentions */
3392 if (status & BNX2X_DEF_SB_ATT_IDX) {
3394 status &= ~BNX2X_DEF_SB_ATT_IDX;
3397 /* SP events: STAT_QUERY and others */
3398 if (status & BNX2X_DEF_SB_IDX) {
3400 /* Handle EQ completions */
3403 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3404 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3406 status &= ~BNX2X_DEF_SB_IDX;
3409 if (unlikely(status))
3410 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
/* re-enable the attention line last */
3413 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3414 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
/* MSI-X slow-path interrupt handler: disable further SP interrupts via
 * the IGU ack, notify the CNIC handler (under RCU) if registered, and
 * defer the real work to bnx2x_sp_task on the driver workqueue. */
3417 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3419 struct net_device *dev = dev_instance;
3420 struct bnx2x *bp = netdev_priv(dev);
3422 /* Return here if interrupt is disabled */
3423 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3424 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3428 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3429 IGU_INT_DISABLE, 0);
3431 #ifdef BNX2X_STOP_ON_ERROR
3432 if (unlikely(bp->panic))
3438 struct cnic_ops *c_ops;
3441 c_ops = rcu_dereference(bp->cnic_ops);
3443 c_ops->cnic_handler(bp->cnic_data, NULL);
3447 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3452 /* end of slow path */
/* Periodic driver timer: polls rx on queue 0, exchanges the heartbeat
 * pulse with the MCP (logging if the delta indicates a lost
 * heartbeat), triggers a statistics update when the device is open,
 * and re-arms itself. */
3454 static void bnx2x_timer(unsigned long data)
3456 struct bnx2x *bp = (struct bnx2x *) data;
3458 if (!netif_running(bp->dev))
3461 if (atomic_read(&bp->intr_sem) != 0)
3465 struct bnx2x_fastpath *fp = &bp->fp[0];
3469 rc = bnx2x_rx_int(fp, 1000);
3472 if (!BP_NOMCP(bp)) {
3473 int func = BP_FUNC(bp);
/* advance and publish the driver heartbeat pulse */
3477 ++bp->fw_drv_pulse_wr_seq;
3478 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3479 /* TBD - add SYSTEM_TIME */
3480 drv_pulse = bp->fw_drv_pulse_wr_seq;
3481 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3483 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3484 MCP_PULSE_SEQ_MASK);
3485 /* The delta between driver pulse and mcp response
3486 * should be 1 (before mcp response) or 0 (after mcp response)
3488 if ((drv_pulse != mcp_pulse) &&
3489 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3490 /* someone lost a heartbeat... */
3491 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3492 drv_pulse, mcp_pulse);
3496 if (bp->state == BNX2X_STATE_OPEN)
3497 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3500 mod_timer(&bp->timer, jiffies + bp->current_interval);
3503 /* end of Statistics */
3508 * nic init service functions
/* Fill @len bytes of device memory at @addr with @fill: uses dword
 * writes when both address and length are 4-byte aligned, otherwise
 * falls back to byte writes. */
3511 static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
3514 if (!(len%4) && !(addr%4))
3515 for (i = 0; i < len; i += 4)
3516 REG_WR(bp, addr + i, fill);
3518 for (i = 0; i < len; i++)
3519 REG_WR8(bp, addr + i, fill);
3523 /* helper: writes FP SP data to FW - data_size in dwords */
/* Copy @data_size dwords of fastpath status-block data into the
 * CSTORM status-block data area for firmware status block @fw_sb_id. */
3524 static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3530 for (index = 0; index < data_size; index++)
3531 REG_WR(bp, BAR_CSTRORM_INTMEM +
3532 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3534 *(sb_data_p + index));
/* Disable and zero a fastpath status block: write a "function
 * disabled" data structure to the CSTORM, then clear the status-block
 * and sync-block areas themselves. */
3537 static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3541 struct hc_status_block_data_e1x sb_data_e1x;
3543 /* disable the function first */
3544 memset(&sb_data_e1x, 0,
3545 sizeof(struct hc_status_block_data_e1x));
3546 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3547 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3548 sb_data_e1x.common.p_func.vf_valid = false;
3549 sb_data_p = (u32 *)&sb_data_e1x;
3550 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3552 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3554 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3555 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3556 CSTORM_STATUS_BLOCK_SIZE);
3557 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3558 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3559 CSTORM_SYNC_BLOCK_SIZE);
3562 /* helper: writes SP SB data to FW */
/* Copy the slow-path status-block data structure, dword by dword, into
 * the CSTORM SP status-block data area for this function. */
3563 static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3564 struct hc_sp_status_block_data *sp_sb_data)
3566 int func = BP_FUNC(bp);
3568 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3569 REG_WR(bp, BAR_CSTRORM_INTMEM +
3570 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3572 *((u32 *)sp_sb_data + i));
/* Disable and zero the slow-path status block: write a "function
 * disabled" data structure, then clear the SP status-block and SP
 * sync-block areas in the CSTORM. */
3575 static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3577 int func = BP_FUNC(bp);
3578 struct hc_sp_status_block_data sp_sb_data;
3579 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3581 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3582 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3583 sp_sb_data.p_func.vf_valid = false;
3585 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3587 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3588 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3589 CSTORM_SP_STATUS_BLOCK_SIZE);
3590 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3591 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3592 CSTORM_SP_SYNC_BLOCK_SIZE);
/* Initialize one status-block state machine: bind it to an IGU status
 * block/segment and effectively disable its timer (max timer value and
 * expiry). */
3598 void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3599 int igu_sb_id, int igu_seg_id)
3601 hc_sm->igu_sb_id = igu_sb_id;
3602 hc_sm->igu_seg_id = igu_seg_id;
3603 hc_sm->timer_value = 0xFF;
3604 hc_sm->time_to_expire = 0xFFFFFFFF;
/* Initialize a fastpath status block: zero/disable it first, then fill
 * in the e1x status-block data (function ids, host DMA address,
 * RX/TX state machines) and write it to the CSTORM. */
3607 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
3608 u8 vf_valid, int fw_sb_id, int igu_sb_id)
3612 struct hc_status_block_data_e1x sb_data_e1x;
3613 struct hc_status_block_sm *hc_sm_p;
3614 struct hc_index_data *hc_index_p;
3618 igu_seg_id = HC_SEG_ACCESS_NORM;
3620 bnx2x_zero_fp_sb(bp, fw_sb_id);
3622 memset(&sb_data_e1x, 0,
3623 sizeof(struct hc_status_block_data_e1x));
3624 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3625 sb_data_e1x.common.p_func.vf_id = 0xff;
3626 sb_data_e1x.common.p_func.vf_valid = false;
3627 sb_data_e1x.common.p_func.vnic_id = BP_E1HVN(bp);
3628 sb_data_e1x.common.same_igu_sb_1b = true;
3629 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3630 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3631 hc_sm_p = sb_data_e1x.common.state_machine;
3632 hc_index_p = sb_data_e1x.index_data;
3633 sb_data_p = (u32 *)&sb_data_e1x;
3634 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3637 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3638 igu_sb_id, igu_seg_id);
3639 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
3640 igu_sb_id, igu_seg_id);
3642 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3644 /* write indices to HW */
3645 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
/* Program interrupt coalescing for one status-block index: convert
 * @usec to HC timer ticks (BNX2X_BTR units) and set the disable flag -
 * forced on when @usec is 0. */
3648 static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3649 u8 sb_index, u8 disable, u16 usec)
3651 int port = BP_PORT(bp);
3652 u8 ticks = usec / BNX2X_BTR;
3654 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3656 disable = disable ? 1 : (usec ? 0 : 1);
3657 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
/* Program RX and TX coalescing timeouts for one fastpath status block. */
3660 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
3661 u16 tx_usec, u16 rx_usec)
3663 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
3665 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
/* Initialize the default (slow-path) status block: capture the per-
 * group attention enable masks, program the attention message address
 * in the HC, write the SP status-block data to the CSTORM and enable
 * the IGU interrupt for it. */
3668 static void bnx2x_init_def_sb(struct bnx2x *bp)
3670 struct host_sp_status_block *def_sb = bp->def_status_blk;
3671 dma_addr_t mapping = bp->def_status_blk_mapping;
3672 int igu_sp_sb_index;
3674 int port = BP_PORT(bp);
3675 int func = BP_FUNC(bp);
3679 struct hc_sp_status_block_data sp_sb_data;
3680 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3682 igu_sp_sb_index = DEF_SB_IGU_ID;
3683 igu_seg_id = HC_SEG_ACCESS_DEF;
/* attention status block lives inside the host SP status block */
3686 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
3687 atten_status_block);
3688 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
3692 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
3693 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
3694 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3696 /* take care of sig[0]..sig[3] */
3697 for (sindex = 0; sindex < 4; sindex++)
3698 bp->attn_group[index].sig[sindex] =
3699 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
3702 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
3703 HC_REG_ATTN_MSG0_ADDR_L);
3704 REG_WR(bp, reg_offset, U64_LO(section));
3705 REG_WR(bp, reg_offset + 4, U64_HI(section));
3707 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
3710 bnx2x_zero_sp_sb(bp);
3712 sp_sb_data.host_sb_addr.lo = U64_LO(section);
3713 sp_sb_data.host_sb_addr.hi = U64_HI(section);
3714 sp_sb_data.igu_sb_id = igu_sp_sb_index;
3715 sp_sb_data.igu_seg_id = igu_seg_id;
3716 sp_sb_data.p_func.pf_id = func;
3717 sp_sb_data.p_func.vnic_id = BP_E1HVN(bp);
3718 sp_sb_data.p_func.vf_id = 0xff;
3720 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3722 bp->stats_pending = 0;
3723 bp->set_mac_pending = 0;
3725 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
/* Apply the configured rx/tx coalescing ticks to every queue's status
 * block. */
3728 void bnx2x_update_coalesce(struct bnx2x *bp)
3732 for_each_queue(bp, i)
3733 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
3734 bp->rx_ticks, bp->tx_ticks);
/* Initialize the slow-path queue ring state: lock, free-entry count,
 * producer index/pointer and the last-BD sentinel used for wrapping. */
3737 static void bnx2x_init_sp_ring(struct bnx2x *bp)
3739 spin_lock_init(&bp->spq_lock);
3741 bp->spq_left = MAX_SPQ_PENDING;
3742 bp->spq_prod_idx = 0;
3743 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
3744 bp->spq_prod_bd = bp->spq;
3745 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
/* Initialize the event queue ring: chain each page's last element to the
 * next page (wrapping to page 0 after the last via the modulo), then set
 * the initial producer and the consumer SB index. */
3748 static void bnx2x_init_eq_ring(struct bnx2x *bp)
3751 for (i = 1; i <= NUM_EQ_PAGES; i++) {
3752 union event_ring_elem *elem =
3753 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];	/* last elem of page i-1 */
3755 elem->next_page.addr.hi =
3756 cpu_to_le32(U64_HI(bp->eq_mapping +
3757 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
3758 elem->next_page.addr.lo =
3759 cpu_to_le32(U64_LO(bp->eq_mapping +
3760 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
3763 bp->eq_prod = NUM_EQ_DESC;
3764 bp->eq_cons_sb = BNX2X_EQ_INDEX;
/* Program the TSTORM RSS indirection table: distribute table entries
 * round-robin over the client IDs of the active queues. No-op when RSS
 * is disabled. */
3767 static void bnx2x_init_ind_table(struct bnx2x *bp)
3769 int func = BP_FUNC(bp);
3772 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
3776 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
3777 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3778 REG_WR8(bp, BAR_TSTRORM_INTMEM +
3779 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
3780 bp->fp->cl_id + (i % bp->num_queues));
/* Apply the current bp->rx_mode to the hardware: set the per-client MAC
 * filter accept flags, program the NIG LLH BRB driver mask for this
 * port, and push the resulting mac_filters config to storm memory. */
3783 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3785 int mode = bp->rx_mode;
3788 /* All but management unicast packets should pass to the host as well */
3790 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
3791 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
3792 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3793 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
3796 case BNX2X_RX_MODE_NONE: /* no Rx */
3797 cl_id = BP_L_ID(bp);
3798 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
3801 case BNX2X_RX_MODE_NORMAL:
3802 cl_id = BP_L_ID(bp);
3803 bnx2x_rxq_set_mac_filters(bp, cl_id,
3804 BNX2X_ACCEPT_UNICAST |
3805 BNX2X_ACCEPT_BROADCAST |
3806 BNX2X_ACCEPT_MULTICAST);
3809 case BNX2X_RX_MODE_ALLMULTI:
3810 cl_id = BP_L_ID(bp);
3811 bnx2x_rxq_set_mac_filters(bp, cl_id,
3812 BNX2X_ACCEPT_UNICAST |
3813 BNX2X_ACCEPT_BROADCAST |
3814 BNX2X_ACCEPT_ALL_MULTICAST);
3817 case BNX2X_RX_MODE_PROMISC:
3818 cl_id = BP_L_ID(bp);
3819 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
3821 /* pass management unicast packets as well */
3822 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
3826 BNX2X_ERR("BAD rx mode (%d)\n", mode);
/* program the LLH mask on the register that matches our port */
3831 BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
3832 NIG_REG_LLH0_BRB1_DRV_MASK,
3835 DP(NETIF_MSG_IFUP, "rx mode %d\n"
3836 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
3837 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
3838 bp->mac_filters.ucast_drop_all,
3839 bp->mac_filters.mcast_drop_all,
3840 bp->mac_filters.bcast_drop_all,
3841 bp->mac_filters.ucast_accept_all,
3842 bp->mac_filters.mcast_accept_all,
3843 bp->mac_filters.bcast_accept_all
/* commit the computed filter config to storm internal memory */
3846 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
/* Chip-common internal memory init, done once by the function that loads
 * with DRV_LOAD_COMMON: publish the function mode to all four storm
 * processors (E1H only) and zero the USTORM aggregation data area. */
3849 static void bnx2x_init_internal_common(struct bnx2x *bp)
3853 if (!CHIP_IS_E1(bp)) {
3855 /* xstorm needs to know whether to add ovlan to packets or not,
3856 * in switch-independent we'll write 0 to here... */
3857 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3859 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3861 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3863 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3867 /* Zero this manually as its initialization is
3868 currently missing in the initTool */
3869 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3870 REG_WR(bp, BAR_USTRORM_INTMEM +
3871 USTORM_AGG_DATA_OFFSET + i * 4, 0);
/* Per-port internal memory init — currently has no visible work here. */
3874 static void bnx2x_init_internal_port(struct bnx2x *bp)
/* Dispatch internal-memory initialization according to the MCP load
 * response: COMMON implies common + port + function init cascade,
 * FUNCTION-only init happens later in bnx2x_pf_init. */
3879 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3881 switch (load_code) {
3882 case FW_MSG_CODE_DRV_LOAD_COMMON:
3883 bnx2x_init_internal_common(bp);
3886 case FW_MSG_CODE_DRV_LOAD_PORT:
3887 bnx2x_init_internal_port(bp);
3890 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3891 /* internal memory per function is
3892 initialized inside bnx2x_pf_init */
3896 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
/* Initialize one fastpath queue's status block and identifiers: derive
 * client / FW SB / IGU SB / qZone ids from the queue index, locate the
 * USTORM rx-producers area, set the SB index positions, and program the
 * status block in hardware. */
3901 static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
3903 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
3905 fp->state = BNX2X_FP_STATE_CLOSED;
3907 fp->index = fp->cid = fp_idx;
3908 fp->cl_id = BP_L_ID(bp) + fp_idx;
3909 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
3910 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
3911 /* qZone id equals to FW (per path) client id */
3912 fp->cl_qzone_id = fp->cl_id +
3913 BP_PORT(bp)*(ETH_MAX_RX_CLIENTS_E1H);
3915 fp->ustorm_rx_prods_offset =
3916 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
3917 /* Setup SB indicies */
3918 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
3919 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
3921 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
3922 "cl_id %d fw_sb %d igu_sb %d\n",
3923 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
3925 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
3926 fp->fw_sb_id, fp->igu_sb_id);
3928 bnx2x_update_fpsb_idx(fp);
/* Top-level NIC initialization after firmware load: bring up all status
 * blocks (fastpath, CNIC, default), the rx/tx/slow-path/event rings and
 * internal memories, then enable interrupts and check the fan-failure
 * attention (SPIO5). */
3931 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
3935 for_each_queue(bp, i)
3936 bnx2x_init_fp_sb(bp, i);
/* CNIC (offload) status block — presumably under BCM_CNIC; the #ifdef
 * is not visible in this view */
3939 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
3940 BNX2X_VF_ID_INVALID, false,
3941 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
3945 /* ensure status block indices were read */
3948 bnx2x_init_def_sb(bp);
3949 bnx2x_update_dsb_idx(bp);
3950 bnx2x_init_rx_rings(bp);
3951 bnx2x_init_tx_rings(bp);
3952 bnx2x_init_sp_ring(bp);
3953 bnx2x_init_eq_ring(bp);
3954 bnx2x_init_internal(bp, load_code);
3956 bnx2x_init_ind_table(bp);
3957 bnx2x_stats_init(bp);
3959 /* At this point, we are ready for interrupts */
3960 atomic_set(&bp->intr_sem, 0);
3962 /* flush all before enabling interrupts */
3966 bnx2x_int_enable(bp);
3968 /* Check for SPIO5 */
3969 bnx2x_attn_int_deasserted0(bp,
3970 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
3971 AEU_INPUTS_ATTN_BITS_SPIO5);
3974 /* end of nic init */
3977 * gzip service functions
/* Allocate the resources used to decompress firmware: a DMA-coherent
 * output buffer, a zlib stream object and its inflate workspace.
 * On failure, previously allocated pieces are freed (error unwinding
 * labels are not visible in this view). */
3980 static int bnx2x_gunzip_init(struct bnx2x *bp)
3982 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
3983 &bp->gunzip_mapping, GFP_KERNEL);
3984 if (bp->gunzip_buf == NULL)
3987 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3988 if (bp->strm == NULL)
3991 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3993 if (bp->strm->workspace == NULL)
/* error path: release the DMA buffer and report */
4003 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4004 bp->gunzip_mapping);
4005 bp->gunzip_buf = NULL;
4008 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4009 " un-compression\n");
/* Release the gunzip resources allocated by bnx2x_gunzip_init(). */
4013 static void bnx2x_gunzip_end(struct bnx2x *bp)
4015 kfree(bp->strm->workspace);
4020 if (bp->gunzip_buf) {
4021 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4022 bp->gunzip_mapping);
4023 bp->gunzip_buf = NULL;	/* mark as released */
/* Inflate a gzip-wrapped firmware blob of 'len' bytes into
 * bp->gunzip_buf. Validates the gzip magic/method, skips the optional
 * FNAME field, then runs raw inflate (-MAX_WBITS strips the zlib
 * header). Resulting length is stored in bp->gunzip_outlen in dwords. */
4027 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
4031 /* check gzip header */
4032 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4033 BNX2X_ERR("Bad gzip header\n");
/* skip the NUL-terminated original-file-name field when present */
4041 if (zbuf[3] & FNAME)
4042 while ((zbuf[n++] != 0) && (n < len));
4044 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
4045 bp->strm->avail_in = len - n;
4046 bp->strm->next_out = bp->gunzip_buf;
4047 bp->strm->avail_out = FW_BUF_SIZE;
4049 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4053 rc = zlib_inflate(bp->strm, Z_FINISH);
4054 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4055 netdev_err(bp->dev, "Firmware decompression error: %s\n",
/* output must be dword-aligned for the DMAE-based consumers */
4058 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4059 if (bp->gunzip_outlen & 0x3)
4060 netdev_err(bp->dev, "Firmware decompression error:"
4061 " gunzip_outlen (%d) not aligned\n",
4063 bp->gunzip_outlen >>= 2;	/* convert bytes to dwords */
4065 zlib_inflateEnd(bp->strm);
4067 if (rc == Z_STREAM_END)
4073 /* nic load/unload */
4076 * General service functions
4079 /* send a NIG loopback debug packet */
4080 static void bnx2x_lb_pckt(struct bnx2x *bp)
4084 /* Ethernet source and destination addresses */
4085 wb_write[0] = 0x55555555;
4086 wb_write[1] = 0x55555555;
4087 wb_write[2] = 0x20; /* SOP */
4088 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4090 /* NON-IP protocol */
4091 wb_write[0] = 0x09000000;
4092 wb_write[1] = 0x55555555;
4093 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4094 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4097 /* some of the internal memories
4098 * are not directly readable from the driver
4099 * to test them we send debug packets
/* Self-test of the BRB/PRS internal memories using NIG loopback debug
 * packets. Phase 1: one packet must show up in both NIG and PRS
 * statistics; phase 2: ten packets, then drain and verify the NIG EOP
 * FIFO is empty. BRB/PRS blocks are reset and re-initialized between
 * and after the phases. Returns non-zero-ish failure codes on timeout
 * (the exact return statements are not visible in this view). */
4101 static int bnx2x_int_mem_test(struct bnx2x *bp)
/* slower platforms (FPGA/emulation) get proportionally longer waits */
4107 if (CHIP_REV_IS_FPGA(bp))
4109 else if (CHIP_REV_IS_EMUL(bp))
4114 /* Disable inputs of parser neighbor blocks */
4115 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4116 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4117 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4118 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4120 /* Write 0 to parser credits for CFC search request */
4121 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4123 /* send Ethernet packet */
4126 /* TODO do i reset NIG statistic? */
4127 /* Wait until NIG register shows 1 packet of size 0x10 */
4128 count = 1000 * factor;
4131 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4132 val = *bnx2x_sp(bp, wb_data[0]);
4140 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4144 /* Wait until PRS register shows 1 packet */
4145 count = 1000 * factor;
4147 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4155 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4159 /* Reset and init BRB, PRS */
4160 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4162 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4164 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4165 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4167 DP(NETIF_MSG_HW, "part2\n");
4169 /* Disable inputs of parser neighbor blocks */
4170 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4171 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4172 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4173 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4175 /* Write 0 to parser credits for CFC search request */
4176 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4178 /* send 10 Ethernet packets */
4179 for (i = 0; i < 10; i++)
4182 /* Wait until NIG register shows 10 + 1
4183 packets of size 11*0x10 = 0xb0 */
4184 count = 1000 * factor;
4187 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4188 val = *bnx2x_sp(bp, wb_data[0]);
4196 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4200 /* Wait until PRS register shows 2 packets */
4201 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4203 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4205 /* Write 1 to parser credits for CFC search request */
4206 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4208 /* Wait until PRS register shows 3 packets */
4209 msleep(10 * factor);
4210 /* Wait until NIG register shows 1 packet of size 0x10 */
4211 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4213 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4215 /* clear NIG EOP FIFO */
4216 for (i = 0; i < 11; i++)
4217 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4218 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4220 BNX2X_ERR("clear of NIG failed\n");
4224 /* Reset and init BRB, PRS, NIG */
4225 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4227 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4229 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4230 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4233 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4236 /* Enable inputs of parser neighbor blocks */
4237 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4238 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4239 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
4240 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
4242 DP(NETIF_MSG_HW, "done\n");
/* Unmask attention interrupts in all HW blocks (writing 0 to an
 * *_INT_MASK register unmasks it). The commented-out SEM/MISC lines are
 * deliberately left masked; PBF keeps bits 3-4 masked. */
4247 static void enable_blocks_attention(struct bnx2x *bp)
4249 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4250 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
4251 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4252 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4253 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4254 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4255 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4256 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4257 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
4258 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4259 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
4260 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4261 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4262 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
4263 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4264 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
4265 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4266 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4267 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4268 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
4269 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4270 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
/* PXP2 mask value differs on FPGA vs. real silicon */
4271 if (CHIP_REV_IS_FPGA(bp))
4272 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4274 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
4275 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4276 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4277 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
4278 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4279 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
4280 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4281 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
4282 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4283 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
/* Table of per-block parity mask registers and the mask value to program
 * into each (0x0 = all parity attentions enabled; non-zero bits stay
 * masked). Consumed by enable_blocks_parity(). */
4286 static const struct {
4289 } bnx2x_parity_mask[] = {
4290 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
4291 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
4292 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
4293 {HC_REG_HC_PRTY_MASK, 0xffffffff},
4294 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
4295 {QM_REG_QM_PRTY_MASK, 0x0},
4296 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
4297 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
4298 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
4299 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
4300 {CDU_REG_CDU_PRTY_MASK, 0x0},
4301 {CFC_REG_CFC_PRTY_MASK, 0x0},
4302 {DBG_REG_DBG_PRTY_MASK, 0x0},
4303 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
4304 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
4305 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
4306 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
4307 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
4308 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
4309 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
4310 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
4311 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
4312 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
4313 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
4314 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
4315 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
4316 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
4317 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
/* Program every parity mask register listed in bnx2x_parity_mask[]. */
4320 static void enable_blocks_parity(struct bnx2x *bp)
4324 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
4325 REG_WR(bp, bnx2x_parity_mask[i].addr,
4326 bnx2x_parity_mask[i].mask);
/* Assert the common HW reset via the MISC reset registers (CLEAR = put
 * blocks into reset). */
4330 static void bnx2x_reset_common(struct bnx2x *bp)
4333 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4335 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403)
/* Configure the PXP arbiter read/write ordering from the PCIe device
 * control register: payload size gives write order, read-request size
 * gives read order (overridden by the bp->mrrs module parameter,
 * per the visible debug print). */
4338 static void bnx2x_init_pxp(struct bnx2x *bp)
4341 int r_order, w_order;
4343 pci_read_config_word(bp->pdev,
4344 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4345 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4346 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4348 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4350 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4354 bnx2x_init_pxp_arb(bp, r_order, w_order);
/* Decide from shared HW config whether fan-failure detection is needed
 * (either forced on, or required by the PHY type on any port) and, if
 * so, arm SPIO5 as an active-low input that raises an IGU interrupt. */
4357 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4367 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4368 SHARED_HW_CFG_FAN_FAILURE_MASK;
4370 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4374 * The fan failure mechanism is usually related to the PHY type since
4375 * the power consumption of the board is affected by the PHY. Currently,
4376 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4378 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4379 for (port = PORT_0; port < PORT_MAX; port++) {
4381 bnx2x_fan_failure_det_req(
4383 bp->common.shmem_base,
4384 bp->common.shmem2_base,
4388 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4390 if (is_required == 0)
4393 /* Fan failure is indicated by SPIO 5 */
4394 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4395 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4397 /* set to active low mode */
4398 val = REG_RD(bp, MISC_REG_SPIO_INT);
4399 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
4400 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
4401 REG_WR(bp, MISC_REG_SPIO_INT, val);
4403 /* enable interrupt to signal the IGU */
4404 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4405 val |= (1 << MISC_REGISTERS_SPIO_5);
4406 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
/* Chip-common hardware init, executed once by the first function to load
 * (DRV_LOAD_COMMON): releases all blocks from reset and initializes
 * them in dependency order (MISC, PXP/PXP2, DMAE, storm CMs/SEMs, QM,
 * doorbells, BRB/PRS, search engine, CDU, CFC, HC, NIG), runs the
 * internal memory self-test on E1, arms attention/parity masks and
 * performs the common PHY init. */
4409 static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4413 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
4415 bnx2x_reset_common(bp);
/* release everything from reset */
4416 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4417 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4419 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
4420 if (CHIP_IS_E1H(bp))
4421 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
4423 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
4425 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
4427 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
4428 if (CHIP_IS_E1(bp)) {
4429 /* enable HW interrupt from PXP on USDM overflow
4430 bit 16 on INT_MASK_0 */
4431 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4434 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
/* big-endian swap config — presumably under __BIG_ENDIAN; the #ifdef is
 * not visible in this view */
4438 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4439 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4440 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4441 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4442 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
4443 /* make sure this value is 0 */
4444 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
4446 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4447 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4448 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4449 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4450 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
4453 bnx2x_ilt_init_page_size(bp, INITOP_SET);
4456 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4457 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
4459 /* let the HW do it's magic ... */
4461 /* finish PXP init */
4462 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
4464 BNX2X_ERR("PXP2 CFG failed\n");
4467 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
4469 BNX2X_ERR("PXP2 RD_INIT failed\n");
4473 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
4474 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
4476 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
4478 /* clean the DMAE memory */
4480 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
4482 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
4483 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
4484 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
4485 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
4487 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
4488 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
4489 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
4490 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
4492 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
4494 /* QM queues pointers table */
4495 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
4497 /* soft reset pulse */
4498 REG_WR(bp, QM_REG_SOFT_RESET, 1);
4499 REG_WR(bp, QM_REG_SOFT_RESET, 0);
4502 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
4505 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
4506 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
4508 if (!CHIP_REV_IS_SLOW(bp)) {
4509 /* enable hw interrupt from doorbell Q */
4510 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4513 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4514 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4515 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4518 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4520 if (CHIP_IS_E1H(bp))
4521 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
4523 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4524 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4525 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4526 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
/* clear the storm fast memories before starting the SEM blocks */
4528 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4529 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4530 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4531 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4533 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4534 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4535 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4536 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
4539 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4541 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4544 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4545 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4546 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
/* seed the search-engine RSS key registers with random values */
4548 REG_WR(bp, SRC_REG_SOFT_RST, 1);
4549 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4550 REG_WR(bp, i, random32());
4551 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
4553 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4554 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4555 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4556 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4557 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4558 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4559 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4560 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4561 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4562 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4564 REG_WR(bp, SRC_REG_SOFT_RST, 0);
4566 if (sizeof(union cdu_context) != 1024)
4567 /* we currently assume that a context is 1024 bytes */
4568 dev_alert(&bp->pdev->dev, "please adjust the size "
4569 "of cdu_context(%ld)\n",
4570 (long)sizeof(union cdu_context));
4572 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
4573 val = (4 << 24) + (0 << 12) + 1024;	/* CDU global params encoding */
4574 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
4576 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
4577 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
4578 /* enable context validation interrupt from CFC */
4579 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4581 /* set the thresholds to prevent CFC/CDU race */
4582 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
4584 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4585 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
4587 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
4588 /* Reset PCIE errors for debug */
4589 REG_WR(bp, 0x2814, 0xffffffff);
4590 REG_WR(bp, 0x3820, 0xffffffff);
4592 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
4593 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
4594 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
4595 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
4597 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
4598 if (CHIP_IS_E1H(bp)) {
4599 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
4600 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
4603 if (CHIP_REV_IS_SLOW(bp))
4606 /* finish CFC init */
4607 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4609 BNX2X_ERR("CFC LL_INIT failed\n");
4612 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4614 BNX2X_ERR("CFC AC_INIT failed\n");
4617 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4619 BNX2X_ERR("CFC CAM_INIT failed\n");
4622 REG_WR(bp, CFC_REG_DEBUG0, 0);
4624 /* read NIG statistic
4625 to see if this is our first up since powerup */
4626 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4627 val = *bnx2x_sp(bp, wb_data[0]);
4629 /* do internal memory self test */
4630 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4631 BNX2X_ERR("internal mem self test failed\n");
4635 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
4636 bp->common.shmem_base,
4637 bp->common.shmem2_base);
4639 bnx2x_setup_fan_failure_detection(bp);
4641 /* clear PXP2 attentions */
4642 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
4644 enable_blocks_attention(bp);
4645 if (CHIP_PARITY_SUPPORTED(bp))
4646 enable_blocks_parity(bp);
4648 if (!BP_NOMCP(bp)) {
4649 bnx2x_acquire_phy_lock(bp);
4650 bnx2x_common_init_phy(bp, bp->common.shmem_base,
4651 bp->common.shmem2_base);
4652 bnx2x_release_phy_lock(bp);
4654 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
/* Per-port hardware init (DRV_LOAD_PORT): runs the init-tool stages for
 * this port across all blocks, computes BRB pause thresholds from the
 * MTU and port count, configures PBF credits, AEU attention masks and
 * NIG mode, and hooks up fan-failure attention if required. Finishes
 * with a link reset. */
4659 static int bnx2x_init_hw_port(struct bnx2x *bp)
4661 int port = BP_PORT(bp);
4662 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
4666 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
4668 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
4670 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
4671 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
4673 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4674 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4675 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
4676 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
4678 /* QM cid (connection) count */
4679 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
4682 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
4683 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4684 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
4687 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
4689 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
4690 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
4691 /* no pause for emulation and FPGA */
/* pause thresholds scale with MTU and whether both ports are in use */
4696 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4697 else if (bp->dev->mtu > 4096) {
4698 if (bp->flags & ONE_PORT_FLAG)
4702 /* (24*1024 + val*4)/256 */
4703 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
4706 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
4707 high = low + 56; /* 14*1024/256 */
4709 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4710 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4713 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
4715 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
4716 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
4717 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
4718 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
4720 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
4721 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4722 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4723 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
4725 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
4726 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
4728 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
4730 /* configure PBF to work without PAUSE mtu 9000 */
4731 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
4733 /* update threshold */
4734 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
4735 /* update init credit */
4736 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
/* pulse PBF init for this port */
4739 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
4741 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
4744 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
4746 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
4747 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
4749 if (CHIP_IS_E1(bp)) {
4750 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4751 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4753 bnx2x_init_block(bp, HC_BLOCK, init_stage);
4755 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
4756 /* init aeu_mask_attn_func_0/1:
4757 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4758 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4759 * bits 4-7 are used for "per vn group attention" */
4760 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4761 (IS_MF(bp) ? 0xF7 : 0x7));
4763 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
4764 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
4765 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
4766 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
4767 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
4769 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
4771 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4773 if (CHIP_IS_E1H(bp)) {
4774 /* 0x2 disable mf_ov, 0x1 enable */
4775 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4776 (IS_MF(bp) ? 0x1 : 0x2));
4779 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4780 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
4781 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
4785 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
4786 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
4787 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
4788 bp->common.shmem_base,
4789 bp->common.shmem2_base);
/* route the SPIO5 fan-failure attention through the AEU if needed */
4790 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
4791 bp->common.shmem2_base, port)) {
4792 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4793 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4794 val = REG_RD(bp, reg_addr);
4795 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4796 REG_WR(bp, reg_addr, val);
4798 bnx2x__link_reset(bp);
/* Write one ILT (internal lookup table) entry: pick the on-chip address
 * table register for the chip revision and write the split 64-bit DMA
 * address as a wide-bus pair. */
4803 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4807 if (CHIP_IS_E1H(bp))
4808 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4810 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4812 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
/* Per-function hardware init (DRV_LOAD_FUNCTION): enable MSI reconfig,
 * populate the CDU ILT lines for this function's context memory, init
 * the search-engine T2 table, run the per-function init-tool stages for
 * all blocks, set NIG function enable/VLAN, clear PCIe error registers
 * and probe the PHY. */
4815 static int bnx2x_init_hw_func(struct bnx2x *bp)
4817 int port = BP_PORT(bp);
4818 int func = BP_FUNC(bp);
4819 struct bnx2x_ilt *ilt = BP_ILT(bp);
4824 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
4826 /* set MSI reconfigure capability */
4827 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4828 val = REG_RD(bp, addr);
4829 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4830 REG_WR(bp, addr, val);
4833 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
/* map each L2 ILT line to its slice of the pre-allocated context */
4835 for (i = 0; i < L2_ILT_LINES(bp); i++) {
4836 ilt->lines[cdu_ilt_start + i].page =
4837 bp->context.vcxt + (ILT_PAGE_CIDS * i);
4838 ilt->lines[cdu_ilt_start + i].page_mapping =
4839 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
4840 /* cdu ilt pages are allocated manually so there's no need to
4843 bnx2x_ilt_init_op(bp, INITOP_SET);
4845 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
4847 /* T1 hash bits value determines the T1 number of entries */
4848 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
4853 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4854 #endif /* BCM_CNIC */
4858 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
4860 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4861 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4862 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4863 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4864 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4865 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4866 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4867 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4868 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4870 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
4871 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
4872 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
4873 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
4874 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
4875 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
4876 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
4877 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
4878 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
4879 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
4880 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
4881 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
4882 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
4884 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
/* enable this function in the NIG and set its outer VLAN (MF mode) */
4887 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4888 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
4891 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
4893 /* HC init per function */
4894 if (CHIP_IS_E1H(bp)) {
4895 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4897 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4898 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4900 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
4902 /* Reset PCIE errors for debug */
4903 REG_WR(bp, 0x2114, 0xffffffff);
4904 REG_WR(bp, 0x2120, 0xffffffff);
4906 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
4907 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
4908 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
4909 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
4910 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
4911 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
4913 bnx2x_phy_probe(&bp->link_params);
/* Run the HW init stage selected by the MCP load_code (COMMON, PORT or
 * FUNCTION); an unrecognized code is reported via BNX2X_ERR.  When an
 * MCP is present, the driver pulse sequence is resynced from the
 * function mailbox in shmem for the keep-alive heartbeat.
 */
4917 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4921 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
4922 BP_FUNC(bp), load_code);
/* dmae_mutex serializes slow-path DMAE usage during init */
4925 mutex_init(&bp->dmae_mutex);
4926 rc = bnx2x_gunzip_init(bp);
4930 switch (load_code) {
4931 case FW_MSG_CODE_DRV_LOAD_COMMON:
4932 rc = bnx2x_init_hw_common(bp, load_code);
4937 case FW_MSG_CODE_DRV_LOAD_PORT:
4938 rc = bnx2x_init_hw_port(bp);
4943 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4944 rc = bnx2x_init_hw_func(bp);
4950 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4954 if (!BP_NOMCP(bp)) {
4955 int func = BP_FUNC(bp);
/* latch current pulse sequence so subsequent pulses stay in step */
4957 bp->fw_drv_pulse_wr_seq =
4958 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
4959 DRV_PULSE_SEQ_MASK);
4960 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4964 bnx2x_gunzip_end(bp);
/* Release all memory obtained by bnx2x_alloc_mem(): per-queue status
 * blocks and rx/tx rings, the default/slowpath blocks, CDU context,
 * ILT lines, CNIC status block, searcher T2 table, SPQ and event ring.
 * Mirrors bnx2x_alloc_mem() item for item.
 */
4969 void bnx2x_free_mem(struct bnx2x *bp)
/* helper: free a coherent-DMA region (no-op semantics for the address
 * check live on the elided lines of the macro body) */
4972 #define BNX2X_PCI_FREE(x, y, size) \
4975 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
4981 #define BNX2X_FREE(x) \
/* fastpath status blocks */
4993 for_each_queue(bp, i) {
4995 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
4996 bnx2x_fp(bp, i, status_blk_mapping),
4997 sizeof(struct host_hc_status_block_e1x));
5000 for_each_queue(bp, i) {
5002 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5003 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5004 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5005 bnx2x_fp(bp, i, rx_desc_mapping),
5006 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5008 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5009 bnx2x_fp(bp, i, rx_comp_mapping),
5010 sizeof(struct eth_fast_path_rx_cqe) *
5014 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5015 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5016 bnx2x_fp(bp, i, rx_sge_mapping),
5017 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5020 for_each_queue(bp, i) {
5022 /* fastpath tx rings: tx_buf tx_desc */
5023 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5024 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5025 bnx2x_fp(bp, i, tx_desc_mapping),
5026 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5028 /* end of fastpath */
5030 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5031 sizeof(struct host_sp_status_block));
5033 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5034 sizeof(struct bnx2x_slowpath));
5036 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5039 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5041 BNX2X_FREE(bp->ilt->lines);
5044 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5045 sizeof(struct host_hc_status_block_e1x));
5046 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
/* Slow path ring and event queue ring */
5048 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5050 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5051 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5053 #undef BNX2X_PCI_FREE
/* Allocate all driver memory: per-queue status blocks and rx/tx rings,
 * CNIC status block, searcher T2 table, default/slowpath blocks, CDU
 * context, ILT lines, SPQ and event ring.  On any failure the macros
 * jump to alloc_mem_err (on elided lines) for cleanup; freed again by
 * bnx2x_free_mem().
 */
5057 int bnx2x_alloc_mem(struct bnx2x *bp)
/* helper: zeroed coherent-DMA allocation, bails out on failure */
5060 #define BNX2X_PCI_ALLOC(x, y, size) \
5062 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
5064 goto alloc_mem_err; \
5065 memset(x, 0, size); \
/* helper: zeroed kernel allocation, bails out on failure */
5068 #define BNX2X_ALLOC(x, size) \
5070 x = kzalloc(size, GFP_KERNEL); \
5072 goto alloc_mem_err; \
/* fastpath status blocks plus cached index/running-index pointers */
5080 for_each_queue(bp, i) {
5081 bnx2x_fp(bp, i, bp) = bp;
5085 &bnx2x_fp(bp, i, status_blk_mapping),
5086 sizeof(struct host_hc_status_block_e1x));
5088 bnx2x_fp(bp, i, status_blk.e1x_sb) =
5089 (struct host_hc_status_block_e1x *)p;
5091 bnx2x_fp(bp, i, sb_index_values) = (__le16 *)
5092 (bnx2x_fp(bp, i, status_blk.e1x_sb)->sb.index_values);
5093 bnx2x_fp(bp, i, sb_running_index) = (__le16 *)
5094 (bnx2x_fp(bp, i, status_blk.e1x_sb)->sb.running_index);
5097 for_each_queue(bp, i) {
5099 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5100 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5101 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5102 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5103 &bnx2x_fp(bp, i, rx_desc_mapping),
5104 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5106 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5107 &bnx2x_fp(bp, i, rx_comp_mapping),
5108 sizeof(struct eth_fast_path_rx_cqe) *
5112 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5113 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5114 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5115 &bnx2x_fp(bp, i, rx_sge_mapping),
5116 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5119 for_each_queue(bp, i) {
5121 /* fastpath tx rings: tx_buf tx_desc */
5122 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5123 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5124 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5125 &bnx2x_fp(bp, i, tx_desc_mapping),
5126 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5128 /* end of fastpath */
5131 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
5132 sizeof(struct host_hc_status_block_e1x));
5134 /* allocate searcher T2 table */
5135 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
5139 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5140 sizeof(struct host_sp_status_block));
5142 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5143 sizeof(struct bnx2x_slowpath));
/* CDU context sized by the number of L2 connection IDs */
5145 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
5146 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
5149 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
5151 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
5154 /* Slow path ring */
5155 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
/* event queue ring */
5158 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
5159 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5166 #undef BNX2X_PCI_ALLOC
5171 * Init service functions
/* Post a COMMON FUNCTION_START ramrod and wait (common slow-path wait)
 * until bp->state reaches BNX2X_STATE_FUNC_STARTED.
 */
5173 int bnx2x_func_start(struct bnx2x *bp)
5175 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
5177 /* Wait for completion */
5178 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
5179 WAIT_RAMROD_COMMON);
/* Post a COMMON FUNCTION_STOP ramrod and wait until bp->state reaches
 * BNX2X_STATE_CLOSING_WAIT4_UNLOAD.  Counterpart of bnx2x_func_start().
 */
5182 int bnx2x_func_stop(struct bnx2x *bp)
5184 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
5186 /* Wait for completion */
5187 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
5188 0, &(bp->state), WAIT_RAMROD_COMMON);
5192 * Sets a MAC in a CAM for a few L2 Clients for E1x chip
5194 * @param bp driver descriptor
5195 * @param set set or clear an entry (1 or 0)
5196 * @param mac pointer to a buffer containing a MAC
5197 * @param cl_bit_vec bit vector of clients to register a MAC for
5198 * @param cam_offset offset in a CAM to use
5199 * @param is_bcast is the set MAC a broadcast address (for E1 only)
5201 static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
5202 u32 cl_bit_vec, u8 cam_offset,
5205 struct mac_configuration_cmd *config =
5206 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
5207 int ramrod_flags = WAIT_RAMROD_COMMON;
/* cleared by the slow-path event handler when the ramrod completes */
5209 bp->set_mac_pending = 1;
/* broadcast entry (E1 only) occupies a second CAM line */
5212 config->hdr.length = 1 + (is_bcast ? 1 : 0);
5213 config->hdr.offset = cam_offset;
5214 config->hdr.client_id = 0xff;
5215 config->hdr.reserved1 = 0;
5217 config->hdr.length = 1;
5218 config->hdr.offset = cam_offset;
5219 config->hdr.client_id = 0xff;
5220 config->hdr.reserved1 = 0;
/* primary MAC entry: MAC bytes are stored byte-swapped per 16 bits */
5223 config->config_table[0].msb_mac_addr =
5224 swab16(*(u16 *)&mac[0]);
5225 config->config_table[0].middle_mac_addr =
5226 swab16(*(u16 *)&mac[2]);
5227 config->config_table[0].lsb_mac_addr =
5228 swab16(*(u16 *)&mac[4]);
5229 config->config_table[0].clients_bit_vector =
5230 cpu_to_le32(cl_bit_vec);
5231 config->config_table[0].vlan_id = 0;
5232 config->config_table[0].pf_id = BP_FUNC(bp);
5234 SET_FLAG(config->config_table[0].flags,
5235 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
5236 T_ETH_MAC_COMMAND_SET);
5238 SET_FLAG(config->config_table[0].flags,
5239 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
5240 T_ETH_MAC_COMMAND_INVALIDATE);
5243 SET_FLAG(config->config_table[0].flags,
5244 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
5246 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
5247 (set ? "setting" : "clearing"),
5248 config->config_table[0].msb_mac_addr,
5249 config->config_table[0].middle_mac_addr,
5250 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
5252 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
5253 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
5254 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
5256 /* Wait for a completion */
5257 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
/* Poll/wait until *state_p equals the expected state, optionally
 * servicing the rx completion queue (WAIT_RAMROD_POLL) so the slow-path
 * event that updates *state_p can be processed with interrupts off.
 * Logs an error on timeout.
 */
5261 int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
5262 int *state_p, int flags)
5264 /* can take a while if any port is running */
5266 u8 poll = flags & WAIT_RAMROD_POLL;
5267 u8 common = flags & WAIT_RAMROD_COMMON;
5269 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
5270 poll ? "polling" : "waiting", state, idx);
5278 bnx2x_rx_int(bp->fp, 10);
5279 /* if index is different from 0
5280 * the reply for some commands will
5281 * be on the non default queue
5284 bnx2x_rx_int(&bp->fp[idx], 10);
5288 mb(); /* state is changed by bnx2x_sp_event() */
5289 if (*state_p == state) {
5290 #ifdef BNX2X_STOP_ON_ERROR
5291 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
5303 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
5304 poll ? "polling" : "waiting", state, idx);
5305 #ifdef BNX2X_STOP_ON_ERROR
/* Map a relative CAM line to an absolute E1H CAM offset: lines are
 * interleaved across the E1H_FUNC_MAX functions, indexed by BP_FUNC.
 */
5312 u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
5314 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
/* Set (set=1) or clear (set=0) the device's networking MAC in the CAM.
 * E1 uses a fixed per-port base (0 or 32) and also programs the
 * broadcast address in the following CAM line; E1H uses the
 * per-function CAM_ETH_LINE offset.
 */
5317 void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
5319 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
5320 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
5322 /* networking MAC */
5323 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
5324 (1 << bp->fp->cl_id), cam_offset , 0);
5326 if (CHIP_IS_E1(bp)) {
/* E1 needs an explicit broadcast CAM entry */
5328 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
5329 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
/* Program the E1 multicast list into the CAM starting at 'offset':
 * one SET entry per address on the netdev mc list, then INVALIDATE
 * entries for any lines left over from the previous (longer) list.
 * Posts a SET_MAC ramrod; the caller observes completion via
 * bp->set_mac_pending.
 */
5332 static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
5335 struct net_device *dev = bp->dev;
5336 struct netdev_hw_addr *ha;
5337 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
5338 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
5340 netdev_for_each_mc_addr(ha, dev) {
/* MAC bytes are stored byte-swapped per 16-bit word */
5342 config_cmd->config_table[i].msb_mac_addr =
5343 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
5344 config_cmd->config_table[i].middle_mac_addr =
5345 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
5346 config_cmd->config_table[i].lsb_mac_addr =
5347 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
5349 config_cmd->config_table[i].vlan_id = 0;
5350 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
5351 config_cmd->config_table[i].clients_bit_vector =
5352 cpu_to_le32(1 << BP_L_ID(bp));
5354 SET_FLAG(config_cmd->config_table[i].flags,
5355 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
5356 T_ETH_MAC_COMMAND_SET);
5359 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
5360 config_cmd->config_table[i].msb_mac_addr,
5361 config_cmd->config_table[i].middle_mac_addr,
5362 config_cmd->config_table[i].lsb_mac_addr);
/* invalidate stale entries beyond the new list length */
5365 old = config_cmd->hdr.length;
5367 for (; i < old; i++) {
5368 if (CAM_IS_INVALID(config_cmd->
5370 /* already invalidated */
5374 SET_FLAG(config_cmd->config_table[i].flags,
5375 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
5376 T_ETH_MAC_COMMAND_INVALIDATE);
5380 config_cmd->hdr.length = i;
5381 config_cmd->hdr.offset = offset;
5382 config_cmd->hdr.client_id = 0xff;
5383 config_cmd->hdr.reserved1 = 0;
5385 bp->set_mac_pending = 1;
5388 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
5389 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
/* Invalidate every entry of the previously programmed E1 multicast CAM
 * list and wait for the SET_MAC ramrod to complete.
 * NOTE(review): function name is misspelled ("invlidate"); the caller
 * in bnx2x_chip_cleanup() uses the same spelling, so renaming must be
 * done in both places at once.
 */
5391 static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
5394 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
5395 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
5396 int ramrod_flags = WAIT_RAMROD_COMMON;
5398 bp->set_mac_pending = 1;
5401 for (i = 0; i < config_cmd->hdr.length; i++)
5402 SET_FLAG(config_cmd->config_table[i].flags,
5403 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
5404 T_ETH_MAC_COMMAND_INVALIDATE);
5406 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
5407 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
5409 /* Wait for a completion */
5410 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
5418  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
5419  * MAC(s). This function will wait until the ramrod completion
5422  * @param bp driver handle
5423  * @param set set or clear the CAM entry
5425  * @return 0 if success, -ENODEV if ramrod doesn't return.
5427 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
/* E1: two lines past the per-port ETH base; E1H: dedicated iSCSI line */
5429 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
5430 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
5431 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
5432 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
5434 /* Send a SET_MAC ramrod */
5435 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
/* Populate a CLIENT_SETUP ramrod data buffer from the prepared client
 * init parameters: general client identity/statistics, rx queue config
 * (TPA, SGE, ring base addresses), tx queue config, and flow-control
 * thresholds.  The buffer is zeroed first so unset fields default to 0.
 */
5441 static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
5442 struct bnx2x_client_init_params *params,
5444 struct client_init_ramrod_data *data)
5446 /* Clear the buffer */
5447 memset(data, 0, sizeof(*data));
/* general client parameters */
5450 data->general.client_id = params->rxq_params.cl_id;
5451 data->general.statistics_counter_id = params->rxq_params.stat_id;
5452 data->general.statistics_en_flg =
5453 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
5454 data->general.activate_flg = activate;
5455 data->general.sp_client_id = params->rxq_params.spcl_id;
/* rx queue configuration */
5458 data->rx.tpa_en_flg =
5459 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
5460 data->rx.vmqueue_mode_en_flg = 0;
5461 data->rx.cache_line_alignment_log_size =
5462 params->rxq_params.cache_line_log;
5463 data->rx.enable_dynamic_hc =
5464 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
5465 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
5466 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
5467 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
5469 /* We don't set drop flags */
5470 data->rx.drop_ip_cs_err_flg = 0;
5471 data->rx.drop_tcp_cs_err_flg = 0;
5472 data->rx.drop_ttl0_flg = 0;
5473 data->rx.drop_udp_cs_err_flg = 0;
5475 data->rx.inner_vlan_removal_enable_flg =
5476 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
5477 data->rx.outer_vlan_removal_enable_flg =
5478 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
5479 data->rx.status_block_id = params->rxq_params.fw_sb_id;
5480 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
5481 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
5482 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
5483 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
/* 64-bit DMA addresses are split into lo/hi little-endian halves */
5484 data->rx.bd_page_base.lo =
5485 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
5486 data->rx.bd_page_base.hi =
5487 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
5488 data->rx.sge_page_base.lo =
5489 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
5490 data->rx.sge_page_base.hi =
5491 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
5492 data->rx.cqe_page_base.lo =
5493 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
5494 data->rx.cqe_page_base.hi =
5495 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
5496 data->rx.is_leading_rss =
5497 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
5498 data->rx.is_approx_mcast = data->rx.is_leading_rss;
/* tx queue configuration */
5501 data->tx.enforce_security_flg = 0; /* VF specific */
5502 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
5503 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
5504 data->tx.mtu = 0; /* VF specific */
5505 data->tx.tx_bd_page_base.lo =
5506 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
5507 data->tx.tx_bd_page_base.hi =
5508 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
5510 /* flow control data */
5511 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
5512 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
5513 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
5514 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
5515 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
5516 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
5517 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
5519 data->fc.safc_group_num = params->txq_params.cos;
5520 data->fc.safc_group_en_flg =
5521 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
5522 data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
/* Stamp the CDU validation words of an ETH connection context so the
 * hardware accepts accesses to this cid from the UCM and XCM regions.
 */
5525 static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
5527 /* ustorm cxt validation */
5528 cxt->ustorm_ag_context.cdu_usage =
5529 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
5530 ETH_CONNECTION_TYPE);
5531 /* xcontext validation */
5532 cxt->xstorm_ag_context.cdu_reserved =
5533 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
5534 ETH_CONNECTION_TYPE);
/* Set up a firmware L2 client: program HC coalescing for the tx and rx
 * status-block indices, stamp context validation, zero the client's
 * storm statistics when enabled, fill the ramrod data buffer and post
 * the CLIENT_SETUP ramrod, then wait for its completion state.
 */
5537 int bnx2x_setup_fw_client(struct bnx2x *bp,
5538 struct bnx2x_client_init_params *params,
5540 struct client_init_ramrod_data *data,
5541 dma_addr_t data_mapping)
5544 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
5545 int ramrod_flags = 0, rc;
5547 /* HC and context validation values */
/* hc_rate is in interrupts/sec; convert to usec between updates */
5548 hc_usec = params->txq_params.hc_rate ?
5549 1000000 / params->txq_params.hc_rate : 0;
5550 bnx2x_update_coalesce_sb_index(bp,
5551 params->txq_params.fw_sb_id,
5552 params->txq_params.sb_cq_index,
5553 !(params->txq_params.flags & QUEUE_FLG_HC),
5556 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
5558 hc_usec = params->rxq_params.hc_rate ?
5559 1000000 / params->rxq_params.hc_rate : 0;
5560 bnx2x_update_coalesce_sb_index(bp,
5561 params->rxq_params.fw_sb_id,
5562 params->rxq_params.sb_cq_index,
5563 !(params->rxq_params.flags & QUEUE_FLG_HC),
5566 bnx2x_set_ctx_validation(params->rxq_params.cxt,
5567 params->rxq_params.cid)
/* Choose the number of rx/tx queues for MSI-X operation based on the
 * configured RSS multi_mode: a module-supplied count for REGULAR mode
 * (otherwise the online CPU count), capped at BNX2X_MAX_QUEUES.
 */
5604 void bnx2x_set_num_queues_msix(struct bnx2x *bp)
5607 switch (bp->multi_mode) {
5608 case ETH_RSS_MODE_DISABLED:
5612 case ETH_RSS_MODE_REGULAR:
5614 bp->num_queues = min_t(u32, num_queues,
5615 BNX2X_MAX_QUEUES(bp));
5617 bp->num_queues = min_t(u32, num_online_cpus(),
5618 BNX2X_MAX_QUEUES(bp));
/* Lay out this function's ILT (internal lookup table) line ranges for
 * the CDU, QM, SRC and TM clients, starting at the per-function base
 * line.  Unused clients are marked SKIP_INIT|SKIP_MEM.  Each client's
 * range is logged for debugging.
 */
5628 void bnx2x_ilt_set_info(struct bnx2x *bp)
5630 struct ilt_client_info *ilt_client;
5631 struct bnx2x_ilt *ilt = BP_ILT(bp);
5634 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
5635 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
/* CDU client: context memory, lines for L2 (and CNIC) connections */
5638 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5639 ilt_client->client_num = ILT_CLIENT_CDU;
5640 ilt_client->page_size = CDU_ILT_PAGE_SZ;
5641 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5642 ilt_client->start = line;
5643 line += L2_ILT_LINES(bp);
5645 line += CNIC_ILT_LINES;
5647 ilt_client->end = line - 1;
5649 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
5650 "flags 0x%x, hw psz %d\n",
5653 ilt_client->page_size,
5655 ilog2(ilt_client->page_size >> 12));
/* QM client: only when the queue manager needs init for this cid count */
5658 if (QM_INIT(bp->qm_cid_count)) {
5659 ilt_client = &ilt->clients[ILT_CLIENT_QM];
5660 ilt_client->client_num = ILT_CLIENT_QM;
5661 ilt_client->page_size = QM_ILT_PAGE_SZ;
5662 ilt_client->flags = 0;
5663 ilt_client->start = line;
5665 /* 4 bytes for each cid */
5666 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5669 ilt_client->end = line - 1;
5671 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
5672 "flags 0x%x, hw psz %d\n",
5675 ilt_client->page_size,
5677 ilog2(ilt_client->page_size >> 12));
/* SRC client: searcher table lines */
5681 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5683 ilt_client->client_num = ILT_CLIENT_SRC;
5684 ilt_client->page_size = SRC_ILT_PAGE_SZ;
5685 ilt_client->flags = 0;
5686 ilt_client->start = line;
5687 line += SRC_ILT_LINES;
5688 ilt_client->end = line - 1;
5690 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
5691 "flags 0x%x, hw psz %d\n",
5694 ilt_client->page_size,
5696 ilog2(ilt_client->page_size >> 12));
5699 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
/* TM client: timers block lines */
5703 ilt_client = &ilt->clients[ILT_CLIENT_TM];
5705 ilt_client->client_num = ILT_CLIENT_TM;
5706 ilt_client->page_size = TM_ILT_PAGE_SZ;
5707 ilt_client->flags = 0;
5708 ilt_client->start = line;
5709 line += TM_ILT_LINES;
5710 ilt_client->end = line - 1;
5712 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
5713 "flags 0x%x, hw psz %d\n",
5716 ilt_client->page_size,
5718 ilog2(ilt_client->page_size >> 12));
5721 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
/* Open an L2 client on fastpath 'fp': ack its status block, build the
 * client init parameters (ramrod state tracking, leading-RSS flag,
 * rx/tx queue prep) and hand them to bnx2x_setup_fw_client() together
 * with the slow-path client_init_data buffer.
 * Fix: the four "&params" arguments below had been corrupted to the
 * mojibake "&para;ms" (pilcrow) by an HTML-entity mangling pass;
 * restored to take the address of the local params struct.
 */
5724 int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
5727 struct bnx2x_client_init_params params = { {0} };
5730 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
5733 params.ramrod_params.pstate = &fp->state;
5734 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
5735 params.ramrod_params.index = fp->index;
5736 params.ramrod_params.cid = fp->cid;
5739 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
5741 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
5743 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
5745 rc = bnx2x_setup_fw_client(bp, &params, 1,
5746 bnx2x_sp(bp, client_init_data),
5747 bnx2x_sp_mapping(bp, client_init_data));
/* Tear down a firmware L2 client in three ramrod steps — HALT,
 * TERMINATE, then CFC_DEL — waiting for each completion state before
 * issuing the next.  The first two waits honour the caller's poll flag;
 * a timeout aborts the sequence early.
 */
5751 int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
5755 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
5757 /* halt the connection */
5758 *p->pstate = BNX2X_FP_STATE_HALTING;
5759 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
5762 /* Wait for completion */
5763 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
5764 p->pstate, poll_flag);
5765 if (rc) /* timeout */
/* terminate the connection */
5768 *p->pstate = BNX2X_FP_STATE_TERMINATING;
5769 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
5771 /* Wait for completion */
5772 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
5773 p->pstate, poll_flag);
5774 if (rc) /* timeout */
5778 /* delete cfc entry */
5779 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
5781 /* Wait for completion */
5782 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
5783 p->pstate, WAIT_RAMROD_COMMON);
/* Convenience wrapper: build the ramrod params for fastpath 'index'
 * (non-polling) and run the full stop sequence via
 * bnx2x_stop_fw_client().
 */
5787 static int bnx2x_stop_client(struct bnx2x *bp, int index)
5789 struct bnx2x_client_ramrod_params client_stop = {0};
5790 struct bnx2x_fastpath *fp = &bp->fp[index];
5792 client_stop.index = index;
5793 client_stop.cid = fp->cid;
5794 client_stop.cl_id = fp->cl_id;
5795 client_stop.pstate = &(fp->state);
5796 client_stop.poll = 0;
5798 return bnx2x_stop_fw_client(bp, &client_stop);
/* Disable this PCI function in the firmware: clear the per-storm
 * function-enable flags, mark every fastpath and the slow-path status
 * block as HC_FUNCTION_DISABLED, wipe the SPQ data area, mask HC edges,
 * stop the linear timer scan (waiting up to ~2s for it to drain) and
 * zero the function's ILT range.
 */
5802 static void bnx2x_reset_func(struct bnx2x *bp)
5804 int port = BP_PORT(bp);
5805 int func = BP_FUNC(bp);
/* byte offsets of the pf_id field inside the status block data */
5807 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
5808 offsetof(struct hc_status_block_data_e1x, common);
5809 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
5810 int pfid_offset = offsetof(struct pci_entity, pf_id);
5812 /* Disable the function in the FW */
5813 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
5814 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
5815 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
5816 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
/* mark each fastpath status block disabled */
5819 for_each_queue(bp, i) {
5820 struct bnx2x_fastpath *fp = &bp->fp[i];
5822 BAR_CSTRORM_INTMEM +
5823 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
5824 + pfunc_offset_fp + pfid_offset,
5825 HC_FUNCTION_DISABLED);
/* mark the slow-path status block disabled */
5830 BAR_CSTRORM_INTMEM +
5831 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5832 pfunc_offset_sp + pfid_offset,
5833 HC_FUNCTION_DISABLED);
5836 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
5837 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
5841 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5842 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5845 /* Disable Timer scan */
5846 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
5848 * Wait for at least 10ms and up to 2 second for the timers scan to
5851 for (i = 0; i < 200; i++) {
5853 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
/* Clear ILT */
5858 base = FUNC_ILT_BASE(func);
5859 for (i = base; i < base + ILT_PER_FUNC; i++)
5860 bnx2x_ilt_wr(bp, i, 0);
/* Quiesce this port's hardware path: mask NIG interrupts, stop packet
 * flow into the BRB (except MCP traffic), mask AEU attentions, then
 * verify the BRB has drained and warn if blocks remain occupied.
 */
5865 static void bnx2x_reset_port(struct bnx2x *bp)
5867 int port = BP_PORT(bp);
5870 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5872 /* Do not rcv packets to BRB */
5873 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
5874 /* Do not direct rcv packets that are not for MCP to the BRB */
5875 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
5876 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
/* mask AEU attentions for this port */
5879 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
5882 /* Check for BRB port occupancy */
5883 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
5885 DP(NETIF_MSG_IFDOWN,
5886 "BRB1 is not empty %d blocks are occupied\n", val);
5888 /* TODO: Close Doorbell port? */
/* Dispatch the reset scope requested by the MCP unload reset_code:
 * COMMON resets port + function + common blocks, PORT resets port +
 * function, FUNCTION resets only the function.  Unknown codes are
 * reported as errors.
 */
5891 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5893 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
5894 BP_FUNC(bp), reset_code);
5896 switch (reset_code) {
5897 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5898 bnx2x_reset_port(bp);
5899 bnx2x_reset_func(bp);
5900 bnx2x_reset_common(bp);
5903 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5904 bnx2x_reset_port(bp);
5905 bnx2x_reset_func(bp);
5908 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5909 bnx2x_reset_func(bp);
5913 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
/* Full chip unload sequence: drain tx queues, remove MAC/multicast CAM
 * entries (and the iSCSI MAC under cnic_mutex), pick the unload request
 * code from unload_mode (programming WoL match registers in the EMAC if
 * WoL stays enabled), stop every client and the function via ramrods,
 * negotiate the final reset scope with the MCP (or via the no-MCP load
 * counters), reset the link when appropriate, stop NAPI/IRQs, reset the
 * chip and report UNLOAD_DONE to the MCP.
 */
5918 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5920 int port = BP_PORT(bp);
5924 /* Wait until tx fastpath tasks complete */
5925 for_each_queue(bp, i) {
5926 struct bnx2x_fastpath *fp = &bp->fp[i];
5929 while (bnx2x_has_tx_work_unload(fp)) {
5932 BNX2X_ERR("timeout waiting for queue[%d]\n",
5934 #ifdef BNX2X_STOP_ON_ERROR
5945 /* Give HW time to discard old tx messages */
5948 if (CHIP_IS_E1(bp)) {
5949 /* invalidate mc list,
5950 * wait and poll (interrupts are off)
5952 bnx2x_invlidate_e1_mc_list(bp);
5953 bnx2x_set_eth_mac(bp, 0);
5956 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5958 bnx2x_set_eth_mac(bp, 0);
/* E1H: clear the NIG multicast hash table */
5960 for (i = 0; i < MC_HASH_SIZE; i++)
5961 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
5965 /* Clear iSCSI L2 MAC */
5966 mutex_lock(&bp->cnic_mutex);
5967 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5968 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5969 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5971 mutex_unlock(&bp->cnic_mutex);
5974 if (unload_mode == UNLOAD_NORMAL)
5975 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5977 else if (bp->flags & NO_WOL_FLAG)
5978 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
/* WoL enabled: program the MAC into the EMAC match registers */
5981 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5982 u8 *mac_addr = bp->dev->dev_addr;
5984 /* The mac address is written to entries 1-4 to
5985 preserve entry 0 which is used by the PMF */
5986 u8 entry = (BP_E1HVN(bp) + 1)*8;
5988 val = (mac_addr[0] << 8) | mac_addr[1];
5989 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
5991 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5992 (mac_addr[4] << 8) | mac_addr[5];
5993 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
5995 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
5998 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6000 /* Close multi and leading connections
6001 Completions for ramrods are collected in a synchronous way */
6002 for_each_queue(bp, i)
6004 if (bnx2x_stop_client(bp, i))
6005 #ifdef BNX2X_STOP_ON_ERROR
6011 rc = bnx2x_func_stop(bp);
6013 BNX2X_ERR("Function stop failed!\n");
6014 #ifdef BNX2X_STOP_ON_ERROR
6020 #ifndef BNX2X_STOP_ON_ERROR
/* with an MCP the reply to the unload request carries the reset scope;
 * without one it is derived from the shared load counters */
6024 reset_code = bnx2x_fw_command(bp, reset_code, 0);
6026 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
6027 load_count[0], load_count[1], load_count[2]);
6029 load_count[1 + port]--;
6030 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
6031 load_count[0], load_count[1], load_count[2]);
6032 if (load_count[0] == 0)
6033 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6034 else if (load_count[1 + port] == 0)
6035 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6037 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6040 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6041 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6042 bnx2x__link_reset(bp);
6044 /* Disable HW interrupts, NAPI */
6045 bnx2x_netif_stop(bp, 1);
6048 bnx2x_free_irq(bp, false);
6050 /* Reset the chip */
6051 bnx2x_reset_chip(bp, reset_code);
6053 /* Report UNLOAD_DONE to MCP */
6055 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
/* Undo the "close the gates" recovery state: on E1 clear the per-port
 * AEU attention mask bits, on E1H clear the PXP/NIG close-mask bits in
 * the AEU general mask register.
 */
6059 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
6063 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6065 if (CHIP_IS_E1(bp)) {
6066 int port = BP_PORT(bp);
6067 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6068 MISC_REG_AEU_MASK_ATTN_FUNC_0;
6070 val = REG_RD(bp, addr);
6072 REG_WR(bp, addr, val);
6073 } else if (CHIP_IS_E1H(bp)) {
6074 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6075 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6076 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6077 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6082 /* Close gates #2, #3 and #4: */
/* Toggle the recovery "gates": discard-doorbells (#2) and
 * discard-internal-writes (#4a) on non-E1 chips, plus the per-port HC
 * config gate (#3).  close=true closes all of them, false reopens.
 */
6083 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
6087 /* Gates #2 and #4a are closed/opened for "not E1" only */
6088 if (!CHIP_IS_E1(bp)) {
6090 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
6091 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
6092 close ? (val | 0x1) : (val & (~(u32)1)));
6094 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
6095 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
6096 close ? (val | 0x1) : (val & (~(u32)1)));
/* Gate #3: HC config bit is inverted — set to open, clear to close */
6100 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
6101 val = REG_RD(bp, addr);
6102 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
6104 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
6105 close ? "closing" : "opening");
6109 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
/* Save the current CLP `magic' bit into *magic_val and set it in the
 * shared MF config mailbox, so the MF configuration survives the MCP
 * reset.  Restored later by bnx2x_clp_reset_done().
 */
6111 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
6113 /* Do some magic... */
6114 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6115 *magic_val = val & SHARED_MF_CLP_MAGIC;
6116 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
6119 /* Restore the value of the `magic' bit.
6121  * @param bp Device handle.
6122  * @param magic_val Old value of the `magic' bit.
/* Write the saved `magic' bit value back into the shared MF config
 * mailbox after the MCP reset completes.
 */
6124 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
6126 /* Restore the `magic' bit value... */
6127 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
6128 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
6129 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
6130 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6131 MF_CFG_WR(bp, shared_mf_config.clp_mb,
6132 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
6135 /* Prepares for MCP reset: takes care of CLP configurations.
6138 * @param magic_val Old value of 'magic' bit.
/* Prepare for an MCP reset: preserve the CLP `magic' bit (non-E1 only)
 * and clear the shmem validity map so the MCP's restart is detectable
 * by bnx2x_reset_mcp_comp().
 */
6140 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
6143 u32 validity_offset;
6145 DP(NETIF_MSG_HW, "Starting\n");
6147 /* Set `magic' bit in order to save MF config */
6148 if (!CHIP_IS_E1(bp))
6149 bnx2x_clp_reset_prep(bp, magic_val);
6151 /* Get shmem offset */
6152 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6153 validity_offset = offsetof(struct shmem_region, validity_map[0]);
6155 /* Clear validity map flags */
6157 REG_WR(bp, shmem + validity_offset, 0);
6160 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
6161 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
6163 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
6164 * depending on the HW type.
/* Single wait quantum used when polling for the MCP to come back up. */
6168 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
6170 /* special handling for emulation and FPGA,
6171 wait 10 times longer */
6172 if (CHIP_REV_IS_SLOW(bp))
6173 msleep(MCP_ONE_TIMEOUT*10);
6175 msleep(MCP_ONE_TIMEOUT);
/* Complete an MCP reset: poll the shmem validity map (up to
 * MCP_TIMEOUT, in MCP_ONE_TIMEOUT steps) until the MCP has rewritten
 * the DEV_INFO|MB validity signature, then restore the CLP `magic' bit
 * saved by bnx2x_reset_mcp_prep().  The return value is treated as a
 * failure indicator by the caller (see bnx2x_process_kill()).
 */
6178 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
6180 u32 shmem, cnt, validity_offset, val;
6185 /* Get shmem offset */
6186 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6188 BNX2X_ERR("Shmem 0 return failure\n");
6193 validity_offset = offsetof(struct shmem_region, validity_map[0]);
6195 /* Wait for MCP to come up */
6196 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
6197 /* TBD: its best to check validity map of last port.
6198 * currently checks on port 0.
6200 val = REG_RD(bp, shmem + validity_offset);
6201 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
6202 shmem + validity_offset, val);
6204 /* check that shared memory is valid. */
6205 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6206 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6209 bnx2x_mcp_wait_one(bp);
6212 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
6214 /* Check that shared memory is valid. This indicates that MCP is up. */
6215 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
6216 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
6217 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
6223 /* Restore the `magic' bit value */
6224 if (!CHIP_IS_E1(bp))
6225 bnx2x_clp_reset_done(bp, magic_val);
/* Clear PXP2 read-path start/done flags on non-E1 chips ahead of the
 * chip reset — presumably so the PXP block re-initializes cleanly
 * afterwards (TODO confirm against HW spec).  E1 needs no such prep.
 */
6230 static void bnx2x_pxp_prep(struct bnx2x *bp)
6232 if (!CHIP_IS_E1(bp)) {
6233 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
6234 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
6235 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
/*
6241 * Reset the whole chip except for:
6243 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
6246 * - MISC (including AEU)
 *
 * The authoritative exclusion list is the not_reset_mask1/2 values
 * assembled below.
 */
6250 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
6252 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
/* Blocks in RESET_REG_1 that must survive the reset */
6255 MISC_REGISTERS_RESET_REG_1_RST_HC |
6256 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
6257 MISC_REGISTERS_RESET_REG_1_RST_PXP;
/* Blocks in RESET_REG_2 that must survive (MDIO, EMAC hard cores,
 * MISC core, RBCN, GRC and the MCP hard cores) */
6260 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
6261 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
6262 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
6263 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
6264 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
6265 MISC_REGISTERS_RESET_REG_2_RST_GRC |
6266 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
6267 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
6269 reset_mask1 = 0xffffffff;
6272 reset_mask2 = 0xffff;
6274 reset_mask2 = 0x1ffff;
/* Assert reset on everything except the excluded blocks... */
6276 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6277 reset_mask1 & (~not_reset_mask1));
6278 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6279 reset_mask2 & (~not_reset_mask2));
/* ...then release all blocks from reset */
6284 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
6285 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
/* The global "process kill" recovery flow:
 *  1. wait (~1s) for the PXP Tetris buffer and read path to drain,
 *  2. close RX gates #2/#3/#4 and clear the "unprepared" bit,
 *  3. prepare MCP/PXP for reset, reset the chip (selected blocks kept),
 *  4. wait for the MCP to come back, reopen the gates.
 * Non-zero return is treated as failure by bnx2x_leader_reset().
 */
6289 static int bnx2x_process_kill(struct bnx2x *bp)
6293 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
6296 /* Empty the Tetris buffer, wait for 1s */
6298 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
6299 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
6300 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
6301 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
6302 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
/* Idle criteria: expected free counts, both ports idle, no pending
 * expansion-ROM reads */
6303 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
6304 ((port_is_idle_0 & 0x1) == 0x1) &&
6305 ((port_is_idle_1 & 0x1) == 0x1) &&
6306 (pgl_exp_rom2 == 0xffffffff))
6309 } while (cnt-- > 0);
6312 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
6314 " outstanding read requests after 1s!\n");
6315 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
6316 " port_is_idle_0=0x%08x,"
6317 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
6318 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
6325 /* Close gates #2, #3 and #4 */
6326 bnx2x_set_234_gates(bp, true);
6328 /* TBD: Indicate that "process kill" is in progress to MCP */
6330 /* Clear "unprepared" bit */
6331 REG_WR(bp, MISC_REG_UNPREPARED, 0);
6334 /* Make sure all is written to the chip before the reset */
6337 /* Wait for 1ms to empty GLUE and PCI-E core queues,
6338 * PSWHST, GRC and PSWRD Tetris buffer.
6342 /* Prepare to chip reset: */
6344 bnx2x_reset_mcp_prep(bp, &val);
6350 /* reset the chip */
6351 bnx2x_process_kill_chip_reset(bp);
6354 /* Recover after reset: */
6356 if (bnx2x_reset_mcp_comp(bp, val))
6362 /* Open the gates #2, #3 and #4 */
6363 bnx2x_set_234_gates(bp, false);
6365 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
6366 * reset state, re-enable attentions. */
/* Leader-only recovery step: run the "process kill" flow.  On success,
 * clear the global "reset in progress" mark, move the driver to
 * BNX2X_RECOVERY_DONE and release the leader HW lock (RESERVED_08).
 */
6371 static int bnx2x_leader_reset(struct bnx2x *bp)
6374 /* Try to recover after the failure */
6375 if (bnx2x_process_kill(bp)) {
6376 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
6379 goto exit_leader_reset;
6382 /* Clear "reset is in progress" bit and update the driver state */
6383 bnx2x_set_reset_done(bp);
6384 bp->recovery_state = BNX2X_RECOVERY_DONE;
/* Leadership is relinquished on both success and failure paths */
6388 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
6393 /* Assumption: runs under rtnl lock. This together with the fact
6394 * that it's called only from bnx2x_reset_task() ensure that it
6395 * will never be called when netif_running(bp->dev) is false.
 *
 * Parity-recovery state machine.  INIT: race for the leader HW lock
 * and unload the NIC.  WAIT: the leader waits for all other functions
 * to unload (re-arming the reset task meanwhile) and then performs the
 * chip-level recovery; non-leaders either try to inherit leadership or
 * reload once the leader has finished.
 */
6397 static void bnx2x_parity_recover(struct bnx2x *bp)
6399 DP(NETIF_MSG_HW, "Handling parity\n");
6401 switch (bp->recovery_state) {
6402 case BNX2X_RECOVERY_INIT:
6403 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
6404 /* Try to get a LEADER_LOCK HW lock */
6405 if (bnx2x_trylock_hw_lock(bp,
6406 HW_LOCK_RESOURCE_RESERVED_08))
6409 /* Stop the driver */
6410 /* If interface has been removed - break */
6411 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
6414 bp->recovery_state = BNX2X_RECOVERY_WAIT;
6415 /* Ensure "is_leader" and "recovery_state"
6416 * update values are seen on other CPUs
6421 case BNX2X_RECOVERY_WAIT:
6422 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
6423 if (bp->is_leader) {
6424 u32 load_counter = bnx2x_get_load_cnt(bp);
6426 /* Wait until all other functions get
6429 schedule_delayed_work(&bp->reset_task,
6433 /* If all other functions got down -
6434 * try to bring the chip back to
6435 * normal. In any case it's an exit
6436 * point for a leader.
6438 if (bnx2x_leader_reset(bp) ||
6439 bnx2x_nic_load(bp, LOAD_NORMAL)) {
6440 printk(KERN_ERR"%s: Recovery "
6441 "has failed. Power cycle is "
6442 "needed.\n", bp->dev->name);
6443 /* Disconnect this device */
6444 netif_device_detach(bp->dev);
6445 /* Block ifup for all function
6446 * of this ASIC until
6447 * "process kill" or power
6450 bnx2x_set_reset_in_progress(bp);
6451 /* Shut down the power */
6452 bnx2x_set_power_state(bp,
6459 } else { /* non-leader */
6460 if (!bnx2x_reset_is_done(bp)) {
6461 /* Try to get a LEADER_LOCK HW lock as
6462 * long as a former leader may have
6463 * been unloaded by the user or
6464 * released a leadership by another
6467 if (bnx2x_trylock_hw_lock(bp,
6468 HW_LOCK_RESOURCE_RESERVED_08)) {
6469 /* I'm a leader now! Restart a
6476 schedule_delayed_work(&bp->reset_task,
6480 } else { /* A leader has completed
6481 * the "process kill". It's an exit
6482 * point for a non-leader.
6484 bnx2x_nic_load(bp, LOAD_NORMAL);
6485 bp->recovery_state =
6486 BNX2X_RECOVERY_DONE;
6497 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
6498 * scheduled on a general queue in order to prevent a dead lock.
6500 static void bnx2x_reset_task(struct work_struct *work)
6502 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
6504 #ifdef BNX2X_STOP_ON_ERROR
6505 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6506 " so reset not done to allow debug dump,\n"
6507 KERN_ERR " you will need to reboot when done\n");
6513 if (!netif_running(bp->dev))
6514 goto reset_task_exit;
6516 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
6517 bnx2x_parity_recover(bp);
6519 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6520 bnx2x_nic_load(bp, LOAD_NORMAL);
6527 /* end of nic load/unload */
6530 * Init service functions
/* Map a PCI function index (0..7) to its PXP2 "pretend" register
 * address; logs an error for out-of-range indices.
 */
6533 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
6536 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
6537 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
6538 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
6539 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
6540 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
6541 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
6542 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
6543 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
6545 BNX2X_ERR("Unsupported function index: %d\n", func);
/* Disable HC interrupts on an E1H chip by temporarily "pretending" to
 * be function 0 via the PXP2 pretend register, calling the E1-style
 * bnx2x_int_disable(), and then restoring the original function id.
 * Each pretend write is verified by reading the register back.
 */
6552 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
6554 /* Flush all outstanding writes */
6557 /* Pretend to be function 0 */
6559 /* Flush the GRC transaction (in the chip) */
6560 new_val = REG_RD(bp, reg);
6562 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
6567 /* From now we are in the "like-E1" mode */
6568 bnx2x_int_disable(bp);
6570 /* Flush all outstanding writes */
6573 /* Restore the original function settings */
6574 REG_WR(bp, reg, orig_func);
6575 new_val = REG_RD(bp, reg);
6576 if (new_val != orig_func) {
6577 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
6578 orig_func, new_val);
/* Chip-aware interrupt disable: E1H needs the pretend-register dance,
 * other chips take the plain bnx2x_int_disable() path.
 */
6583 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
6585 if (CHIP_IS_E1H(bp))
6586 bnx2x_undi_int_disable_e1h(bp, func);
6588 bnx2x_int_disable(bp);
/* Detect a leftover UNDI (pre-boot) driver and unload it so the chip
 * starts from a clean state: issue FW unload requests for both ports
 * if needed, disable interrupts, block RX traffic into the BRB, reset
 * the chip while preserving the NIG port-swap strapping, and finally
 * restore this function's firmware mailbox sequence.
 */
6591 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6595 /* Check if there is any driver already loaded */
6596 val = REG_RD(bp, MISC_REG_UNPREPARED);
6598 /* Check if it is the UNDI driver
6599 * UNDI driver initializes CID offset for normal bell to 0x7
6601 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6602 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6604 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6606 int func = BP_FUNC(bp);
6610 /* clear the UNDI indication */
6611 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6613 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6615 /* try unload UNDI on port 0 */
6618 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6619 DRV_MSG_SEQ_NUMBER_MASK);
6620 reset_code = bnx2x_fw_command(bp, reset_code, 0);
6622 /* if UNDI is loaded on the other port */
6623 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6625 /* send "DONE" for previous unload */
6626 bnx2x_fw_command(bp,
6627 DRV_MSG_CODE_UNLOAD_DONE, 0);
6629 /* unload UNDI on port 1 */
6632 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6633 DRV_MSG_SEQ_NUMBER_MASK);
6634 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6636 bnx2x_fw_command(bp, reset_code, 0);
6639 /* now it's safe to release the lock */
6640 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6642 bnx2x_undi_int_disable(bp, func);
6644 /* close input traffic and wait for it */
6645 /* Do not rcv packets to BRB */
6647 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6648 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6649 /* Do not direct rcv packets that are not for MCP to
6652 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6653 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
/* mask AEU attentions for this port during the reset */
6656 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6657 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6660 /* save NIG port swap info */
6661 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6662 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6665 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6668 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6670 /* take the NIG out of reset and restore swap values */
6672 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6673 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6674 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6675 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6677 /* send unload done to the MCP */
6678 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
6680 /* restore our func and fw_seq */
6683 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6684 DRV_MSG_SEQ_NUMBER_MASK);
6687 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
/* Read the port-independent ("common") hardware information: chip id,
 * single/dual port strap, flash size, shmem bases, MCP validity,
 * shared HW/feature config, bootcode version, WoL capability and the
 * board part number.  Results are cached in bp->common / bp->flags /
 * bp->link_params.
 */
6691 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6693 u32 val, val2, val3, val4, id;
6696 /* Get the chip revision id and number. */
6697 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6698 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6699 id = ((val & 0xffff) << 16);
6700 val = REG_RD(bp, MISC_REG_CHIP_REV);
6701 id |= ((val & 0xf) << 12);
6702 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6703 id |= ((val & 0xff) << 4);
6704 val = REG_RD(bp, MISC_REG_BOND_ID);
6706 bp->common.chip_id = id;
6707 bp->link_params.chip_id = bp->common.chip_id;
6708 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6710 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
6712 /* Set doorbell size */
6713 bp->db_size = (1 << BNX2X_DB_SHIFT);
6716 * set base FW non-default (fast path) status block id, this value is
6717 * used to initialize the fw_sb_id saved on the fp/queue structure to
6718 * determine the id used by the FW.
6720 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
/* Single-port detection: odd chip_id, or E1/E1H strap bits at 0x2874 */
6722 val = (REG_RD(bp, 0x2874) & 0x55);
6723 if ((bp->common.chip_id & 0x1) ||
6724 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
6725 bp->flags |= ONE_PORT_FLAG;
6726 BNX2X_DEV_INFO("single port device\n");
6729 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6730 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6731 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6732 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6733 bp->common.flash_size, bp->common.flash_size);
6735 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6736 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
6737 bp->link_params.shmem_base = bp->common.shmem_base;
6738 bp->link_params.shmem2_base = bp->common.shmem2_base;
6739 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
6740 bp->common.shmem_base, bp->common.shmem2_base);
/* A shmem base outside [0xA0000, 0xC0000) means the MCP never set it
 * up — run without MCP */
6742 if (!bp->common.shmem_base ||
6743 (bp->common.shmem_base < 0xA0000) ||
6744 (bp->common.shmem_base >= 0xC0000)) {
6745 BNX2X_DEV_INFO("MCP not active\n");
6746 bp->flags |= NO_MCP_FLAG;
6750 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6751 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6752 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6753 BNX2X_ERROR("BAD MCP validity signature\n");
6755 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6756 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
6758 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6759 SHARED_HW_CFG_LED_MODE_MASK) >>
6760 SHARED_HW_CFG_LED_MODE_SHIFT);
6762 bp->link_params.feature_config_flags = 0;
6763 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
6764 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
6765 bp->link_params.feature_config_flags |=
6766 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6768 bp->link_params.feature_config_flags &=
6769 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6771 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6772 bp->common.bc_ver = val;
6773 BNX2X_DEV_INFO("bc_ver %X\n", val);
6774 if (val < BNX2X_BC_VER) {
6775 /* for now only warn
6776 * later we might need to enforce this */
6777 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
6778 "please upgrade BC\n", BNX2X_BC_VER, val);
6780 bp->link_params.feature_config_flags |=
6781 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
6782 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
6783 bp->link_params.feature_config_flags |=
6784 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
6785 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
6787 if (BP_E1HVN(bp) == 0) {
6788 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6789 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6791 /* no WOL capability for E1HVN != 0 */
6792 bp->flags |= NO_WOL_FLAG;
6794 BNX2X_DEV_INFO("%sWoL capable\n",
6795 (bp->flags & NO_WOL_FLAG) ? "not " : "");
/* Part number: four consecutive 32-bit words from shared HW config */
6797 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6798 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6799 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6800 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6802 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
6803 val, val2, val3, val4);
/* Build bp->port.supported[0/1] from the probed PHYs (honouring the
 * PHY-swap configuration), pick up the PHY MDIO address for the given
 * switch configuration, and then mask the supported modes down to what
 * the NVRAM speed capability masks allow for each configuration.
 */
6806 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6809 int cfg_size = 0, idx, port = BP_PORT(bp);
6811 /* Aggregation of supported attributes of all external phys */
6812 bp->port.supported[0] = 0;
6813 bp->port.supported[1] = 0;
6814 switch (bp->link_params.num_phys) {
6816 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
6820 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
/* Two external PHYs: honour the swapped-PHY NVRAM setting */
6824 if (bp->link_params.multi_phy_config &
6825 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
6826 bp->port.supported[1] =
6827 bp->link_params.phy[EXT_PHY1].supported;
6828 bp->port.supported[0] =
6829 bp->link_params.phy[EXT_PHY2].supported;
6831 bp->port.supported[0] =
6832 bp->link_params.phy[EXT_PHY1].supported;
6833 bp->port.supported[1] =
6834 bp->link_params.phy[EXT_PHY2].supported;
6840 if (!(bp->port.supported[0] || bp->port.supported[1])) {
6841 BNX2X_ERR("NVRAM config error. BAD phy config."
6842 "PHY1 config 0x%x, PHY2 config 0x%x\n",
6844 dev_info.port_hw_config[port].external_phy_config),
6846 dev_info.port_hw_config[port].external_phy_config2));
6850 switch (switch_cfg) {
6852 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6854 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6857 case SWITCH_CFG_10G:
6858 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6860 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6865 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
6866 bp->port.link_config[0]);
6869 /* mask what we support according to speed_cap_mask per configuration */
6870 for (idx = 0; idx < cfg_size; idx++) {
6871 if (!(bp->link_params.speed_cap_mask[idx] &
6872 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
6873 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
6875 if (!(bp->link_params.speed_cap_mask[idx] &
6876 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
6877 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
6879 if (!(bp->link_params.speed_cap_mask[idx] &
6880 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
6881 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
6883 if (!(bp->link_params.speed_cap_mask[idx] &
6884 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
6885 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
6887 if (!(bp->link_params.speed_cap_mask[idx] &
6888 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
6889 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
6890 SUPPORTED_1000baseT_Full);
6892 if (!(bp->link_params.speed_cap_mask[idx] &
6893 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6894 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
6896 if (!(bp->link_params.speed_cap_mask[idx] &
6897 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6898 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
6902 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
6903 bp->port.supported[1]);
/* Translate each NVRAM link_config entry into requested link settings
 * (req_line_speed, req_duplex, req_flow_ctrl) and an ethtool-style
 * advertising mask.  Each requested fixed speed is validated against
 * the supported mask; invalid NVRAM configs are reported and the entry
 * falls back to autoneg with everything supported advertised.
 */
6906 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6908 u32 link_config, idx, cfg_size = 0;
6909 bp->port.advertising[0] = 0;
6910 bp->port.advertising[1] = 0;
6911 switch (bp->link_params.num_phys) {
6920 for (idx = 0; idx < cfg_size; idx++) {
/* Full duplex unless a half-duplex speed overrides it below */
6921 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
6922 link_config = bp->port.link_config[idx];
6923 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
6924 case PORT_FEATURE_LINK_SPEED_AUTO:
6925 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
6926 bp->link_params.req_line_speed[idx] =
6928 bp->port.advertising[idx] |=
6929 bp->port.supported[idx];
6931 /* force 10G, no AN */
6932 bp->link_params.req_line_speed[idx] =
6934 bp->port.advertising[idx] |=
6935 (ADVERTISED_10000baseT_Full |
6941 case PORT_FEATURE_LINK_SPEED_10M_FULL:
6942 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
6943 bp->link_params.req_line_speed[idx] =
6945 bp->port.advertising[idx] |=
6946 (ADVERTISED_10baseT_Full |
6949 BNX2X_ERROR("NVRAM config error. "
6950 "Invalid link_config 0x%x"
6951 " speed_cap_mask 0x%x\n",
6953 bp->link_params.speed_cap_mask[idx]);
6958 case PORT_FEATURE_LINK_SPEED_10M_HALF:
6959 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
6960 bp->link_params.req_line_speed[idx] =
6962 bp->link_params.req_duplex[idx] =
6964 bp->port.advertising[idx] |=
6965 (ADVERTISED_10baseT_Half |
6968 BNX2X_ERROR("NVRAM config error. "
6969 "Invalid link_config 0x%x"
6970 " speed_cap_mask 0x%x\n",
6972 bp->link_params.speed_cap_mask[idx]);
6977 case PORT_FEATURE_LINK_SPEED_100M_FULL:
6978 if (bp->port.supported[idx] & SUPPORTED_100baseT_Full) {
6979 bp->link_params.req_line_speed[idx] =
6981 bp->port.advertising[idx] |=
6982 (ADVERTISED_100baseT_Full |
6985 BNX2X_ERROR("NVRAM config error. "
6986 "Invalid link_config 0x%x"
6987 " speed_cap_mask 0x%x\n",
6989 bp->link_params.speed_cap_mask[idx]);
6994 case PORT_FEATURE_LINK_SPEED_100M_HALF:
6995 if (bp->port.supported[idx] & SUPPORTED_100baseT_Half) {
6996 bp->link_params.req_line_speed[idx] = SPEED_100;
6997 bp->link_params.req_duplex[idx] = DUPLEX_HALF;
6998 bp->port.advertising[idx] |=
6999 (ADVERTISED_100baseT_Half |
7002 BNX2X_ERROR("NVRAM config error. "
7003 "Invalid link_config 0x%x"
7004 " speed_cap_mask 0x%x\n",
7006 bp->link_params.speed_cap_mask[idx]);
7011 case PORT_FEATURE_LINK_SPEED_1G:
7012 if (bp->port.supported[idx] &
7013 SUPPORTED_1000baseT_Full) {
7014 bp->link_params.req_line_speed[idx] =
7016 bp->port.advertising[idx] |=
7017 (ADVERTISED_1000baseT_Full |
7020 BNX2X_ERROR("NVRAM config error. "
7021 "Invalid link_config 0x%x"
7022 " speed_cap_mask 0x%x\n",
7024 bp->link_params.speed_cap_mask[idx]);
7029 case PORT_FEATURE_LINK_SPEED_2_5G:
7030 if (bp->port.supported[idx] &
7031 SUPPORTED_2500baseX_Full) {
7032 bp->link_params.req_line_speed[idx] =
7034 bp->port.advertising[idx] |=
7035 (ADVERTISED_2500baseX_Full |
7038 BNX2X_ERROR("NVRAM config error. "
7039 "Invalid link_config 0x%x"
7040 " speed_cap_mask 0x%x\n",
7042 bp->link_params.speed_cap_mask[idx]);
7047 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7048 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7049 case PORT_FEATURE_LINK_SPEED_10G_KR:
7050 if (bp->port.supported[idx] &
7051 SUPPORTED_10000baseT_Full) {
7052 bp->link_params.req_line_speed[idx] =
7054 bp->port.advertising[idx] |=
7055 (ADVERTISED_10000baseT_Full |
7058 BNX2X_ERROR("NVRAM config error. "
7059 "Invalid link_config 0x%x"
7060 " speed_cap_mask 0x%x\n",
7062 bp->link_params.speed_cap_mask[idx]);
7068 BNX2X_ERROR("NVRAM config error. "
7069 "BAD link speed link_config 0x%x\n",
7071 bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG;
7072 bp->port.advertising[idx] = bp->port.supported[idx];
/* Flow control: requested AUTO is meaningless without autoneg
 * support — fall back to none */
7076 bp->link_params.req_flow_ctrl[idx] = (link_config &
7077 PORT_FEATURE_FLOW_CONTROL_MASK);
7078 if ((bp->link_params.req_flow_ctrl[idx] ==
7079 BNX2X_FLOW_CTRL_AUTO) &&
7080 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
7081 bp->link_params.req_flow_ctrl[idx] =
7082 BNX2X_FLOW_CTRL_NONE;
7085 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
7086 " 0x%x advertising 0x%x\n",
7087 bp->link_params.req_line_speed[idx],
7088 bp->link_params.req_duplex[idx],
7089 bp->link_params.req_flow_ctrl[idx],
7090 bp->port.advertising[idx]);
/* Compose a 6-byte MAC address in network (big-endian) byte order:
 * the two high-order bytes come from mac_hi, the remaining four from
 * mac_lo (both as read out of shmem).
 */
7094 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
	__be16 hi = cpu_to_be16(mac_hi);
	__be32 lo = cpu_to_be32(mac_lo);

	memcpy(mac_buf, &hi, sizeof(hi));
	memcpy(mac_buf + sizeof(hi), &lo, sizeof(lo));
}
/* Read per-port configuration from shmem (lane config, speed
 * capability masks, link configs, multi-phy config, WoL default),
 * probe the PHYs, derive supported/requested link settings, pick the
 * MDIO PHY address, and install the port and iSCSI MAC addresses.
 */
7102 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7104 int port = BP_PORT(bp);
7107 u32 ext_phy_type, ext_phy_config;
7109 bp->link_params.bp = bp;
7110 bp->link_params.port = port;
7112 bp->link_params.lane_config =
7113 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7115 bp->link_params.speed_cap_mask[0] =
7117 dev_info.port_hw_config[port].speed_capability_mask);
7118 bp->link_params.speed_cap_mask[1] =
7120 dev_info.port_hw_config[port].speed_capability_mask2);
7121 bp->port.link_config[0] =
7122 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7124 bp->port.link_config[1] =
7125 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
7127 bp->link_params.multi_phy_config =
7128 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
7129 /* If the device is capable of WoL, set the default state according
7132 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
7133 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
7134 (config & PORT_FEATURE_WOL_ENABLED));
7136 BNX2X_DEV_INFO("lane_config 0x%08x "
7137 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
7138 bp->link_params.lane_config,
7139 bp->link_params.speed_cap_mask[0],
7140 bp->port.link_config[0]);
7142 bp->link_params.switch_cfg = (bp->port.link_config[0] &
7143 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7144 bnx2x_phy_probe(&bp->link_params);
7145 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7147 bnx2x_link_settings_requested(bp);
7150 * If connected directly, work with the internal PHY, otherwise, work
7151 * with the external PHY
7155 dev_info.port_hw_config[port].external_phy_config);
7156 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
7157 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
7158 bp->mdio.prtad = bp->port.phy_addr;
7160 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
7161 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
7163 XGXS_EXT_PHY_ADDR(ext_phy_config);
/* Port MAC from shmem; also copied into link params and perm_addr */
7165 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7166 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7167 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
7168 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7169 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7172 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
7173 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
7174 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
/* Top-level HW info gathering: common info, IGU/status-block ids,
 * multi-function (MF) mode detection and E1HOV tag validation, port
 * info and firmware mailbox sequence, and the function MAC address
 * (falling back to a random MAC on emulation/FPGA without MCP).
 */
7178 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7180 int func = BP_FUNC(bp);
7184 bnx2x_get_common_hwinfo(bp);
7186 bp->common.int_block = INT_BLOCK_HC;
7188 bp->igu_dsb_id = DEF_SB_IGU_ID;
7189 bp->igu_base_sb = 0;
7190 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
/* MF mode only exists on E1H with an active MCP */
7194 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
7196 bp->common.mf_cfg_base = bp->common.shmem_base +
7197 offsetof(struct shmem_region, func_mb) +
7198 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
7200 MF_CFG_RD(bp, func_mf_config[func].config);
7202 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
7203 FUNC_MF_CFG_E1HOV_TAG_MASK);
7204 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
7206 BNX2X_DEV_INFO("%s function mode\n",
7207 IS_MF(bp) ? "multi" : "single")
7210 val = (MF_CFG_RD(bp, func_mf_config[func].
7212 FUNC_MF_CFG_E1HOV_TAG_MASK);
7213 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7215 BNX2X_DEV_INFO("E1HOV for func %d is %d "
7217 func, bp->mf_ov, bp->mf_ov);
7219 BNX2X_ERROR("No valid E1HOV for func %d,"
7220 " aborting\n", func);
7225 BNX2X_ERROR("VN %d in single function mode,"
7226 " aborting\n", BP_E1HVN(bp));
7232 /* adjust igu_sb_cnt to MF */
7234 bp->igu_sb_cnt /= E1HVN_MAX;
7236 if (!BP_NOMCP(bp)) {
7237 bnx2x_get_port_hwinfo(bp);
7239 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7240 DRV_MSG_SEQ_NUMBER_MASK);
7241 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
/* In MF mode the per-function MAC comes from the MF config */
7245 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
7246 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
7247 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7248 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7249 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7250 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7251 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7252 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7253 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7254 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7255 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7257 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7265 /* only supposed to happen on emulation/FPGA */
7266 BNX2X_ERROR("warning: random MAC workaround active\n");
7267 random_ether_addr(bp->dev->dev_addr);
7268 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
/* Parse the PCI VPD: look for the read-only section, check the MFR_ID
 * keyword against the Dell vendor id (both hex cases), and if it
 * matches copy the VENDOR0 ("V0") vendor-specific string into
 * bp->fw_ver.  Silently leaves fw_ver zeroed on any parse failure.
 */
7274 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
7276 int cnt, i, block_end, rodi;
7277 char vpd_data[BNX2X_VPD_LEN+1];
7278 char str_id_reg[VENDOR_ID_LEN+1];
7279 char str_id_cap[VENDOR_ID_LEN+1];
7282 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
7283 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
7285 if (cnt < BNX2X_VPD_LEN)
7288 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
7289 PCI_VPD_LRDT_RO_DATA)
7294 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
7295 pci_vpd_lrdt_size(&vpd_data[i]);
7297 i += PCI_VPD_LRDT_TAG_SIZE;
7299 if (block_end > BNX2X_VPD_LEN)
7302 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
7303 PCI_VPD_RO_KEYWORD_MFR_ID);
7307 len = pci_vpd_info_field_size(&vpd_data[rodi]);
7309 if (len != VENDOR_ID_LEN)
7312 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
7314 /* vendor specific info */
7315 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
7316 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
7317 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
7318 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
7320 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
7321 PCI_VPD_RO_KEYWORD_VENDOR0);
7323 len = pci_vpd_info_field_size(&vpd_data[rodi]);
7325 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
/* bound the copy so the trailing blank still fits in fw_ver */
7327 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
7328 memcpy(bp->fw_ver, &vpd_data[rodi], len);
7329 bp->fw_ver[len] = ' ';
/* One-time driver state initialization: locks and work items, HW info
 * discovery, UNDI cleanup, module-parameter derived settings
 * (multi-queue/interrupt mode, TPA/LRO, dropless FC), coalescing
 * defaults and the periodic timer.  Returns 0 or a negative errno
 * from the discovery/allocation steps.
 */
7338 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7340 int func = BP_FUNC(bp);
7344 /* Disable interrupt handling until HW is initialized */
7345 atomic_set(&bp->intr_sem, 1);
7346 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7348 mutex_init(&bp->port.phy_mutex);
7349 mutex_init(&bp->fw_mb_mutex);
7350 spin_lock_init(&bp->stats_lock);
7352 mutex_init(&bp->cnic_mutex);
7355 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7356 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
7358 rc = bnx2x_get_hwinfo(bp);
7361 rc = bnx2x_alloc_mem_bp(bp);
7363 bnx2x_read_fwinfo(bp);
7364 /* need to reset chip if undi was active */
7366 bnx2x_undi_unload(bp);
7368 if (CHIP_REV_IS_FPGA(bp))
7369 dev_err(&bp->pdev->dev, "FPGA detected\n");
7371 if (BP_NOMCP(bp) && (func == 0))
7372 dev_err(&bp->pdev->dev, "MCP disabled, "
7373 "must load devices in order!\n");
7375 /* Set multi queue mode */
7376 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
7377 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
7378 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
7379 "requested is not MSI-X\n");
7380 multi_mode = ETH_RSS_MODE_DISABLED;
7382 bp->multi_mode = multi_mode;
7383 bp->int_mode = int_mode;
7385 bp->dev->features |= NETIF_F_GRO;
/* TPA/LRO on or off together, per the disable_tpa module param */
7389 bp->flags &= ~TPA_ENABLE_FLAG;
7390 bp->dev->features &= ~NETIF_F_LRO;
7392 bp->flags |= TPA_ENABLE_FLAG;
7393 bp->dev->features |= NETIF_F_LRO;
7395 bp->disable_tpa = disable_tpa;
7398 bp->dropless_fc = 0;
7400 bp->dropless_fc = dropless_fc;
7404 bp->tx_ring_size = MAX_TX_AVAIL;
7408 /* make sure that the numbers are in the right granularity */
7409 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
7410 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
7412 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7413 bp->current_interval = (poll ? poll : timer_interval);
7415 init_timer(&bp->timer);
7416 bp->timer.expires = jiffies + bp->current_interval;
7417 bp->timer.data = (unsigned long) bp;
7418 bp->timer.function = bnx2x_timer;
7424 /****************************************************************************
7425 * General service functions
7426 ****************************************************************************/
7428 /* called with rtnl_lock */
/* ndo_open: power the device up and, if a global recovery is still in
 * progress, attempt to complete it here (become leader and run the
 * reset); otherwise refuse the open until the recovery flow finishes.
 * On success loads the NIC in LOAD_OPEN mode.
 */
7429 static int bnx2x_open(struct net_device *dev)
7431 struct bnx2x *bp = netdev_priv(dev);
7433 netif_carrier_off(dev);
7435 bnx2x_set_power_state(bp, PCI_D0);
7437 if (!bnx2x_reset_is_done(bp)) {
7439 /* Reset MCP mail box sequence if there is on going
7444 /* If it's the first function to load and reset done
7445 * is still not cleared it may mean that. We don't
7446 * check the attention state here because it may have
7447 * already been cleared by a "common" reset but we
7448 * shall proceed with "process kill" anyway.
7450 if ((bnx2x_get_load_cnt(bp) == 0) &&
7451 bnx2x_trylock_hw_lock(bp,
7452 HW_LOCK_RESOURCE_RESERVED_08) &&
7453 (!bnx2x_leader_reset(bp))) {
7454 DP(NETIF_MSG_HW, "Recovered in open\n");
7458 bnx2x_set_power_state(bp, PCI_D3hot);
7460 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
7461 " completed yet. Try again later. If you still see this"
7462 " message after a few retries then power cycle is"
7463 " required.\n", bp->dev->name);
7469 bp->recovery_state = BNX2X_RECOVERY_DONE;
7471 return bnx2x_nic_load(bp, LOAD_OPEN);
7474 /* called with rtnl_lock */
/* ndo_stop handler: unload the NIC (releasing IRQs) and drop to D3hot. */
7475 static int bnx2x_close(struct net_device *dev)
7477 struct bnx2x *bp = netdev_priv(dev);
7479 /* Unload the driver, release IRQs */
7480 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
7481 bnx2x_set_power_state(bp, PCI_D3hot);
7486 /* called with netif_tx_lock from dev_mcast.c */
/*
 * Program RX filtering from dev->flags and the device multicast list:
 * IFF_PROMISC -> promiscuous, IFF_ALLMULTI (or an over-long mc list)
 * -> all-multicast, otherwise a per-address filter.  E1 chips program
 * an exact-match list via bnx2x_set_e1_mc_list(); other chips set one
 * bit per address in the MC_HASH registers, derived from crc32c of the
 * MAC.  Runs under netif_tx_lock, so nothing here may sleep.
 */
7487 void bnx2x_set_rx_mode(struct net_device *dev)
7489 struct bnx2x *bp = netdev_priv(dev);
7490 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
7491 int port = BP_PORT(bp);
7493 if (bp->state != BNX2X_STATE_OPEN) {
7494 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
7498 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
7500 if (dev->flags & IFF_PROMISC)
7501 rx_mode = BNX2X_RX_MODE_PROMISC;
7503 else if ((dev->flags & IFF_ALLMULTI) ||
7504 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
7506 rx_mode = BNX2X_RX_MODE_ALLMULTI;
7508 else { /* some multicasts */
7509 if (CHIP_IS_E1(bp)) {
7511 * set mc list, do not wait as wait implies sleep
7512 * and set_rx_mode can be invoked from non-sleepable
7515 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
7516 BNX2X_MAX_EMUL_MULTI*(1 + port) :
7517 BNX2X_MAX_MULTICAST*(1 + port));
7519 bnx2x_set_e1_mc_list(bp, offset);
7521 /* Accept one or more multicasts */
7522 struct netdev_hw_addr *ha;
7523 u32 mc_filter[MC_HASH_SIZE];
7524 u32 crc, bit, regidx;
7527 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
7529 netdev_for_each_mc_addr(ha, dev) {
7530 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
/* Top byte of the crc32c selects the filter bit for this address. */
7533 crc = crc32c_le(0, bnx2x_mc_addr(ha),
7535 bit = (crc >> 24) & 0xff;
7538 mc_filter[regidx] |= (1 << bit);
7541 for (i = 0; i < MC_HASH_SIZE; i++)
7542 REG_WR(bp, MC_HASH_OFFSET(bp, i),
/* Latch the chosen mode and push it to the storm firmware. */
7548 bp->rx_mode = rx_mode;
7549 bnx2x_set_storm_rx_mode(bp);
7553 /* called with rtnl_lock */
/*
 * mdio_if_info read hook: read one PHY register through the link layer
 * under the PHY lock.  Clause-22 accesses (MDIO_DEVAD_NONE) are mapped
 * to DEFAULT_PHY_DEV_ADDR because the HW expects a real devad.
 */
7554 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
7555 int devad, u16 addr)
7557 struct bnx2x *bp = netdev_priv(netdev);
7561 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
7562 prtad, devad, addr);
7564 /* The HW expects different devad if CL22 is used */
7565 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
7567 bnx2x_acquire_phy_lock(bp);
7568 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
7569 bnx2x_release_phy_lock(bp);
7570 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
7577 /* called with rtnl_lock */
/*
 * mdio_if_info write hook: mirror of bnx2x_mdio_read() — writes one
 * PHY register under the PHY lock, with the same CL22 devad mapping.
 */
7578 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
7579 u16 addr, u16 value)
7581 struct bnx2x *bp = netdev_priv(netdev);
7584 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
7585 " value 0x%x\n", prtad, devad, addr, value);
7587 /* The HW expects different devad if CL22 is used */
7588 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
7590 bnx2x_acquire_phy_lock(bp);
7591 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
7592 bnx2x_release_phy_lock(bp);
7596 /* called with rtnl_lock */
/*
 * ndo_do_ioctl handler: forwards MII ioctls (SIOCGMIIREG etc.) to the
 * generic mdio_mii_ioctl() helper; requires the interface to be up.
 */
7597 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7599 struct bnx2x *bp = netdev_priv(dev);
7600 struct mii_ioctl_data *mdio = if_mii(ifr);
7602 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
7603 mdio->phy_id, mdio->reg_num, mdio->val_in);
7605 if (!netif_running(dev))
7608 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
7611 #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * netpoll hook: fake one interrupt with the device IRQ disabled so
 * netconsole/kgdb can drive the RX path without real interrupts.
 */
7612 static void poll_bnx2x(struct net_device *dev)
7614 struct bnx2x *bp = netdev_priv(dev);
7616 disable_irq(bp->pdev->irq);
7617 bnx2x_interrupt(bp->pdev->irq, dev);
7618 enable_irq(bp->pdev->irq);
/* net_device_ops vtable wiring the driver's ndo handlers. */
7622 static const struct net_device_ops bnx2x_netdev_ops = {
7623 .ndo_open = bnx2x_open,
7624 .ndo_stop = bnx2x_close,
7625 .ndo_start_xmit = bnx2x_start_xmit,
7626 .ndo_set_multicast_list = bnx2x_set_rx_mode,
7627 .ndo_set_mac_address = bnx2x_change_mac_addr,
7628 .ndo_validate_addr = eth_validate_addr,
7629 .ndo_do_ioctl = bnx2x_ioctl,
7630 .ndo_change_mtu = bnx2x_change_mtu,
7631 .ndo_tx_timeout = bnx2x_tx_timeout,
7633 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
7635 #ifdef CONFIG_NET_POLL_CONTROLLER
7636 .ndo_poll_controller = poll_bnx2x,
/*
 * One-time PCI/netdev bring-up for a newly probed device:
 *  - enable the PCI device, validate both memory BARs, claim regions
 *    (only for the first function: enable_cnt == 1);
 *  - locate the PM and PCIe capabilities (both mandatory);
 *  - choose 64-bit DMA with DAC when available, else fall back to
 *    32-bit; fail if neither mask is usable;
 *  - ioremap BAR0 (registers) and BAR2 (doorbells);
 *  - clear the PXP2 indirect-address registers and the load counter;
 *  - set up netdev ops, ethtool ops, feature flags (SG/CSUM/TSO/VLAN,
 *    HIGHDMA when DAC is on) and the MDIO accessors.
 * Error paths unwind through iounmap/release/disable (labels are not
 * all visible in this extract).
 */
7640 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
7641 struct net_device *dev)
7646 SET_NETDEV_DEV(dev, &pdev->dev);
7647 bp = netdev_priv(dev);
7652 bp->func = PCI_FUNC(pdev->devfn);
7654 rc = pci_enable_device(pdev);
7656 dev_err(&bp->pdev->dev,
7657 "Cannot enable PCI device, aborting\n");
7661 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7662 dev_err(&bp->pdev->dev,
7663 "Cannot find PCI device base address, aborting\n");
7665 goto err_out_disable;
7668 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
7669 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
7670 " base address, aborting\n");
7672 goto err_out_disable;
/* Regions are shared between the two ports; only the first opener
 * (enable_cnt == 1) requests them. */
7675 if (atomic_read(&pdev->enable_cnt) == 1) {
7676 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7678 dev_err(&bp->pdev->dev,
7679 "Cannot obtain PCI resources, aborting\n");
7680 goto err_out_disable;
7683 pci_set_master(pdev);
7684 pci_save_state(pdev);
7687 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7688 if (bp->pm_cap == 0) {
7689 dev_err(&bp->pdev->dev,
7690 "Cannot find power management capability, aborting\n");
7692 goto err_out_release;
7695 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
7696 if (bp->pcie_cap == 0) {
7697 dev_err(&bp->pdev->dev,
7698 "Cannot find PCI Express capability, aborting\n");
7700 goto err_out_release;
/* Prefer 64-bit DMA (DAC); require a matching coherent mask, else
 * fall back to a 32-bit streaming mask. */
7703 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
7704 bp->flags |= USING_DAC_FLAG;
7705 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
7706 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
7707 " failed, aborting\n");
7709 goto err_out_release;
7712 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
7713 dev_err(&bp->pdev->dev,
7714 "System does not support DMA, aborting\n");
7716 goto err_out_release;
7719 dev->mem_start = pci_resource_start(pdev, 0);
7720 dev->base_addr = dev->mem_start;
7721 dev->mem_end = pci_resource_end(pdev, 0);
7723 dev->irq = pdev->irq;
7725 bp->regview = pci_ioremap_bar(pdev, 0);
7727 dev_err(&bp->pdev->dev,
7728 "Cannot map register space, aborting\n");
7730 goto err_out_release;
/* BAR2 holds the doorbells; map no more than BNX2X_DB_SIZE. */
7733 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
7734 min_t(u64, BNX2X_DB_SIZE(bp),
7735 pci_resource_len(pdev, 2)));
7736 if (!bp->doorbells) {
7737 dev_err(&bp->pdev->dev,
7738 "Cannot map doorbell space, aborting\n");
7743 bnx2x_set_power_state(bp, PCI_D0);
7745 /* clean indirect addresses */
7746 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
7747 PCICFG_VENDOR_ID_OFFSET);
7748 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
7749 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
7750 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
7751 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
7753 /* Reset the load counter */
7754 bnx2x_clear_load_cnt(bp);
7756 dev->watchdog_timeo = TX_TIMEOUT;
7758 dev->netdev_ops = &bnx2x_netdev_ops;
7759 bnx2x_set_ethtool_ops(dev);
7760 dev->features |= NETIF_F_SG;
7761 dev->features |= NETIF_F_HW_CSUM;
7762 if (bp->flags & USING_DAC_FLAG)
7763 dev->features |= NETIF_F_HIGHDMA;
7764 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7765 dev->features |= NETIF_F_TSO6;
7767 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
7768 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
7770 dev->vlan_features |= NETIF_F_SG;
7771 dev->vlan_features |= NETIF_F_HW_CSUM;
7772 if (bp->flags & USING_DAC_FLAG)
7773 dev->vlan_features |= NETIF_F_HIGHDMA;
7774 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7775 dev->vlan_features |= NETIF_F_TSO6;
7778 /* get_port_hwinfo() will set prtad and mmds properly */
7779 bp->mdio.prtad = MDIO_PRTAD_NONE;
7781 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
7783 bp->mdio.mdio_read = bnx2x_mdio_read;
7784 bp->mdio.mdio_write = bnx2x_mdio_write;
/* --- error unwind --- */
7790 iounmap(bp->regview);
7793 if (bp->doorbells) {
7794 iounmap(bp->doorbells);
7795 bp->doorbells = NULL;
7799 if (atomic_read(&pdev->enable_cnt) == 1)
7800 pci_release_regions(pdev);
7803 pci_disable_device(pdev);
7804 pci_set_drvdata(pdev, NULL);
/*
 * Read negotiated PCIe link width and speed from the link-control
 * register mirrored in GRC space (speed: 1 = 2.5GT/s, 2 = 5GT/s).
 */
7810 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7811 int *width, int *speed)
7813 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7815 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
7817 /* return value of 1=2.5GHz 2=5GHz */
7818 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
/*
 * Validate the loaded firmware image (bp->firmware) before use:
 *  1) file is at least one header long;
 *  2) every section's [offset, offset+len) lies inside the file;
 *  3) every init_ops offset indexes into the init_ops array;
 *  4) the embedded FW version matches the version this driver was
 *     built against.
 * Returns 0 when the image is acceptable (return sites not visible in
 * this extract).
 */
7821 static int bnx2x_check_firmware(struct bnx2x *bp)
7823 const struct firmware *firmware = bp->firmware;
7824 struct bnx2x_fw_file_hdr *fw_hdr;
7825 struct bnx2x_fw_file_section *sections;
7826 u32 offset, len, num_ops;
7831 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
7834 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
7835 sections = (struct bnx2x_fw_file_section *)fw_hdr;
7837 /* Make sure none of the offsets and sizes make us read beyond
7838 * the end of the firmware data */
7839 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
7840 offset = be32_to_cpu(sections[i].offset);
7841 len = be32_to_cpu(sections[i].len);
/* NOTE(review): offset + len is a u32 sum of attacker-controlled
 * file fields and can wrap around; consider checking
 * offset > size || len > size - offset instead. */
7842 if (offset + len > firmware->size) {
7843 dev_err(&bp->pdev->dev,
7844 "Section %d length is out of bounds\n", i);
7849 /* Likewise for the init_ops offsets */
7850 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
7851 ops_offsets = (u16 *)(firmware->data + offset);
7852 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
7854 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
/* NOTE(review): '>' permits an offset equal to num_ops, which would
 * index one past the array -- verify whether '>=' is intended. */
7855 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
7856 dev_err(&bp->pdev->dev,
7857 "Section offset %d is out of bounds\n", i);
7862 /* Check FW version */
7863 offset = be32_to_cpu(fw_hdr->fw_version.offset);
7864 fw_ver = firmware->data + offset;
7865 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
7866 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
7867 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
7868 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
7869 dev_err(&bp->pdev->dev,
7870 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
7871 fw_ver[0], fw_ver[1], fw_ver[2],
7872 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
7873 BCM_5710_FW_MINOR_VERSION,
7874 BCM_5710_FW_REVISION_VERSION,
7875 BCM_5710_FW_ENGINEERING_VERSION);
/*
 * Convert n bytes of big-endian 32-bit words from _source into host
 * order at _target (n is expected to be a multiple of 4).
 */
7882 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7884 const __be32 *source = (const __be32 *)_source;
7885 u32 *target = (u32 *)_target;
7888 for (i = 0; i < n/4; i++)
7889 target[i] = be32_to_cpu(source[i]);
7893 Ops array is stored in the following format:
7894 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
/*
 * Unpack n bytes of firmware init-ops (8 bytes per op, see format
 * above) from _source into an array of struct raw_op at _target,
 * converting from big-endian on the way.
 */
7896 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
7898 const __be32 *source = (const __be32 *)_source;
7899 struct raw_op *target = (struct raw_op *)_target;
7902 for (i = 0, j = 0; i < n/8; i++, j += 2) {
7903 tmp = be32_to_cpu(source[j]);
7904 target[i].op = (tmp >> 24) & 0xff;
7905 target[i].offset = tmp & 0xffffff;
7906 target[i].raw_data = be32_to_cpu(source[j + 1]);
7911 * IRO array is stored in the following format:
7912 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
/*
 * Unpack n bytes of the firmware IRO table (format above) from
 * _source into struct iro entries at _target, splitting each
 * big-endian 32-bit word into its 16-bit halves.
 */
7914 static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
7916 const __be32 *source = (const __be32 *)_source;
7917 struct iro *target = (struct iro *)_target;
7920 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
7921 target[i].base = be32_to_cpu(source[j]);
7923 tmp = be32_to_cpu(source[j]);
7924 target[i].m1 = (tmp >> 16) & 0xffff;
7925 target[i].m2 = tmp & 0xffff;
7927 tmp = be32_to_cpu(source[j]);
7928 target[i].m3 = (tmp >> 16) & 0xffff;
7929 target[i].size = tmp & 0xffff;
/*
 * Convert n bytes of big-endian 16-bit words from _source into host
 * order at _target (n is expected to be a multiple of 2).
 */
7934 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7936 const __be16 *source = (const __be16 *)_source;
7937 u16 *target = (u16 *)_target;
7940 for (i = 0; i < n/2; i++)
7941 target[i] = be16_to_cpu(source[i]);
/*
 * BNX2X_ALLOC_AND_SET(arr, lbl, func) - allocate bp->arr sized by the
 * firmware header's 'arr' section length, then fill it by running
 * 'func' over the raw section data.  On allocation failure, jumps to
 * label 'lbl' (used for the goto-unwind in bnx2x_init_firmware()).
 * Intended to be used only where fw_hdr and bp are in scope.
 */
7944 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
7946 u32 len = be32_to_cpu(fw_hdr->arr.len); \
7947 bp->arr = kmalloc(len, GFP_KERNEL); \
7949 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
7952 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
7953 (u8 *)bp->arr, len); \
/*
 * Load and prepare the chip firmware:
 *  - pick the file name by chip type (E1 vs E1H, else fail);
 *  - request_firmware() + bnx2x_check_firmware() validation;
 *  - allocate and byte-swap the init_data / init_ops /
 *    init_ops_offsets / iro_arr tables via BNX2X_ALLOC_AND_SET;
 *  - point the per-STORM INT-table and PRAM pointers at their
 *    sections inside the firmware blob (no copies made — the blob
 *    must stay alive while these pointers are used).
 * Errors unwind in reverse allocation order, finally releasing the
 * firmware.
 */
7956 int bnx2x_init_firmware(struct bnx2x *bp)
7958 const char *fw_file_name;
7959 struct bnx2x_fw_file_hdr *fw_hdr;
7963 fw_file_name = FW_FILE_NAME_E1;
7964 else if (CHIP_IS_E1H(bp))
7965 fw_file_name = FW_FILE_NAME_E1H;
7967 BNX2X_ERR("Unsupported chip revision\n");
7971 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
7973 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
7975 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
7976 goto request_firmware_exit;
7979 rc = bnx2x_check_firmware(bp);
7981 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
7982 goto request_firmware_exit;
7985 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
7987 /* Initialize the pointers to the init arrays */
7989 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
7992 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
7995 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
7998 /* STORMs firmware */
7999 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8000 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
8001 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
8002 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
8003 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8004 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
8005 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
8006 be32_to_cpu(fw_hdr->usem_pram_data.offset);
8007 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8008 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
8009 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
8010 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
8011 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8012 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
8013 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
8014 be32_to_cpu(fw_hdr->csem_pram_data.offset);
8016 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
/* --- error unwind: free in reverse allocation order --- */
8021 kfree(bp->init_ops_offsets);
8022 init_offsets_alloc_err:
8023 kfree(bp->init_ops);
8025 kfree(bp->init_data);
8026 request_firmware_exit:
8027 release_firmware(bp->firmware);
/*
 * Compute the QM connection-ID count: the L2 fastpath CIDs (plus the
 * CNIC CIDs when CNIC support is built in), rounded up to the QM
 * granularity.
 */
8032 static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
8034 int cid_count = L2_FP_COUNT(l2_cid_count);
8037 cid_count += CNIC_CID_MAX;
8039 return roundup(cid_count, QM_CID_ROUND);
/*
 * PCI probe entry point: allocate a multiqueue etherdev, run the PCI /
 * BAR / netdev setup (bnx2x_init_dev), initialize the driver state
 * (bnx2x_init_bp), compute the QM CID count, register the netdev and
 * print the detected chip/PCIe info.  Error paths unwind the mappings
 * and PCI state (labels not all visible in this extract).
 */
8041 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8042 const struct pci_device_id *ent)
8044 struct net_device *dev = NULL;
8046 int pcie_width, pcie_speed;
8049 cid_count = FP_SB_MAX_E1x + CNIC_CONTEXT_USE;
8051 /* dev zeroed in init_etherdev */
8052 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
8054 dev_err(&pdev->dev, "Cannot allocate net device\n");
8058 bp = netdev_priv(dev);
8059 bp->msg_enable = debug;
8061 pci_set_drvdata(pdev, dev);
8063 bp->l2_cid_count = cid_count;
8065 rc = bnx2x_init_dev(pdev, dev);
8071 rc = bnx2x_init_bp(bp);
8075 /* calc qm_cid_count */
8076 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
8078 rc = register_netdev(dev);
8080 dev_err(&pdev->dev, "Cannot register net device\n");
8084 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
8085 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
8086 " IRQ %d, ", board_info[ent->driver_data].name,
8087 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
8088 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
8089 dev->base_addr, bp->pdev->irq);
8090 pr_cont("node addr %pM\n", dev->dev_addr);
/* --- error unwind --- */
8096 iounmap(bp->regview);
8099 iounmap(bp->doorbells);
8103 if (atomic_read(&pdev->enable_cnt) == 1)
8104 pci_release_regions(pdev);
8106 pci_disable_device(pdev);
8107 pci_set_drvdata(pdev, NULL);
/*
 * PCI remove: unregister the netdev, cancel the pending reset work,
 * unmap BARs, free per-device state and release/disable the PCI
 * device (regions only for the last function).
 */
8112 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
8114 struct net_device *dev = pci_get_drvdata(pdev);
8118 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
8121 bp = netdev_priv(dev);
8123 unregister_netdev(dev);
8125 /* Make sure RESET task is not scheduled before continuing */
8126 cancel_delayed_work_sync(&bp->reset_task);
8129 iounmap(bp->regview);
8132 iounmap(bp->doorbells);
8134 bnx2x_free_mem_bp(bp);
8138 if (atomic_read(&pdev->enable_cnt) == 1)
8139 pci_release_regions(pdev);
8141 pci_disable_device(pdev);
8142 pci_set_drvdata(pdev, NULL);
/*
 * Minimal unload used from the PCI error-recovery (EEH) path: no
 * hardware access is attempted.  Stops the netif/NAPI machinery and
 * timers, frees IRQs, SKBs and RX SGE ranges, and marks the device
 * state ERROR -> CLOSED.
 */
8145 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
8149 bp->state = BNX2X_STATE_ERROR;
8151 bp->rx_mode = BNX2X_RX_MODE_NONE;
8153 bnx2x_netif_stop(bp, 0);
8154 netif_carrier_off(bp->dev);
8156 del_timer_sync(&bp->timer);
8157 bp->stats_state = STATS_STATE_DISABLED;
8158 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
8161 bnx2x_free_irq(bp, false);
8163 /* Free SKBs, SGEs, TPA pool and driver internals */
8164 bnx2x_free_skbs(bp);
8166 for_each_queue(bp, i)
8167 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8168 for_each_queue(bp, i)
8169 netif_napi_del(&bnx2x_fp(bp, i, napi));
8172 bp->state = BNX2X_STATE_CLOSED;
/*
 * Re-establish communication with the MCP after a PCI reset: re-read
 * the shared-memory base, sanity-check it (outside [0xA0000,0xC0000)
 * means no active MCP -> set NO_MCP_FLAG), verify the shmem validity
 * signature, and resync the driver's mailbox sequence number.
 */
8177 static void bnx2x_eeh_recover(struct bnx2x *bp)
8181 mutex_init(&bp->port.phy_mutex);
8183 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8184 bp->link_params.shmem_base = bp->common.shmem_base;
8185 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
8187 if (!bp->common.shmem_base ||
8188 (bp->common.shmem_base < 0xA0000) ||
8189 (bp->common.shmem_base >= 0xC0000)) {
8190 BNX2X_DEV_INFO("MCP not active\n");
8191 bp->flags |= NO_MCP_FLAG;
8195 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8196 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8197 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8198 BNX2X_ERR("BAD MCP validity signature\n");
8200 if (!BP_NOMCP(bp)) {
8201 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
8202 & DRV_MSG_SEQ_NUMBER_MASK);
8203 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8208 * bnx2x_io_error_detected - called when PCI error is detected
8209 * @pdev: Pointer to PCI device
8210 * @state: The current pci connection state
8212 * This function is called after a PCI bus error affecting
8213 * this device has been detected.
/*
 * Detach the netdev and do the HW-free unload; a permanent channel
 * failure short-circuits to DISCONNECT, otherwise a slot reset is
 * requested.
 */
8215 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
8216 pci_channel_state_t state)
8218 struct net_device *dev = pci_get_drvdata(pdev);
8219 struct bnx2x *bp = netdev_priv(dev);
8223 netif_device_detach(dev);
8225 if (state == pci_channel_io_perm_failure) {
8227 return PCI_ERS_RESULT_DISCONNECT;
8230 if (netif_running(dev))
8231 bnx2x_eeh_nic_unload(bp);
8233 pci_disable_device(pdev);
8237 /* Request a slot reset */
8238 return PCI_ERS_RESULT_NEED_RESET;
8242 * bnx2x_io_slot_reset - called after the PCI bus has been reset
8243 * @pdev: Pointer to PCI device
8245 * Restart the card from scratch, as if from a cold-boot.
/*
 * Re-enable the device, restore saved config space and (if the
 * interface was running) return it to D0.  DISCONNECT if the device
 * cannot be re-enabled.
 */
8247 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
8249 struct net_device *dev = pci_get_drvdata(pdev);
8250 struct bnx2x *bp = netdev_priv(dev);
8254 if (pci_enable_device(pdev)) {
8256 "Cannot re-enable PCI device after reset\n");
8258 return PCI_ERS_RESULT_DISCONNECT;
8261 pci_set_master(pdev);
8262 pci_restore_state(pdev);
8264 if (netif_running(dev))
8265 bnx2x_set_power_state(bp, PCI_D0);
8269 return PCI_ERS_RESULT_RECOVERED;
8273 * bnx2x_io_resume - called when traffic can start flowing again
8274 * @pdev: Pointer to PCI device
8276 * This callback is called when the error recovery driver tells us that
8277 * its OK to resume normal operation.
/*
 * Refuses to resume while a parity-error recovery is still pending;
 * otherwise re-syncs with the MCP, reloads the NIC if it was running
 * and re-attaches the netdev.
 */
8279 static void bnx2x_io_resume(struct pci_dev *pdev)
8281 struct net_device *dev = pci_get_drvdata(pdev);
8282 struct bnx2x *bp = netdev_priv(dev);
8284 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
8285 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
8291 bnx2x_eeh_recover(bp);
8293 if (netif_running(dev))
8294 bnx2x_nic_load(bp, LOAD_NORMAL);
8296 netif_device_attach(dev);
/* PCI error-recovery (AER/EEH) callbacks. */
8301 static struct pci_error_handlers bnx2x_err_handler = {
8302 .error_detected = bnx2x_io_error_detected,
8303 .slot_reset = bnx2x_io_slot_reset,
8304 .resume = bnx2x_io_resume,
/* PCI driver registration: probe/remove, PM suspend/resume, AER. */
8307 static struct pci_driver bnx2x_pci_driver = {
8308 .name = DRV_MODULE_NAME,
8309 .id_table = bnx2x_pci_tbl,
8310 .probe = bnx2x_init_one,
8311 .remove = __devexit_p(bnx2x_remove_one),
8312 .suspend = bnx2x_suspend,
8313 .resume = bnx2x_resume,
8314 .err_handler = &bnx2x_err_handler,
/*
 * Module init: create the driver's singlethread workqueue, then
 * register the PCI driver; the workqueue is destroyed again if
 * registration fails.
 */
8317 static int __init bnx2x_init(void)
8321 pr_info("%s", version);
8323 bnx2x_wq = create_singlethread_workqueue("bnx2x");
8324 if (bnx2x_wq == NULL) {
8325 pr_err("Cannot create workqueue\n");
8329 ret = pci_register_driver(&bnx2x_pci_driver);
8331 pr_err("Cannot register driver\n");
8332 destroy_workqueue(bnx2x_wq);
/* Module exit: unregister the PCI driver and tear down the workqueue. */
8337 static void __exit bnx2x_cleanup(void)
8339 pci_unregister_driver(&bnx2x_pci_driver);
8341 destroy_workqueue(bnx2x_wq);
8344 module_init(bnx2x_init);
8345 module_exit(bnx2x_cleanup);
8349 /* count denotes the number of new completions we have seen */
/*
 * Drain queued CNIC kernel work-queue entries (cnic_kwq ring) into the
 * slow-path queue: 'count' completions free that many SPQ slots; while
 * slots remain below max_kwqe_pending and KWQEs are pending, copy each
 * entry into the next SPQ element and advance the consumer (wrapping at
 * cnic_kwq_last).  Serialized by spq_lock; the SPQ producer is updated
 * once at the end.
 */
8350 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
8352 struct eth_spe *spe;
8354 #ifdef BNX2X_STOP_ON_ERROR
8355 if (unlikely(bp->panic))
8359 spin_lock_bh(&bp->spq_lock);
8360 bp->cnic_spq_pending -= count;
8362 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
8363 bp->cnic_spq_pending++) {
8365 if (!bp->cnic_kwq_pending)
8368 spe = bnx2x_sp_get_next(bp);
8369 *spe = *bp->cnic_kwq_cons;
8371 bp->cnic_kwq_pending--;
8373 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
8374 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
8376 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
8377 bp->cnic_kwq_cons = bp->cnic_kwq;
8379 bp->cnic_kwq_cons++;
8381 bnx2x_sp_prod_update(bp);
8382 spin_unlock_bh(&bp->spq_lock);
/*
 * CNIC entry point (drv_submit_kwqes_16): enqueue up to 'count' 16-byte
 * KWQEs onto the cnic_kwq ring under spq_lock, advancing the producer
 * with wrap-around.  Enqueueing stops when the ring is full
 * (MAX_SP_DESC_CNT).  If SPQ slots are available afterwards, kicks
 * bnx2x_cnic_sp_post(bp, 0) to start draining immediately.
 */
8385 static int bnx2x_cnic_sp_queue(struct net_device *dev,
8386 struct kwqe_16 *kwqes[], u32 count)
8388 struct bnx2x *bp = netdev_priv(dev);
8391 #ifdef BNX2X_STOP_ON_ERROR
8392 if (unlikely(bp->panic))
8396 spin_lock_bh(&bp->spq_lock);
8398 for (i = 0; i < count; i++) {
8399 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
8401 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
8404 *bp->cnic_kwq_prod = *spe;
8406 bp->cnic_kwq_pending++;
8408 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
8409 spe->hdr.conn_and_cmd_data, spe->hdr.type,
8410 spe->data.update_data_addr.hi,
8411 spe->data.update_data_addr.lo,
8412 bp->cnic_kwq_pending);
8414 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
8415 bp->cnic_kwq_prod = bp->cnic_kwq;
8417 bp->cnic_kwq_prod++;
8420 spin_unlock_bh(&bp->spq_lock);
8422 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
8423 bnx2x_cnic_sp_post(bp, 0);
/*
 * Deliver a control message to the registered CNIC driver, if any,
 * under cnic_mutex (process context, may sleep).
 */
8428 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
8430 struct cnic_ops *c_ops;
8433 mutex_lock(&bp->cnic_mutex);
8434 c_ops = bp->cnic_ops;
8436 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
8437 mutex_unlock(&bp->cnic_mutex);
/*
 * Bottom-half variant of bnx2x_cnic_ctl_send(): uses RCU instead of
 * the mutex so it is safe from softirq context.
 */
8442 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
8444 struct cnic_ops *c_ops;
8448 c_ops = rcu_dereference(bp->cnic_ops);
8450 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
8457 * for commands that have no data
/* Send a data-less CNIC control command (cmd only, zeroed payload). */
8459 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
8461 struct cnic_ctl_info ctl = {0};
8465 return bnx2x_cnic_ctl_send(bp, &ctl);
/*
 * Report a CFC-delete completion for 'cid' to CNIC (bottom-half safe),
 * then account it as one SPQ completion so more KWQEs can be posted.
 */
8468 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
8470 struct cnic_ctl_info ctl;
8472 /* first we tell CNIC and only then we count this as a completion */
8473 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
8474 ctl.data.comp.cid = cid;
8476 bnx2x_cnic_ctl_send_bh(bp, &ctl);
8477 bnx2x_cnic_sp_post(bp, 1);
/*
 * CNIC -> bnx2x control dispatcher (cp->drv_ctl):
 *  CTXTBL_WR  - write one ILT context-table entry;
 *  COMPLETION - credit 'comp_count' SPQ completions;
 *  START_L2   - set the iSCSI MAC and open the iSCSI L2 ring's RX
 *               filters (unicast/broadcast/all-multicast, see comment
 *               below); rtnl_lock held by caller;
 *  STOP_L2    - close the ring's filters and clear the iSCSI MAC;
 *               rtnl_lock held by caller;
 *  default    - log and reject unknown commands.
 */
8480 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
8482 struct bnx2x *bp = netdev_priv(dev);
8486 case DRV_CTL_CTXTBL_WR_CMD: {
8487 u32 index = ctl->data.io.offset;
8488 dma_addr_t addr = ctl->data.io.dma_addr;
8490 bnx2x_ilt_wr(bp, index, addr);
8494 case DRV_CTL_COMPLETION_CMD: {
8495 int count = ctl->data.comp.comp_count;
8497 bnx2x_cnic_sp_post(bp, count);
8501 /* rtnl_lock is held. */
8502 case DRV_CTL_START_L2_CMD: {
8503 u32 cli = ctl->data.ring.client_id;
8505 /* Set iSCSI MAC address */
8506 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8511 /* Start accepting on iSCSI L2 ring. Accept all multicasts
8512 * because it's the only way for UIO Client to accept
8513 * multicasts (in non-promiscuous mode only one Client per
8514 * function will receive multicast packets (leading in our
8517 bnx2x_rxq_set_mac_filters(bp, cli,
8518 BNX2X_ACCEPT_UNICAST |
8519 BNX2X_ACCEPT_BROADCAST |
8520 BNX2X_ACCEPT_ALL_MULTICAST);
8521 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
8526 /* rtnl_lock is held. */
8527 case DRV_CTL_STOP_L2_CMD: {
8528 u32 cli = ctl->data.ring.client_id;
8530 /* Stop accepting on iSCSI L2 ring */
8531 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
8532 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
8537 /* Unset iSCSI L2 MAC */
8538 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8543 BNX2X_ERR("unknown command %x\n", ctl->cmd);
/*
 * Publish IRQ/status-block routing to CNIC: irq_arr[0] is the CNIC
 * status block (MSI-X vector msix_table[1] when MSI-X is active,
 * otherwise the shared line with the MSI-X flag cleared); irq_arr[1]
 * is the default status block.
 */
8550 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
8552 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
8554 if (bp->flags & USING_MSIX_FLAG) {
8555 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
8556 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
8557 cp->irq_arr[0].vector = bp->msix_table[1].vector;
8559 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
8560 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
8562 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
8563 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
8564 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
8565 cp->irq_arr[1].status_blk = bp->def_status_blk;
8566 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
8567 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
/*
 * CNIC registration (cp->drv_register_cnic): allocate the KWQE ring
 * (one page), reset its producer/consumer/pending counters, record the
 * CNIC private data, initialize the CNIC status block, set up IRQ info
 * and the iSCSI MAC, and finally publish the ops pointer via RCU.
 * Refuses to register while interrupts are disabled (intr_sem held).
 */
8572 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
8575 struct bnx2x *bp = netdev_priv(dev);
8576 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
8581 if (atomic_read(&bp->intr_sem) != 0)
8584 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
8588 bp->cnic_kwq_cons = bp->cnic_kwq;
8589 bp->cnic_kwq_prod = bp->cnic_kwq;
8590 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
8592 bp->cnic_spq_pending = 0;
8593 bp->cnic_kwq_pending = 0;
8595 bp->cnic_data = data;
8598 cp->drv_state = CNIC_DRV_STATE_REGD;
8599 cp->iro_arr = bp->iro_arr;
8601 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
8602 BNX2X_VF_ID_INVALID, false,
8603 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
8605 bnx2x_setup_cnic_irq_info(bp);
8606 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8607 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
8608 rcu_assign_pointer(bp->cnic_ops, ops);
/*
 * CNIC unregistration: under cnic_mutex, clear the iSCSI MAC if it was
 * set and retract the ops pointer (RCU), then free the KWQE ring.
 */
8613 static int bnx2x_unregister_cnic(struct net_device *dev)
8615 struct bnx2x *bp = netdev_priv(dev);
8616 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
8618 mutex_lock(&bp->cnic_mutex);
8619 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8620 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8621 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8624 rcu_assign_pointer(bp->cnic_ops, NULL);
8625 mutex_unlock(&bp->cnic_mutex);
8627 kfree(bp->cnic_kwq);
8628 bp->cnic_kwq = NULL;
/*
 * Exported probe hook for the CNIC module: fill in and return this
 * device's cnic_eth_dev descriptor — chip id, BAR mappings, context
 * table geometry, starting CID and the driver callbacks CNIC uses
 * (submit KWQEs, drv_ctl, register/unregister).
 */
8633 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
8635 struct bnx2x *bp = netdev_priv(dev);
8636 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
8638 cp->drv_owner = THIS_MODULE;
8639 cp->chip_id = CHIP_ID(bp);
8640 cp->pdev = bp->pdev;
8641 cp->io_base = bp->regview;
8642 cp->io_base2 = bp->doorbells;
8643 cp->max_kwqe_pending = 8;
8644 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
8645 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
8646 cp->ctx_tbl_len = CNIC_ILT_LINES;
8647 cp->starting_cid = BCM_CNIC_CID_START;
8648 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
8649 cp->drv_ctl = bnx2x_drv_ctl;
8650 cp->drv_register_cnic = bnx2x_register_cnic;
8651 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
8655 EXPORT_SYMBOL(bnx2x_cnic_probe);
8657 #endif /* BCM_CNIC */