/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
#define DRV_MODULE_VERSION	"1.52.1-1"
#define DRV_MODULE_RELDATE	"2009/10/13"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"

#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
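
/*
 * Note (illustrative, not from the original source): the two helpers above
 * implement indirect GRC access through PCI config space - the target GRC
 * address is latched via PCICFG_GRC_ADDRESS and the data moves through
 * PCICFG_GRC_DATA. Restoring PCICFG_VENDOR_ID_OFFSET afterwards makes sure
 * a later plain config read does not hit a stale GRC window. A typical
 * (hypothetical) use:
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, some_grc_addr);
 *	bnx2x_reg_wr_ind(bp, some_grc_addr, val | some_bit);
 */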
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int offset = 0;

	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
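
/*
 * Worked example (illustrative only; the real DMAE_LEN32_WR_MAX value is
 * chip-dependent): assuming DMAE_LEN32_WR_MAX were 0x400 dwords, a request
 * for len = 0x900 dwords is issued as three DMAE commands of 0x400, 0x400
 * and 0x100 dwords. The byte offset advances by DMAE_LEN32_WR_MAX * 4 per
 * iteration because phys_addr is byte-based while len counts 32-bit words.
 */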
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
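
/*
 * The "wide bus" helpers above move 64-bit values as two dwords over DMAE;
 * HILO_U64() recombines them, so e.g. wb_data[0] = 0x00000001 and
 * wb_data[1] = 0x00000002 yield 0x0000000100000002 (a sketch, assuming the
 * usual ((u64)hi << 32) | lo definition of HILO_U64).
 */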
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
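
/*
 * Sketch of the ack layout (derived from the shifts above, not a quote of
 * the original comments): status_block_index, sb_id, storm, update and op
 * are packed into a single 32-bit igu_ack_register and posted with one
 * REG_WR, so the IGU observes the new consumer index and the
 * enable/disable command atomically.
 */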
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}

/*
 * fast path service functions
 */
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
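
/*
 * Illustrative arithmetic (assumed values, not from the original source):
 * if the ring spanned 4 pages, the NUM_TX_RINGS "next-page" BDs never
 * carry packets, so for prod = 100 and cons = 40 the code reports
 * used = 60 + 4 = 64 and avail = tx_ring_size - 64. SUB_S16() keeps the
 * subtraction correct across 16-bit wraparound of prod/cons.
 */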
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped(). Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}
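
/*
 * The smp_mb() above pairs with a corresponding barrier in the xmit path:
 * this side orders "update tx_bd_cons, then test queue-stopped", the other
 * side orders "stop queue, then re-check the consumer". A minimal sketch
 * of the producer side (hypothetical, for illustration only):
 *
 *	if (bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3) {
 *		netif_tx_stop_queue(txq);
 *		smp_mb();
 *		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
 *			netif_tx_wake_queue(txq);
 *	}
 */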
#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
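
/*
 * Invariant (restating the logic above, not a quote of the original): a
 * set mask bit means "SGE free", a cleared bit means "in use by FW".
 * bnx2x_update_sge_prod() only advances rx_sge_prod across mask elements
 * that read as fully set (RX_SGE_MASK_ELEM_ONE_MASK); comparing a whole
 * u64 element against all-ones is cheaper than scanning single indices.
 */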
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
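
/*
 * Ordering recap (illustrative): wmb() publishes the BD/SGE contents
 * before the producer values are written, and mmiowb() keeps the producer
 * REG_WR()s ordered on platforms where posted MMIO writes could otherwise
 * be reordered. Dropping either barrier would let the FW fetch a producer
 * that points at a BD without a buffer behind it.
 */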
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx or Tx according to MSI-X vector */
	if (fp->is_rx_queue) {
		prefetch(fp->rx_cons_sb);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	} else {
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);

		bnx2x_update_fpsb_idx(fp);
		rmb();
		bnx2x_tx_int(fp);

		/* Re-enable interrupts */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			if (fp->is_rx_queue) {
				prefetch(fp->rx_cons_sb);
				prefetch(&fp->status_blk->u_status_block.
							status_block_index);

				napi_schedule(&bnx2x_fp(bp, fp->index, napi));

			} else {
				prefetch(fp->tx_cons_sb);
				prefetch(&fp->status_blk->c_status_block.
							status_block_index);

				bnx2x_update_fpsb_idx(fp);
				rmb();
				bnx2x_tx_int(fp);

				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_ENABLE, 1);
			}
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
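
/*
 * Lock protocol sketch (as implied by the offsets above): writing the
 * resource bit to hw_lock_control_reg + 4 attempts to take the lock,
 * reading hw_lock_control_reg back shows the current owner mask, and the
 * release path below writes the bit to hw_lock_control_reg itself to
 * clear it. The 1000 x 5ms loop bounds acquisition at roughly 5 seconds.
 */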
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
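
/*
 * Example (illustrative values): with both NIG_REG_PORT_SWAP and
 * NIG_REG_STRAP_OVERRIDE set, gpio_port = 1 ^ port, so on port 0 GPIO 2
 * is looked up at shift 2 + MISC_REGISTERS_GPIO_PORT_SHIFT rather than at
 * shift 2 - the pin banks of the two ports are simply mirrored.
 */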
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
		   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
		   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
		return;
	}

	if (bp->link_vars.link_up) {
		u16 line_speed;

		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		line_speed = bp->link_vars.line_speed;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
		printk("%d Mbps ", line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc = 0;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
2276 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2278 u32 r_param = bp->link_vars.line_speed / 8;
2279 u32 fair_periodic_timeout_usec;
2282 memset(&(bp->cmng.rs_vars), 0,
2283 sizeof(struct rate_shaping_vars_per_port));
2284 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2286 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2287 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2289 /* this is the threshold below which no timer arming will occur;
2290 the 1.25 coefficient makes the threshold a little bigger
2291 than the real time, to compensate for timer inaccuracy */
2292 bp->cmng.rs_vars.rs_threshold =
2293 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
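/* Worked example: on a 10G link line_speed = 10000 Mbps, so
   r_param = 10000/8 = 1250 bytes/usec; with the 100 usec period
   noted above, rs_threshold = (100 * 1250 * 5)/4 = 156250 bytes,
   i.e. 1.25 times the traffic of one full rate-shaping period. */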
2295 /* resolution of fairness timer */
2296 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2297 /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2298 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2300 /* this is the threshold below which we won't arm the timer anymore */
2301 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2303 /* we multiply by 1e3/8 to get bytes/msec.
2304 The accumulated credit must not exceed FAIR_MEM times
2305 the credit of one t_fair period (the algorithm resolution) */
2306 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2307 /* since each tick is 4 usec */
2308 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
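/* Worked example: on a 10G link t_fair = T_FAIR_COEF / 10000 =
   1000 usec (the comment above implies T_FAIR_COEF is 10^7), so
   upper_bound = 1250 * 1000 * FAIR_MEM bytes and fairness_timeout =
   (QM_ARB_BYTES / 1250) / 4 ticks; QM_ARB_BYTES and FAIR_MEM are
   left symbolic here since they are defined in a header. */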
2311 /* Calculates the sum of vn_min_rates.
2312 It's needed for further normalizing of the min_rates.
2313 Returns:
2314 sum of vn_min_rates.
2315 or
2316 0 - if all the min_rates are 0.
2317 In the latter case the fairness algorithm should be deactivated.
2318 If not all min_rates are zero then those that are zeroes will be set to 1.
2320 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2323 int port = BP_PORT(bp);
2326 bp->vn_weight_sum = 0;
2327 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2328 int func = 2*vn + port;
2329 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2330 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2331 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2333 /* Skip hidden vns */
2334 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2335 continue;
2337 /* If min rate is zero - set it to 1 */
2338 if (!vn_min_rate)
2339 vn_min_rate = DEF_MIN_RATE;
2343 bp->vn_weight_sum += vn_min_rate;
2346 /* ... only if all min rates are zeros - disable fairness */
2348 bp->cmng.flags.cmng_enables &=
2349 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2350 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
2351 " fairness will be disabled\n");
2353 bp->cmng.flags.cmng_enables |=
2354 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2357 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2359 struct rate_shaping_vars_per_vn m_rs_vn;
2360 struct fairness_vars_per_vn m_fair_vn;
2361 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2362 u16 vn_min_rate, vn_max_rate;
2365 /* If function is hidden - set min and max to zeroes */
2366 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2371 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2372 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2373 /* If min rate is zero - set it to 1 */
2374 if (!vn_min_rate)
2375 vn_min_rate = DEF_MIN_RATE;
2376 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2377 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2380 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
2381 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2383 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2384 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2386 /* global vn counter - maximal Mbps for this vn */
2387 m_rs_vn.vn_counter.rate = vn_max_rate;
2389 /* quota - number of bytes transmitted in this period */
2390 m_rs_vn.vn_counter.quota =
2391 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
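/* Worked example: a vn capped at vn_max_rate = 2500 Mbps gets a
   quota of (2500 * 100) / 8 = 31250 bytes per rate-shaping period
   (assuming the 100 usec RS_PERIODIC_TIMEOUT_USEC noted in
   bnx2x_init_port_minmax() above). */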
2393 if (bp->vn_weight_sum) {
2394 /* credit for each period of the fairness algorithm:
2395 number of bytes in T_FAIR (the vns share the port rate).
2396 vn_weight_sum should not be larger than 10000, thus
2397 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2398 than zero */
2399 m_fair_vn.vn_credit_delta =
2400 max((u32)(vn_min_rate * (T_FAIR_COEF /
2401 (8 * bp->vn_weight_sum))),
2402 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2403 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2404 m_fair_vn.vn_credit_delta);
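/* Worked example: with vn_min_rate = 100 Mbps and vn_weight_sum =
   10000, the weighted share is 100 * (10^7 / (8 * 10000)) = 12500
   bytes per T_FAIR (taking T_FAIR_COEF = 10^7 as implied by the
   comment in bnx2x_init_port_minmax()); the max() then keeps the
   credit at no less than 2 * fair_threshold, presumably so a vn
   with a tiny min rate can still make progress. */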
2407 /* Store it to internal memory */
2408 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2409 REG_WR(bp, BAR_XSTRORM_INTMEM +
2410 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2411 ((u32 *)(&m_rs_vn))[i]);
2413 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2414 REG_WR(bp, BAR_XSTRORM_INTMEM +
2415 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2416 ((u32 *)(&m_fair_vn))[i]);
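/* REG_WR() moves one 32-bit word per call, so both structures are
   pushed into the XSTORM internal memory by walking them as u32
   arrays; the same pattern is used for the per-port cmng structure
   in bnx2x_link_attn() and bnx2x_update_min_max() below. */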
2420 /* This function is called upon link interrupt */
2421 static void bnx2x_link_attn(struct bnx2x *bp)
2423 /* Make sure that we are synced with the current statistics */
2424 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2426 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2428 if (bp->link_vars.link_up) {
2430 /* dropless flow control */
2431 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2432 int port = BP_PORT(bp);
2433 u32 pause_enabled = 0;
2435 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2436 pause_enabled = 1;
2438 REG_WR(bp, BAR_USTRORM_INTMEM +
2439 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2440 pause_enabled);
2443 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2444 struct host_port_stats *pstats;
2446 pstats = bnx2x_sp(bp, port_stats);
2447 /* reset old bmac stats */
2448 memset(&(pstats->mac_stx[0]), 0,
2449 sizeof(struct mac_stx));
2451 if (bp->state == BNX2X_STATE_OPEN)
2452 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2455 /* indicate link status */
2456 bnx2x_link_report(bp);
2459 int port = BP_PORT(bp);
2463 /* Set the attention towards other drivers on the same port */
2464 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2465 if (vn == BP_E1HVN(bp))
2466 continue;
2468 func = ((vn << 1) | port);
2469 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2470 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2473 if (bp->link_vars.link_up) {
2476 /* Init rate shaping and fairness contexts */
2477 bnx2x_init_port_minmax(bp);
2479 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2480 bnx2x_init_vn_minmax(bp, 2*vn + port);
2482 /* Store it to internal memory */
2483 for (i = 0;
2484 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2485 REG_WR(bp, BAR_XSTRORM_INTMEM +
2486 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2487 ((u32 *)(&bp->cmng))[i]);
2492 static void bnx2x__link_status_update(struct bnx2x *bp)
2494 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2497 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2499 if (bp->link_vars.link_up)
2500 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2502 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2504 bnx2x_calc_vn_weight_sum(bp);
2506 /* indicate link status */
2507 bnx2x_link_report(bp);
2510 static void bnx2x_pmf_update(struct bnx2x *bp)
2512 int port = BP_PORT(bp);
2516 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2518 /* enable nig attention */
2519 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2520 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2521 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2523 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2531 * General service functions
2534 /* send the MCP a request, block until there is a reply */
2535 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2537 int func = BP_FUNC(bp);
2538 u32 seq = ++bp->fw_seq;
2541 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2543 mutex_lock(&bp->fw_mb_mutex);
2544 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2545 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2548 /* let the FW do its magic ... */
2551 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2553 /* Give the FW up to 5 seconds (500*10ms) */
2554 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2556 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2557 cnt*delay, rc, seq);
2559 /* is this a reply to our command? */
2560 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2561 rc &= FW_MSG_CODE_MASK;
2564 BNX2X_ERR("FW failed to respond!\n");
2568 mutex_unlock(&bp->fw_mb_mutex);
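/* Typical usage (see bnx2x_dcc_event() below):
	rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
   The sequence number OR-ed into the command is what lets the
   driver tell a stale mailbox value from the reply to this
   particular request. */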
2573 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2574 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2575 static void bnx2x_set_rx_mode(struct net_device *dev);
2577 static void bnx2x_e1h_disable(struct bnx2x *bp)
2579 int port = BP_PORT(bp);
2581 netif_tx_disable(bp->dev);
2582 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2584 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2586 netif_carrier_off(bp->dev);
2589 static void bnx2x_e1h_enable(struct bnx2x *bp)
2591 int port = BP_PORT(bp);
2593 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2595 /* Tx queues should only be re-enabled */
2596 netif_tx_wake_all_queues(bp->dev);
2599 * Should not call netif_carrier_on here since it will be called by
2600 * the link-state check if the link is up
2604 static void bnx2x_update_min_max(struct bnx2x *bp)
2606 int port = BP_PORT(bp);
2609 /* Init rate shaping and fairness contexts */
2610 bnx2x_init_port_minmax(bp);
2612 bnx2x_calc_vn_weight_sum(bp);
2614 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2615 bnx2x_init_vn_minmax(bp, 2*vn + port);
2620 /* Set the attention towards other drivers on the same port */
2621 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2622 if (vn == BP_E1HVN(bp))
2623 continue;
2625 func = ((vn << 1) | port);
2626 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2627 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2630 /* Store it to internal memory */
2631 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2632 REG_WR(bp, BAR_XSTRORM_INTMEM +
2633 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2634 ((u32 *)(&bp->cmng))[i]);
2638 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2640 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2642 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2645 * This is the only place besides the function initialization
2646 * where the bp->flags can change so it is done without any
2647 * locks
2649 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2650 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2651 bp->flags |= MF_FUNC_DIS;
2653 bnx2x_e1h_disable(bp);
2655 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2656 bp->flags &= ~MF_FUNC_DIS;
2658 bnx2x_e1h_enable(bp);
2660 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2662 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2664 bnx2x_update_min_max(bp);
2665 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2668 /* Report results to MCP */
2669 if (dcc_event)
2670 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2671 else
2672 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2675 /* must be called under the spq lock */
2676 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2678 struct eth_spe *next_spe = bp->spq_prod_bd;
2680 if (bp->spq_prod_bd == bp->spq_last_bd) {
2681 bp->spq_prod_bd = bp->spq;
2682 bp->spq_prod_idx = 0;
2683 DP(NETIF_MSG_TIMER, "end of spq\n");
2684 } else {
2685 bp->spq_prod_bd++;
2686 bp->spq_prod_idx++;
2687 }
2689 return next_spe;
2691 /* must be called under the spq lock */
2692 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2694 int func = BP_FUNC(bp);
2696 /* Make sure that BD data is updated before writing the producer */
2699 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2704 /* the slow path queue is odd since completions arrive on the fastpath ring */
2705 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2706 u32 data_hi, u32 data_lo, int common)
2708 struct eth_spe *spe;
2710 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2711 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2712 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2713 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2714 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2716 #ifdef BNX2X_STOP_ON_ERROR
2717 if (unlikely(bp->panic))
2718 return -EIO;
2721 spin_lock_bh(&bp->spq_lock);
2723 if (!bp->spq_left) {
2724 BNX2X_ERR("BUG! SPQ ring full!\n");
2725 spin_unlock_bh(&bp->spq_lock);
2726 bnx2x_panic();
2727 return -EBUSY;
2728 }
2730 spe = bnx2x_sp_get_next(bp);
2732 /* CID needs port number to be encoded in it */
2733 spe->hdr.conn_and_cmd_data =
2734 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2735 HW_CID(bp, cid)));
2736 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2737 if (common)
2738 spe->hdr.type |=
2739 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2741 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2742 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2746 bnx2x_sp_prod_update(bp);
2747 spin_unlock_bh(&bp->spq_lock);
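/* Example of posting a ramrod (adapted from bnx2x_storm_stats_post()
   below):
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
		      data_hi, data_lo, 0);
   The completion arrives on the fastpath ring, so the producer
   update above must be the very last store made visible. */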
2751 /* acquire split MCP access lock register */
2752 static int bnx2x_acquire_alr(struct bnx2x *bp)
2759 for (j = 0; j < i*10; j++) {
2761 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2762 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2763 if (val & (1L << 31))
2768 if (!(val & (1L << 31))) {
2769 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2776 /* release split MCP access lock register */
2777 static void bnx2x_release_alr(struct bnx2x *bp)
2781 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
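/* Bit 31 of the register at GRCBASE_MCP + 0x9c acts as the lock
   flag: acquire spins until its write reads back as set, release
   clears it.  The lock is apparently shared with the MCP firmware,
   hence the "split" in the name. */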
2784 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2786 struct host_def_status_block *def_sb = bp->def_status_blk;
2787 u16 rc = 0;
2789 barrier(); /* status block is written to by the chip */
2790 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2791 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2792 rc |= 1;
2793 }
2794 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2795 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2796 rc |= 2;
2797 }
2798 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2799 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2800 rc |= 4;
2801 }
2802 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2803 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2804 rc |= 8;
2805 }
2806 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2807 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2808 rc |= 16;
2809 }
2811 return rc;
2814 * slow path service functions
2817 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2819 int port = BP_PORT(bp);
2820 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2821 COMMAND_REG_ATTN_BITS_SET);
2822 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2823 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2824 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2825 NIG_REG_MASK_INTERRUPT_PORT0;
2829 if (bp->attn_state & asserted)
2830 BNX2X_ERR("IGU ERROR\n");
2832 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2833 aeu_mask = REG_RD(bp, aeu_addr);
2835 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2836 aeu_mask, asserted);
2837 aeu_mask &= ~(asserted & 0xff);
2838 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2840 REG_WR(bp, aeu_addr, aeu_mask);
2841 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2843 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2844 bp->attn_state |= asserted;
2845 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2847 if (asserted & ATTN_HARD_WIRED_MASK) {
2848 if (asserted & ATTN_NIG_FOR_FUNC) {
2850 bnx2x_acquire_phy_lock(bp);
2852 /* save nig interrupt mask */
2853 nig_mask = REG_RD(bp, nig_int_mask_addr);
2854 REG_WR(bp, nig_int_mask_addr, 0);
2856 bnx2x_link_attn(bp);
2858 /* handle unicore attn? */
2860 if (asserted & ATTN_SW_TIMER_4_FUNC)
2861 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2863 if (asserted & GPIO_2_FUNC)
2864 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2866 if (asserted & GPIO_3_FUNC)
2867 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2869 if (asserted & GPIO_4_FUNC)
2870 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2873 if (asserted & ATTN_GENERAL_ATTN_1) {
2874 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2875 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2877 if (asserted & ATTN_GENERAL_ATTN_2) {
2878 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2879 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2881 if (asserted & ATTN_GENERAL_ATTN_3) {
2882 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2883 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2886 if (asserted & ATTN_GENERAL_ATTN_4) {
2887 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2888 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2890 if (asserted & ATTN_GENERAL_ATTN_5) {
2891 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2892 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2894 if (asserted & ATTN_GENERAL_ATTN_6) {
2895 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2896 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2900 } /* if hardwired */
2902 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2904 REG_WR(bp, hc_addr, asserted);
2906 /* now set back the mask */
2907 if (asserted & ATTN_NIG_FOR_FUNC) {
2908 REG_WR(bp, nig_int_mask_addr, nig_mask);
2909 bnx2x_release_phy_lock(bp);
2913 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2915 int port = BP_PORT(bp);
2917 /* mark the failure */
2918 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2919 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2920 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2921 bp->link_params.ext_phy_config);
2923 /* log the failure */
2924 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2925 " the driver to shutdown the card to prevent permanent"
2926 " damage. Please contact Dell Support for assistance\n",
2930 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2932 int port = BP_PORT(bp);
2934 u32 val, swap_val, swap_override;
2936 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2937 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2939 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2941 val = REG_RD(bp, reg_offset);
2942 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2943 REG_WR(bp, reg_offset, val);
2945 BNX2X_ERR("SPIO5 hw attention\n");
2947 /* Fan failure attention */
2948 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2949 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2950 /* Low power mode is controlled by GPIO 2 */
2951 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2952 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2953 /* The PHY reset is controlled by GPIO 1 */
2954 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2955 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2958 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2959 /* The PHY reset is controlled by GPIO 1 */
2960 /* fake the port number to cancel the swap done in
2961 set_gpio() */
2962 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2963 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2964 port = (swap_val && swap_override) ^ 1;
2965 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2966 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2972 bnx2x_fan_failure(bp);
2975 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2976 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2977 bnx2x_acquire_phy_lock(bp);
2978 bnx2x_handle_module_detect_int(&bp->link_params);
2979 bnx2x_release_phy_lock(bp);
2982 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2984 val = REG_RD(bp, reg_offset);
2985 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2986 REG_WR(bp, reg_offset, val);
2988 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2989 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2994 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2998 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3000 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3001 BNX2X_ERR("DB hw attention 0x%x\n", val);
3002 /* DORQ discard attention */
3004 BNX2X_ERR("FATAL error from DORQ\n");
3007 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3009 int port = BP_PORT(bp);
3012 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3013 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3015 val = REG_RD(bp, reg_offset);
3016 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3017 REG_WR(bp, reg_offset, val);
3019 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3020 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3025 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3029 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3031 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3032 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3033 /* CFC error attention */
3035 BNX2X_ERR("FATAL error from CFC\n");
3038 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3040 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3041 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3042 /* RQ_USDMDP_FIFO_OVERFLOW */
3044 BNX2X_ERR("FATAL error from PXP\n");
3047 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3049 int port = BP_PORT(bp);
3052 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3053 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3055 val = REG_RD(bp, reg_offset);
3056 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3057 REG_WR(bp, reg_offset, val);
3059 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3060 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3065 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3069 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3071 if (attn & BNX2X_PMF_LINK_ASSERT) {
3072 int func = BP_FUNC(bp);
3074 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3075 bp->mf_config = SHMEM_RD(bp,
3076 mf_cfg.func_mf_config[func].config);
3077 val = SHMEM_RD(bp, func_mb[func].drv_status);
3078 if (val & DRV_STATUS_DCC_EVENT_MASK)
3079 bnx2x_dcc_event(bp,
3080 (val & DRV_STATUS_DCC_EVENT_MASK));
3081 bnx2x__link_status_update(bp);
3082 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3083 bnx2x_pmf_update(bp);
3085 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3087 BNX2X_ERR("MC assert!\n");
3088 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3089 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3090 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3091 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3094 } else if (attn & BNX2X_MCP_ASSERT) {
3096 BNX2X_ERR("MCP assert!\n");
3097 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3101 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3104 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3105 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3106 if (attn & BNX2X_GRC_TIMEOUT) {
3107 val = CHIP_IS_E1H(bp) ?
3108 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3109 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3111 if (attn & BNX2X_GRC_RSV) {
3112 val = CHIP_IS_E1H(bp) ?
3113 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3114 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3116 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3120 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3122 struct attn_route attn;
3123 struct attn_route group_mask;
3124 int port = BP_PORT(bp);
3130 /* need to take HW lock because MCP or other port might also
3131 try to handle this event */
3132 bnx2x_acquire_alr(bp);
3134 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3135 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3136 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3137 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3138 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3139 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3141 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3142 if (deasserted & (1 << index)) {
3143 group_mask = bp->attn_group[index];
3145 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3146 index, group_mask.sig[0], group_mask.sig[1],
3147 group_mask.sig[2], group_mask.sig[3]);
3149 bnx2x_attn_int_deasserted3(bp,
3150 attn.sig[3] & group_mask.sig[3]);
3151 bnx2x_attn_int_deasserted1(bp,
3152 attn.sig[1] & group_mask.sig[1]);
3153 bnx2x_attn_int_deasserted2(bp,
3154 attn.sig[2] & group_mask.sig[2]);
3155 bnx2x_attn_int_deasserted0(bp,
3156 attn.sig[0] & group_mask.sig[0]);
3158 if ((attn.sig[0] & group_mask.sig[0] &
3159 HW_PRTY_ASSERT_SET_0) ||
3160 (attn.sig[1] & group_mask.sig[1] &
3161 HW_PRTY_ASSERT_SET_1) ||
3162 (attn.sig[2] & group_mask.sig[2] &
3163 HW_PRTY_ASSERT_SET_2))
3164 BNX2X_ERR("FATAL HW block parity attention\n");
3168 bnx2x_release_alr(bp);
3170 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3173 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3175 REG_WR(bp, reg_addr, val);
3177 if (~bp->attn_state & deasserted)
3178 BNX2X_ERR("IGU ERROR\n");
3180 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3181 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3183 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3184 aeu_mask = REG_RD(bp, reg_addr);
3186 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3187 aeu_mask, deasserted);
3188 aeu_mask |= (deasserted & 0xff);
3189 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3191 REG_WR(bp, reg_addr, aeu_mask);
3192 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3194 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3195 bp->attn_state &= ~deasserted;
3196 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3199 static void bnx2x_attn_int(struct bnx2x *bp)
3201 /* read local copy of bits */
3202 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3203 attn_bits);
3204 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3205 attn_bits_ack);
3206 u32 attn_state = bp->attn_state;
3208 /* look for changed bits */
3209 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3210 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3213 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3214 attn_bits, attn_ack, asserted, deasserted);
3216 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3217 BNX2X_ERR("BAD attention state\n");
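/* A bit is newly asserted only if it is set in attn_bits but not
   yet in attn_ack or attn_state; the deasserted mask is the mirror
   image.  E.g. attn_bits = 0x5, attn_ack = 0x1, attn_state = 0x1
   gives asserted = 0x4 and deasserted = 0x0. */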
3219 /* handle bits that were raised */
3220 if (asserted)
3221 bnx2x_attn_int_asserted(bp, asserted);
3223 if (deasserted)
3224 bnx2x_attn_int_deasserted(bp, deasserted);
3227 static void bnx2x_sp_task(struct work_struct *work)
3229 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3233 /* Return here if interrupt is disabled */
3234 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3235 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3239 status = bnx2x_update_dsb_idx(bp);
3240 /* if (status == 0) */
3241 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3243 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3249 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3250 IGU_INT_NOP, 1);
3251 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3252 IGU_INT_NOP, 1);
3253 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3254 IGU_INT_NOP, 1);
3255 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3256 IGU_INT_NOP, 1);
3257 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3258 IGU_INT_ENABLE, 1);
3262 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3264 struct net_device *dev = dev_instance;
3265 struct bnx2x *bp = netdev_priv(dev);
3267 /* Return here if interrupt is disabled */
3268 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3269 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3273 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3275 #ifdef BNX2X_STOP_ON_ERROR
3276 if (unlikely(bp->panic))
3277 return IRQ_HANDLED;
3282 struct cnic_ops *c_ops;
3285 c_ops = rcu_dereference(bp->cnic_ops);
3286 if (c_ops)
3287 c_ops->cnic_handler(bp->cnic_data, NULL);
3291 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3296 /* end of slow path */
3300 /****************************************************************************
3302 ****************************************************************************/
3304 /* sum[hi:lo] += add[hi:lo] */
3305 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3306 do { \
3307 s_lo += a_lo; \
3308 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3309 } while (0)
3311 /* difference = minuend - subtrahend */
3312 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3313 do { \
3314 if (m_lo < s_lo) { \
3315 /* underflow */ \
3316 d_hi = m_hi - s_hi; \
3317 if (d_hi > 0) { \
3318 /* we can 'loan' 1 */ \
3319 d_hi--; \
3320 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3321 } else { \
3322 /* m_hi <= s_hi */ \
3323 d_hi = 0; \
3324 d_lo = 0; \
3325 } \
3326 } else { \
3327 /* m_lo >= s_lo */ \
3328 if (m_hi < s_hi) { \
3329 d_hi = 0; \
3330 d_lo = 0; \
3331 } else { \
3332 /* m_hi >= s_hi */ \
3333 d_hi = m_hi - s_hi; \
3334 d_lo = m_lo - s_lo; \
3335 } \
3336 } \
3337 } while (0)
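/* These macros do 64-bit arithmetic on counters kept as split hi/lo
   u32 pairs (the layout used by the FW and MAC statistics blocks).
   For ADD_64, e.g. s = {hi 1, lo 0xffffffff} plus a = {hi 0, lo 1}:
   s_lo wraps to 0, the (s_lo < a_lo) test detects the carry and
   s_hi becomes 2. */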
3339 #define UPDATE_STAT64(s, t) \
3341 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3342 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3343 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3344 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3345 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3346 pstats->mac_stx[1].t##_lo, diff.lo); \
3349 #define UPDATE_STAT64_NIG(s, t) \
3351 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3352 diff.lo, new->s##_lo, old->s##_lo); \
3353 ADD_64(estats->t##_hi, diff.hi, \
3354 estats->t##_lo, diff.lo); \
3357 /* sum[hi:lo] += add */
3358 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3359 do { \
3360 s_lo += a; \
3361 s_hi += (s_lo < a) ? 1 : 0; \
3362 } while (0)
3364 #define UPDATE_EXTEND_STAT(s) \
3366 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3367 pstats->mac_stx[1].s##_lo, \
3368 new->s); \
3371 #define UPDATE_EXTEND_TSTAT(s, t) \
3373 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3374 old_tclient->s = tclient->s; \
3375 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3378 #define UPDATE_EXTEND_USTAT(s, t) \
3380 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3381 old_uclient->s = uclient->s; \
3382 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3385 #define UPDATE_EXTEND_XSTAT(s, t) \
3387 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3388 old_xclient->s = xclient->s; \
3389 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
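/* The UPDATE_EXTEND_* helpers fold a free-running 32-bit FW counter
   into a 64-bit driver counter.  diff (a u32 in the callers below)
   is computed in 32-bit arithmetic, so a counter that wrapped still
   yields the right increment: old = 0xfffffffe, new = 0x1 gives
   diff = 3. */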
3392 /* minuend -= subtrahend */
3393 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3395 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3398 /* minuend[hi:lo] -= subtrahend */
3399 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3401 SUB_64(m_hi, 0, m_lo, s); \
3404 #define SUB_EXTEND_USTAT(s, t) \
3406 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3407 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3411 * General service functions
3414 static inline long bnx2x_hilo(u32 *hiref)
3416 u32 lo = *(hiref + 1);
3417 #if (BITS_PER_LONG == 64)
3418 u32 hi = *hiref;
3420 return HILO_U64(hi, lo);
3421 #else
3422 return lo;
3423 #endif
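/* Statistics live in memory as {hi, lo} u32 pairs: on 64-bit builds
   both halves are combined via HILO_U64(), on 32-bit builds only
   the low 32 bits fit in a long and are returned. */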
3427 * Init service functions
3430 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3432 if (!bp->stats_pending) {
3433 struct eth_query_ramrod_data ramrod_data = {0};
3436 ramrod_data.drv_counter = bp->stats_counter++;
3437 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3438 for_each_queue(bp, i)
3439 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3441 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3442 ((u32 *)&ramrod_data)[1],
3443 ((u32 *)&ramrod_data)[0], 0);
3445 /* stats ramrod has its own slot on the spq */
3447 bp->stats_pending = 1;
3452 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3454 struct dmae_command *dmae = &bp->stats_dmae;
3455 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3457 *stats_comp = DMAE_COMP_VAL;
3458 if (CHIP_REV_IS_SLOW(bp))
3459 return;
3462 if (bp->executer_idx) {
3463 int loader_idx = PMF_DMAE_C(bp);
3465 memset(dmae, 0, sizeof(struct dmae_command));
3467 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3468 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3469 DMAE_CMD_DST_RESET |
3470 #ifdef __BIG_ENDIAN
3471 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3472 #else
3473 DMAE_CMD_ENDIANITY_DW_SWAP |
3474 #endif
3475 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3477 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3478 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3479 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3480 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3481 sizeof(struct dmae_command) *
3482 (loader_idx + 1)) >> 2;
3483 dmae->dst_addr_hi = 0;
3484 dmae->len = sizeof(struct dmae_command) >> 2;
3487 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3488 dmae->comp_addr_hi = 0;
3492 bnx2x_post_dmae(bp, dmae, loader_idx);
3494 } else if (bp->func_stx) {
3496 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3500 static int bnx2x_stats_comp(struct bnx2x *bp)
3502 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3506 while (*stats_comp != DMAE_COMP_VAL) {
3508 BNX2X_ERR("timeout waiting for stats finished\n");
3518 * Statistics service functions
3521 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3523 struct dmae_command *dmae;
3525 int loader_idx = PMF_DMAE_C(bp);
3526 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3529 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3530 BNX2X_ERR("BUG!\n");
3534 bp->executer_idx = 0;
3536 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3538 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3539 #ifdef __BIG_ENDIAN
3540 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3541 #else
3542 DMAE_CMD_ENDIANITY_DW_SWAP |
3543 #endif
3544 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3545 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3547 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3548 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3549 dmae->src_addr_lo = bp->port.port_stx >> 2;
3550 dmae->src_addr_hi = 0;
3551 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3552 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3553 dmae->len = DMAE_LEN32_RD_MAX;
3554 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3555 dmae->comp_addr_hi = 0;
3558 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3559 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3560 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3561 dmae->src_addr_hi = 0;
3562 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3563 DMAE_LEN32_RD_MAX * 4);
3564 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3565 DMAE_LEN32_RD_MAX * 4);
3566 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3567 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3568 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3569 dmae->comp_val = DMAE_COMP_VAL;
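/* One DMAE command moves at most DMAE_LEN32_RD_MAX dwords, so the
   port-stats area is read in two chunks: the first command chains
   to the second through the loader (note its comp_addr is a GO
   register), and only the second one writes DMAE_COMP_VAL, which
   bnx2x_stats_comp() polls for. */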
3572 bnx2x_hw_stats_post(bp);
3573 bnx2x_stats_comp(bp);
3576 static void bnx2x_port_stats_init(struct bnx2x *bp)
3578 struct dmae_command *dmae;
3579 int port = BP_PORT(bp);
3580 int vn = BP_E1HVN(bp);
3582 int loader_idx = PMF_DMAE_C(bp);
3584 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3587 if (!bp->link_vars.link_up || !bp->port.pmf) {
3588 BNX2X_ERR("BUG!\n");
3592 bp->executer_idx = 0;
3595 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3596 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3597 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3598 #ifdef __BIG_ENDIAN
3599 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3600 #else
3601 DMAE_CMD_ENDIANITY_DW_SWAP |
3602 #endif
3603 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3604 (vn << DMAE_CMD_E1HVN_SHIFT));
3606 if (bp->port.port_stx) {
3608 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3609 dmae->opcode = opcode;
3610 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3611 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3612 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3613 dmae->dst_addr_hi = 0;
3614 dmae->len = sizeof(struct host_port_stats) >> 2;
3615 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3616 dmae->comp_addr_hi = 0;
3621 if (bp->func_stx) {
3622 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3623 dmae->opcode = opcode;
3624 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3625 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3626 dmae->dst_addr_lo = bp->func_stx >> 2;
3627 dmae->dst_addr_hi = 0;
3628 dmae->len = sizeof(struct host_func_stats) >> 2;
3629 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3630 dmae->comp_addr_hi = 0;
3635 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3636 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3637 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3638 #ifdef __BIG_ENDIAN
3639 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3640 #else
3641 DMAE_CMD_ENDIANITY_DW_SWAP |
3642 #endif
3643 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3644 (vn << DMAE_CMD_E1HVN_SHIFT));
3646 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3648 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3649 NIG_REG_INGRESS_BMAC0_MEM);
3651 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3652 BIGMAC_REGISTER_TX_STAT_GTBYT */
3653 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3654 dmae->opcode = opcode;
3655 dmae->src_addr_lo = (mac_addr +
3656 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3657 dmae->src_addr_hi = 0;
3658 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3659 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3660 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3661 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3662 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3663 dmae->comp_addr_hi = 0;
3666 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3667 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3668 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3669 dmae->opcode = opcode;
3670 dmae->src_addr_lo = (mac_addr +
3671 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3672 dmae->src_addr_hi = 0;
3673 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3674 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3675 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3676 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3677 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3678 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3679 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3680 dmae->comp_addr_hi = 0;
3683 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3685 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3687 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3688 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3689 dmae->opcode = opcode;
3690 dmae->src_addr_lo = (mac_addr +
3691 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3692 dmae->src_addr_hi = 0;
3693 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3694 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3695 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3696 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3697 dmae->comp_addr_hi = 0;
3700 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3701 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3702 dmae->opcode = opcode;
3703 dmae->src_addr_lo = (mac_addr +
3704 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3705 dmae->src_addr_hi = 0;
3706 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3707 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3708 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3709 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3710 dmae->len = 1;
3711 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3712 dmae->comp_addr_hi = 0;
3715 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3716 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3717 dmae->opcode = opcode;
3718 dmae->src_addr_lo = (mac_addr +
3719 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3720 dmae->src_addr_hi = 0;
3721 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3722 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3723 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3724 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3725 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3726 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3727 dmae->comp_addr_hi = 0;
3732 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3733 dmae->opcode = opcode;
3734 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3735 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3736 dmae->src_addr_hi = 0;
3737 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3738 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3739 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3740 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3741 dmae->comp_addr_hi = 0;
3744 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3745 dmae->opcode = opcode;
3746 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3747 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3748 dmae->src_addr_hi = 0;
3749 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3750 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3751 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3752 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3753 dmae->len = (2*sizeof(u32)) >> 2;
3754 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3755 dmae->comp_addr_hi = 0;
3758 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3759 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3760 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3761 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3762 #ifdef __BIG_ENDIAN
3763 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3764 #else
3765 DMAE_CMD_ENDIANITY_DW_SWAP |
3766 #endif
3767 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3768 (vn << DMAE_CMD_E1HVN_SHIFT));
3769 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3770 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3771 dmae->src_addr_hi = 0;
3772 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3773 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3774 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3775 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3776 dmae->len = (2*sizeof(u32)) >> 2;
3777 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3778 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3779 dmae->comp_val = DMAE_COMP_VAL;
3784 static void bnx2x_func_stats_init(struct bnx2x *bp)
3786 struct dmae_command *dmae = &bp->stats_dmae;
3787 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3790 if (!bp->func_stx) {
3791 BNX2X_ERR("BUG!\n");
3795 bp->executer_idx = 0;
3796 memset(dmae, 0, sizeof(struct dmae_command));
3798 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3799 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3800 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3801 #ifdef __BIG_ENDIAN
3802 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3803 #else
3804 DMAE_CMD_ENDIANITY_DW_SWAP |
3805 #endif
3806 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3807 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3808 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3809 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3810 dmae->dst_addr_lo = bp->func_stx >> 2;
3811 dmae->dst_addr_hi = 0;
3812 dmae->len = sizeof(struct host_func_stats) >> 2;
3813 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3814 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3815 dmae->comp_val = DMAE_COMP_VAL;
3820 static void bnx2x_stats_start(struct bnx2x *bp)
3822 if (bp->port.pmf)
3823 bnx2x_port_stats_init(bp);
3825 else if (bp->func_stx)
3826 bnx2x_func_stats_init(bp);
3828 bnx2x_hw_stats_post(bp);
3829 bnx2x_storm_stats_post(bp);
3832 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3834 bnx2x_stats_comp(bp);
3835 bnx2x_stats_pmf_update(bp);
3836 bnx2x_stats_start(bp);
3839 static void bnx2x_stats_restart(struct bnx2x *bp)
3841 bnx2x_stats_comp(bp);
3842 bnx2x_stats_start(bp);
3845 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3847 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3848 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3849 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3850 struct {
3851 u32 lo;
3852 u32 hi;
3853 } diff;
3855 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3856 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3857 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3858 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3859 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3860 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3861 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3862 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3863 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3864 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3865 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3866 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3867 UPDATE_STAT64(tx_stat_gt127,
3868 tx_stat_etherstatspkts65octetsto127octets);
3869 UPDATE_STAT64(tx_stat_gt255,
3870 tx_stat_etherstatspkts128octetsto255octets);
3871 UPDATE_STAT64(tx_stat_gt511,
3872 tx_stat_etherstatspkts256octetsto511octets);
3873 UPDATE_STAT64(tx_stat_gt1023,
3874 tx_stat_etherstatspkts512octetsto1023octets);
3875 UPDATE_STAT64(tx_stat_gt1518,
3876 tx_stat_etherstatspkts1024octetsto1522octets);
3877 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3878 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3879 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3880 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3881 UPDATE_STAT64(tx_stat_gterr,
3882 tx_stat_dot3statsinternalmactransmiterrors);
3883 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3885 estats->pause_frames_received_hi =
3886 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3887 estats->pause_frames_received_lo =
3888 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3890 estats->pause_frames_sent_hi =
3891 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3892 estats->pause_frames_sent_lo =
3893 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3896 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3898 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3899 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3900 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3902 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3903 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3904 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3905 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3906 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3907 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3908 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3909 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3910 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3911 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3912 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3913 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3914 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3915 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3916 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3917 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3918 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3919 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3920 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3921 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3922 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3923 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3924 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3925 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3926 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3927 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3928 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3929 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3930 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3931 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3932 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3934 estats->pause_frames_received_hi =
3935 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3936 estats->pause_frames_received_lo =
3937 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3938 ADD_64(estats->pause_frames_received_hi,
3939 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3940 estats->pause_frames_received_lo,
3941 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3943 estats->pause_frames_sent_hi =
3944 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3945 estats->pause_frames_sent_lo =
3946 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3947 ADD_64(estats->pause_frames_sent_hi,
3948 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3949 estats->pause_frames_sent_lo,
3950 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3953 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3955 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3956 struct nig_stats *old = &(bp->port.old_nig_stats);
3957 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3958 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3959 struct {
3960 u32 lo;
3961 u32 hi;
3962 } diff;
3963 u32 nig_timer_max;
3965 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3966 bnx2x_bmac_stats_update(bp);
3968 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3969 bnx2x_emac_stats_update(bp);
3971 else { /* unreached */
3972 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3976 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3977 new->brb_discard - old->brb_discard);
3978 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3979 new->brb_truncate - old->brb_truncate);
3981 UPDATE_STAT64_NIG(egress_mac_pkt0,
3982 etherstatspkts1024octetsto1522octets);
3983 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3985 memcpy(old, new, sizeof(struct nig_stats));
3987 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3988 sizeof(struct mac_stx));
3989 estats->brb_drop_hi = pstats->brb_drop_hi;
3990 estats->brb_drop_lo = pstats->brb_drop_lo;
3992 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3994 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3995 if (nig_timer_max != estats->nig_timer_max) {
3996 estats->nig_timer_max = nig_timer_max;
3997 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
4003 static int bnx2x_storm_stats_update(struct bnx2x *bp)
4005 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4006 struct tstorm_per_port_stats *tport =
4007 &stats->tstorm_common.port_statistics;
4008 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4009 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4012 memcpy(&(fstats->total_bytes_received_hi),
4013 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4014 sizeof(struct host_func_stats) - 2*sizeof(u32));
4015 estats->error_bytes_received_hi = 0;
4016 estats->error_bytes_received_lo = 0;
4017 estats->etherstatsoverrsizepkts_hi = 0;
4018 estats->etherstatsoverrsizepkts_lo = 0;
4019 estats->no_buff_discard_hi = 0;
4020 estats->no_buff_discard_lo = 0;
4022 for_each_rx_queue(bp, i) {
4023 struct bnx2x_fastpath *fp = &bp->fp[i];
4024 int cl_id = fp->cl_id;
4025 struct tstorm_per_client_stats *tclient =
4026 &stats->tstorm_common.client_statistics[cl_id];
4027 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4028 struct ustorm_per_client_stats *uclient =
4029 &stats->ustorm_common.client_statistics[cl_id];
4030 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4031 struct xstorm_per_client_stats *xclient =
4032 &stats->xstorm_common.client_statistics[cl_id];
4033 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4034 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4035 u32 diff;
4037 /* are storm stats valid? */
4038 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4039 bp->stats_counter) {
4040 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4041 " xstorm counter (%d) != stats_counter (%d)\n",
4042 i, xclient->stats_counter, bp->stats_counter);
4045 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4046 bp->stats_counter) {
4047 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4048 " tstorm counter (%d) != stats_counter (%d)\n",
4049 i, tclient->stats_counter, bp->stats_counter);
4052 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4053 bp->stats_counter) {
4054 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4055 " ustorm counter (%d) != stats_counter (%d)\n",
4056 i, uclient->stats_counter, bp->stats_counter);
4060 qstats->total_bytes_received_hi =
4061 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4062 qstats->total_bytes_received_lo =
4063 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4065 ADD_64(qstats->total_bytes_received_hi,
4066 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4067 qstats->total_bytes_received_lo,
4068 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4070 ADD_64(qstats->total_bytes_received_hi,
4071 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4072 qstats->total_bytes_received_lo,
4073 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4075 qstats->valid_bytes_received_hi =
4076 qstats->total_bytes_received_hi;
4077 qstats->valid_bytes_received_lo =
4078 qstats->total_bytes_received_lo;
4080 qstats->error_bytes_received_hi =
4081 le32_to_cpu(tclient->rcv_error_bytes.hi);
4082 qstats->error_bytes_received_lo =
4083 le32_to_cpu(tclient->rcv_error_bytes.lo);
4085 ADD_64(qstats->total_bytes_received_hi,
4086 qstats->error_bytes_received_hi,
4087 qstats->total_bytes_received_lo,
4088 qstats->error_bytes_received_lo);
4090 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4091 total_unicast_packets_received);
4092 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4093 total_multicast_packets_received);
4094 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4095 total_broadcast_packets_received);
4096 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4097 etherstatsoverrsizepkts);
4098 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4100 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4101 total_unicast_packets_received);
4102 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4103 total_multicast_packets_received);
4104 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4105 total_broadcast_packets_received);
4106 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4107 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4108 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4110 qstats->total_bytes_transmitted_hi =
4111 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4112 qstats->total_bytes_transmitted_lo =
4113 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4115 ADD_64(qstats->total_bytes_transmitted_hi,
4116 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4117 qstats->total_bytes_transmitted_lo,
4118 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4120 ADD_64(qstats->total_bytes_transmitted_hi,
4121 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4122 qstats->total_bytes_transmitted_lo,
4123 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4125 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4126 total_unicast_packets_transmitted);
4127 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4128 total_multicast_packets_transmitted);
4129 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4130 total_broadcast_packets_transmitted);
4132 old_tclient->checksum_discard = tclient->checksum_discard;
4133 old_tclient->ttl0_discard = tclient->ttl0_discard;
4135 ADD_64(fstats->total_bytes_received_hi,
4136 qstats->total_bytes_received_hi,
4137 fstats->total_bytes_received_lo,
4138 qstats->total_bytes_received_lo);
4139 ADD_64(fstats->total_bytes_transmitted_hi,
4140 qstats->total_bytes_transmitted_hi,
4141 fstats->total_bytes_transmitted_lo,
4142 qstats->total_bytes_transmitted_lo);
4143 ADD_64(fstats->total_unicast_packets_received_hi,
4144 qstats->total_unicast_packets_received_hi,
4145 fstats->total_unicast_packets_received_lo,
4146 qstats->total_unicast_packets_received_lo);
4147 ADD_64(fstats->total_multicast_packets_received_hi,
4148 qstats->total_multicast_packets_received_hi,
4149 fstats->total_multicast_packets_received_lo,
4150 qstats->total_multicast_packets_received_lo);
4151 ADD_64(fstats->total_broadcast_packets_received_hi,
4152 qstats->total_broadcast_packets_received_hi,
4153 fstats->total_broadcast_packets_received_lo,
4154 qstats->total_broadcast_packets_received_lo);
4155 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4156 qstats->total_unicast_packets_transmitted_hi,
4157 fstats->total_unicast_packets_transmitted_lo,
4158 qstats->total_unicast_packets_transmitted_lo);
4159 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4160 qstats->total_multicast_packets_transmitted_hi,
4161 fstats->total_multicast_packets_transmitted_lo,
4162 qstats->total_multicast_packets_transmitted_lo);
4163 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4164 qstats->total_broadcast_packets_transmitted_hi,
4165 fstats->total_broadcast_packets_transmitted_lo,
4166 qstats->total_broadcast_packets_transmitted_lo);
4167 ADD_64(fstats->valid_bytes_received_hi,
4168 qstats->valid_bytes_received_hi,
4169 fstats->valid_bytes_received_lo,
4170 qstats->valid_bytes_received_lo);
4172 ADD_64(estats->error_bytes_received_hi,
4173 qstats->error_bytes_received_hi,
4174 estats->error_bytes_received_lo,
4175 qstats->error_bytes_received_lo);
4176 ADD_64(estats->etherstatsoverrsizepkts_hi,
4177 qstats->etherstatsoverrsizepkts_hi,
4178 estats->etherstatsoverrsizepkts_lo,
4179 qstats->etherstatsoverrsizepkts_lo);
4180 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4181 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4184 ADD_64(fstats->total_bytes_received_hi,
4185 estats->rx_stat_ifhcinbadoctets_hi,
4186 fstats->total_bytes_received_lo,
4187 estats->rx_stat_ifhcinbadoctets_lo);
4189 memcpy(estats, &(fstats->total_bytes_received_hi),
4190 sizeof(struct host_func_stats) - 2*sizeof(u32));
4192 ADD_64(estats->etherstatsoverrsizepkts_hi,
4193 estats->rx_stat_dot3statsframestoolong_hi,
4194 estats->etherstatsoverrsizepkts_lo,
4195 estats->rx_stat_dot3statsframestoolong_lo);
4196 ADD_64(estats->error_bytes_received_hi,
4197 estats->rx_stat_ifhcinbadoctets_hi,
4198 estats->error_bytes_received_lo,
4199 estats->rx_stat_ifhcinbadoctets_lo);
4201 if (bp->port.pmf) {
4202 estats->mac_filter_discard =
4203 le32_to_cpu(tport->mac_filter_discard);
4204 estats->xxoverflow_discard =
4205 le32_to_cpu(tport->xxoverflow_discard);
4206 estats->brb_truncate_discard =
4207 le32_to_cpu(tport->brb_truncate_discard);
4208 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4211 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4213 bp->stats_pending = 0;
4218 static void bnx2x_net_stats_update(struct bnx2x *bp)
4220 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4221 struct net_device_stats *nstats = &bp->dev->stats;
4224 nstats->rx_packets =
4225 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4226 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4227 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4229 nstats->tx_packets =
4230 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4231 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4232 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4234 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4236 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4238 nstats->rx_dropped = estats->mac_discard;
4239 for_each_rx_queue(bp, i)
4240 nstats->rx_dropped +=
4241 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4243 nstats->tx_dropped = 0;
4245 nstats->multicast =
4246 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4248 nstats->collisions =
4249 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4251 nstats->rx_length_errors =
4252 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4253 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4254 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4255 bnx2x_hilo(&estats->brb_truncate_hi);
4256 nstats->rx_crc_errors =
4257 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4258 nstats->rx_frame_errors =
4259 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4260 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4261 nstats->rx_missed_errors = estats->xxoverflow_discard;
4263 nstats->rx_errors = nstats->rx_length_errors +
4264 nstats->rx_over_errors +
4265 nstats->rx_crc_errors +
4266 nstats->rx_frame_errors +
4267 nstats->rx_fifo_errors +
4268 nstats->rx_missed_errors;
4270 nstats->tx_aborted_errors =
4271 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4272 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4273 nstats->tx_carrier_errors =
4274 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4275 nstats->tx_fifo_errors = 0;
4276 nstats->tx_heartbeat_errors = 0;
4277 nstats->tx_window_errors = 0;
4279 nstats->tx_errors = nstats->tx_aborted_errors +
4280 nstats->tx_carrier_errors +
4281 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4284 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4286 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4289 estats->driver_xoff = 0;
4290 estats->rx_err_discard_pkt = 0;
4291 estats->rx_skb_alloc_failed = 0;
4292 estats->hw_csum_err = 0;
4293 for_each_rx_queue(bp, i) {
4294 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4296 estats->driver_xoff += qstats->driver_xoff;
4297 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4298 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4299 estats->hw_csum_err += qstats->hw_csum_err;
4303 static void bnx2x_stats_update(struct bnx2x *bp)
4305 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4307 if (*stats_comp != DMAE_COMP_VAL)
4311 bnx2x_hw_stats_update(bp);
4313 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4314 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4319 bnx2x_net_stats_update(bp);
4320 bnx2x_drv_stats_update(bp);
4322 if (bp->msglevel & NETIF_MSG_TIMER) {
4323 struct bnx2x_fastpath *fp0_rx = bp->fp;
4324 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4325 struct tstorm_per_client_stats *old_tclient =
4326 &bp->fp->old_tclient;
4327 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4328 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4329 struct net_device_stats *nstats = &bp->dev->stats;
4332 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4333 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4335 bnx2x_tx_avail(fp0_tx),
4336 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4337 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4339 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4340 fp0_rx->rx_comp_cons),
4341 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4342 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4343 "brb truncate %u\n",
4344 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4345 qstats->driver_xoff,
4346 estats->brb_drop_lo, estats->brb_truncate_lo);
4347 printk(KERN_DEBUG "tstats: checksum_discard %u "
4348 "packets_too_big_discard %lu no_buff_discard %lu "
4349 "mac_discard %u mac_filter_discard %u "
4350 "xxovrflow_discard %u brb_truncate_discard %u "
4351 "ttl0_discard %u\n",
4352 le32_to_cpu(old_tclient->checksum_discard),
4353 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4354 bnx2x_hilo(&qstats->no_buff_discard_hi),
4355 estats->mac_discard, estats->mac_filter_discard,
4356 estats->xxoverflow_discard, estats->brb_truncate_discard,
4357 le32_to_cpu(old_tclient->ttl0_discard));
4359 for_each_queue(bp, i) {
4360 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4361 bnx2x_fp(bp, i, tx_pkt),
4362 bnx2x_fp(bp, i, rx_pkt),
4363 bnx2x_fp(bp, i, rx_calls));
4367 bnx2x_hw_stats_post(bp);
4368 bnx2x_storm_stats_post(bp);
4371 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4373 struct dmae_command *dmae;
4375 int loader_idx = PMF_DMAE_C(bp);
4376 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4378 bp->executer_idx = 0;
4380 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4381 DMAE_CMD_C_ENABLE |
4382 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4383 #ifdef __BIG_ENDIAN
4384 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4385 #else
4386 DMAE_CMD_ENDIANITY_DW_SWAP |
4387 #endif
4388 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4389 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4391 if (bp->port.port_stx) {
4393 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4394 if (bp->func_stx)
4395 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4396 else
4397 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4398 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4399 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4400 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4401 dmae->dst_addr_hi = 0;
4402 dmae->len = sizeof(struct host_port_stats) >> 2;
4403 if (bp->func_stx) {
4404 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4405 dmae->comp_addr_hi = 0;
4406 dmae->comp_val = 1;
4407 } else {
4408 dmae->comp_addr_lo =
4409 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4410 dmae->comp_addr_hi =
4411 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4412 dmae->comp_val = DMAE_COMP_VAL;
4420 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4421 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4422 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4423 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4424 dmae->dst_addr_lo = bp->func_stx >> 2;
4425 dmae->dst_addr_hi = 0;
4426 dmae->len = sizeof(struct host_func_stats) >> 2;
4427 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4428 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4429 dmae->comp_val = DMAE_COMP_VAL;
4435 static void bnx2x_stats_stop(struct bnx2x *bp)
4439 bnx2x_stats_comp(bp);
4442 update = (bnx2x_hw_stats_update(bp) == 0);
4444 update |= (bnx2x_storm_stats_update(bp) == 0);
4447 bnx2x_net_stats_update(bp);
4450 bnx2x_port_stats_stop(bp);
4452 bnx2x_hw_stats_post(bp);
4453 bnx2x_stats_comp(bp);
4457 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4461 static const struct {
4462 void (*action)(struct bnx2x *bp);
4463 enum bnx2x_stats_state next_state;
4464 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4467 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4468 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4469 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4470 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4473 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4474 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4475 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4476 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4480 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4482 enum bnx2x_stats_state state = bp->stats_state;
4484 bnx2x_stats_stm[state][event].action(bp);
4485 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4487 /* Make sure the state has been "changed" */
4490 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4491 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4492 state, event, bp->stats_state);
4495 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4497 struct dmae_command *dmae;
4498 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4501 if (!bp->port.pmf || !bp->port.port_stx) {
4502 BNX2X_ERR("BUG!\n");
4506 bp->executer_idx = 0;
4508 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4509 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4510 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4511 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4512 #ifdef __BIG_ENDIAN
4513 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4514 #else
4515 DMAE_CMD_ENDIANITY_DW_SWAP |
4516 #endif
4517 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4518 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4519 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4520 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4521 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4522 dmae->dst_addr_hi = 0;
4523 dmae->len = sizeof(struct host_port_stats) >> 2;
4524 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4525 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4526 dmae->comp_val = DMAE_COMP_VAL;
4529 bnx2x_hw_stats_post(bp);
4530 bnx2x_stats_comp(bp);
4533 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4535 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4536 int port = BP_PORT(bp);
4541 if (!bp->port.pmf || !bp->func_stx) {
4542 BNX2X_ERR("BUG!\n");
4546 /* save our func_stx */
4547 func_stx = bp->func_stx;
4549 for (vn = VN_0; vn < vn_max; vn++) {
4552 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4553 bnx2x_func_stats_init(bp);
4554 bnx2x_hw_stats_post(bp);
4555 bnx2x_stats_comp(bp);
4558 /* restore our func_stx */
4559 bp->func_stx = func_stx;
4562 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4564 struct dmae_command *dmae = &bp->stats_dmae;
4565 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4568 if (!bp->func_stx) {
4569 BNX2X_ERR("BUG!\n");
4573 bp->executer_idx = 0;
4574 memset(dmae, 0, sizeof(struct dmae_command));
4576 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4577 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4578 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4579 #ifdef __BIG_ENDIAN
4580 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4581 #else
4582 DMAE_CMD_ENDIANITY_DW_SWAP |
4583 #endif
4584 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4585 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4586 dmae->src_addr_lo = bp->func_stx >> 2;
4587 dmae->src_addr_hi = 0;
4588 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4589 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4590 dmae->len = sizeof(struct host_func_stats) >> 2;
4591 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4592 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4593 dmae->comp_val = DMAE_COMP_VAL;
4596 bnx2x_hw_stats_post(bp);
4597 bnx2x_stats_comp(bp);
4600 static void bnx2x_stats_init(struct bnx2x *bp)
4602 int port = BP_PORT(bp);
4603 int func = BP_FUNC(bp);
4606 bp->stats_pending = 0;
4607 bp->executer_idx = 0;
4608 bp->stats_counter = 0;
4610 /* port and func stats for management */
4611 if (!BP_NOMCP(bp)) {
4612 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4613 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4616 bp->port.port_stx = 0;
4619 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4620 bp->port.port_stx, bp->func_stx);
4623 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4624 bp->port.old_nig_stats.brb_discard =
4625 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4626 bp->port.old_nig_stats.brb_truncate =
4627 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4628 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4629 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4630 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4631 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4633 /* function stats */
4634 for_each_queue(bp, i) {
4635 struct bnx2x_fastpath *fp = &bp->fp[i];
4637 memset(&fp->old_tclient, 0,
4638 sizeof(struct tstorm_per_client_stats));
4639 memset(&fp->old_uclient, 0,
4640 sizeof(struct ustorm_per_client_stats));
4641 memset(&fp->old_xclient, 0,
4642 sizeof(struct xstorm_per_client_stats));
4643 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4646 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4647 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4649 bp->stats_state = STATS_STATE_DISABLED;
4652 if (bp->port.port_stx)
4653 bnx2x_port_stats_base_init(bp);
4656 bnx2x_func_stats_base_init(bp);
4658 } else if (bp->func_stx)
4659 bnx2x_func_stats_base_update(bp);
4662 static void bnx2x_timer(unsigned long data)
4664 struct bnx2x *bp = (struct bnx2x *) data;
4666 if (!netif_running(bp->dev))
4669 if (atomic_read(&bp->intr_sem) != 0)
4673 struct bnx2x_fastpath *fp = &bp->fp[0];
4677 rc = bnx2x_rx_int(fp, 1000);
4680 if (!BP_NOMCP(bp)) {
4681 int func = BP_FUNC(bp);
4685 ++bp->fw_drv_pulse_wr_seq;
4686 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4687 /* TBD - add SYSTEM_TIME */
4688 drv_pulse = bp->fw_drv_pulse_wr_seq;
4689 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4691 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4692 MCP_PULSE_SEQ_MASK);
4693 /* The delta between driver pulse and mcp response
4694 * should be 1 (before mcp response) or 0 (after mcp response)
4695 */
4696 if ((drv_pulse != mcp_pulse) &&
4697 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4698 /* someone lost a heartbeat... */
4699 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4700 drv_pulse, mcp_pulse);
4704 if (bp->state == BNX2X_STATE_OPEN)
4705 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4708 mod_timer(&bp->timer, jiffies + bp->current_interval);
4711 /* end of Statistics */
4716 * nic init service functions
4719 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4721 int port = BP_PORT(bp);
4724 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4725 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4726 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4727 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4728 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4729 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4732 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4733 dma_addr_t mapping, int sb_id)
4735 int port = BP_PORT(bp);
4736 int func = BP_FUNC(bp);
4741 section = ((u64)mapping) + offsetof(struct host_status_block,
4743 sb->u_status_block.status_block_id = sb_id;
4745 REG_WR(bp, BAR_CSTRORM_INTMEM +
4746 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4747 REG_WR(bp, BAR_CSTRORM_INTMEM +
4748 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4750 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4751 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4753 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4754 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4755 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4758 section = ((u64)mapping) + offsetof(struct host_status_block,
4760 sb->c_status_block.status_block_id = sb_id;
4762 REG_WR(bp, BAR_CSTRORM_INTMEM +
4763 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4764 REG_WR(bp, BAR_CSTRORM_INTMEM +
4765 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4767 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4768 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4770 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4771 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4772 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4774 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4777 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4779 int func = BP_FUNC(bp);
4781 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4782 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4783 sizeof(struct tstorm_def_status_block)/4);
4784 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4785 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4786 sizeof(struct cstorm_def_status_block_u)/4);
4787 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4788 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4789 sizeof(struct cstorm_def_status_block_c)/4);
4790 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4791 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4792 sizeof(struct xstorm_def_status_block)/4);
4795 static void bnx2x_init_def_sb(struct bnx2x *bp,
4796 struct host_def_status_block *def_sb,
4797 dma_addr_t mapping, int sb_id)
4799 int port = BP_PORT(bp);
4800 int func = BP_FUNC(bp);
4801 int index, val, reg_offset;
4805 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4806 atten_status_block);
4807 def_sb->atten_status_block.status_block_id = sb_id;
4811 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4812 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4814 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4815 bp->attn_group[index].sig[0] = REG_RD(bp,
4816 reg_offset + 0x10*index);
4817 bp->attn_group[index].sig[1] = REG_RD(bp,
4818 reg_offset + 0x4 + 0x10*index);
4819 bp->attn_group[index].sig[2] = REG_RD(bp,
4820 reg_offset + 0x8 + 0x10*index);
4821 bp->attn_group[index].sig[3] = REG_RD(bp,
4822 reg_offset + 0xc + 0x10*index);
4825 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4826 HC_REG_ATTN_MSG0_ADDR_L);
4828 REG_WR(bp, reg_offset, U64_LO(section));
4829 REG_WR(bp, reg_offset + 4, U64_HI(section));
4831 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4833 val = REG_RD(bp, reg_offset);
4835 REG_WR(bp, reg_offset, val);
4838 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4839 u_def_status_block);
4840 def_sb->u_def_status_block.status_block_id = sb_id;
4842 REG_WR(bp, BAR_CSTRORM_INTMEM +
4843 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4844 REG_WR(bp, BAR_CSTRORM_INTMEM +
4845 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4847 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4848 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4850 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4851 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4852 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4855 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4856 c_def_status_block);
4857 def_sb->c_def_status_block.status_block_id = sb_id;
4859 REG_WR(bp, BAR_CSTRORM_INTMEM +
4860 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4861 REG_WR(bp, BAR_CSTRORM_INTMEM +
4862 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4864 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4865 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4867 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4868 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4869 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4872 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4873 t_def_status_block);
4874 def_sb->t_def_status_block.status_block_id = sb_id;
4876 REG_WR(bp, BAR_TSTRORM_INTMEM +
4877 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4878 REG_WR(bp, BAR_TSTRORM_INTMEM +
4879 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4881 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4882 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4884 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4885 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4886 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4889 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4890 x_def_status_block);
4891 def_sb->x_def_status_block.status_block_id = sb_id;
4893 REG_WR(bp, BAR_XSTRORM_INTMEM +
4894 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4895 REG_WR(bp, BAR_XSTRORM_INTMEM +
4896 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4898 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4899 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4901 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4902 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4903 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4905 bp->stats_pending = 0;
4906 bp->set_mac_pending = 0;
4908 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4911 static void bnx2x_update_coalesce(struct bnx2x *bp)
4913 int port = BP_PORT(bp);
4916 for_each_queue(bp, i) {
4917 int sb_id = bp->fp[i].sb_id;
4919 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4920 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4921 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4922 U_SB_ETH_RX_CQ_INDEX),
4923 bp->rx_ticks/12);
4924 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4925 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4926 U_SB_ETH_RX_CQ_INDEX),
4927 (bp->rx_ticks/12) ? 0 : 1);
4929 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4930 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4931 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4932 C_SB_ETH_TX_CQ_INDEX),
4933 bp->tx_ticks/12);
4934 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4935 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4936 C_SB_ETH_TX_CQ_INDEX),
4937 (bp->tx_ticks/12) ? 0 : 1);
4941 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4942 struct bnx2x_fastpath *fp, int last)
4946 for (i = 0; i < last; i++) {
4947 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4948 struct sk_buff *skb = rx_buf->skb;
4950 if (skb == NULL) {
4951 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4955 if (fp->tpa_state[i] == BNX2X_TPA_START)
4956 pci_unmap_single(bp->pdev,
4957 pci_unmap_addr(rx_buf, mapping),
4958 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4965 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4967 int func = BP_FUNC(bp);
4968 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4969 ETH_MAX_AGGREGATION_QUEUES_E1H;
4970 u16 ring_prod, cqe_ring_prod;
4973 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4975 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4977 if (bp->flags & TPA_ENABLE_FLAG) {
4979 for_each_rx_queue(bp, j) {
4980 struct bnx2x_fastpath *fp = &bp->fp[j];
4982 for (i = 0; i < max_agg_queues; i++) {
4983 fp->tpa_pool[i].skb =
4984 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4985 if (!fp->tpa_pool[i].skb) {
4986 BNX2X_ERR("Failed to allocate TPA "
4987 "skb pool for queue[%d] - "
4988 "disabling TPA on this "
4990 bnx2x_free_tpa_pool(bp, fp, i);
4991 fp->disable_tpa = 1;
4994 pci_unmap_addr_set((struct sw_rx_bd *)
4995 &bp->fp->tpa_pool[i],
4997 fp->tpa_state[i] = BNX2X_TPA_STOP;
5002 for_each_rx_queue(bp, j) {
5003 struct bnx2x_fastpath *fp = &bp->fp[j];
5006 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5007 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5009 /* Mark queue as Rx */
5010 fp->is_rx_queue = 1;
5012 /* "next page" elements initialization */
5014 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5015 struct eth_rx_sge *sge;
5017 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5019 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5020 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5022 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5023 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5026 bnx2x_init_sge_ring_bit_mask(fp);
5029 for (i = 1; i <= NUM_RX_RINGS; i++) {
5030 struct eth_rx_bd *rx_bd;
5032 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5034 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5035 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5037 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5038 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5042 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5043 struct eth_rx_cqe_next_page *nextpg;
5045 nextpg = (struct eth_rx_cqe_next_page *)
5046 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5048 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5049 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5051 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5052 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5055 /* Allocate SGEs and initialize the ring elements */
5056 for (i = 0, ring_prod = 0;
5057 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5059 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5060 BNX2X_ERR("was only able to allocate "
5062 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5063 /* Cleanup already allocated elements */
5064 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5065 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5066 fp->disable_tpa = 1;
5070 ring_prod = NEXT_SGE_IDX(ring_prod);
5072 fp->rx_sge_prod = ring_prod;
5074 /* Allocate BDs and initialize BD ring */
5075 fp->rx_comp_cons = 0;
5076 cqe_ring_prod = ring_prod = 0;
5077 for (i = 0; i < bp->rx_ring_size; i++) {
5078 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5079 BNX2X_ERR("was only able to allocate "
5080 "%d rx skbs on queue[%d]\n", i, j);
5081 fp->eth_q_stats.rx_skb_alloc_failed++;
5084 ring_prod = NEXT_RX_IDX(ring_prod);
5085 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5086 WARN_ON(ring_prod <= i);
5089 fp->rx_bd_prod = ring_prod;
5090 /* must not have more available CQEs than BDs */
5091 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5093 fp->rx_pkt = fp->rx_calls = 0;
5095 /*
5096 * this will generate an interrupt (to the TSTORM);
5097 * it must only be done after the chip is initialized
5098 */
5099 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5104 REG_WR(bp, BAR_USTRORM_INTMEM +
5105 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5106 U64_LO(fp->rx_comp_mapping));
5107 REG_WR(bp, BAR_USTRORM_INTMEM +
5108 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5109 U64_HI(fp->rx_comp_mapping));
5113 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5117 for_each_tx_queue(bp, j) {
5118 struct bnx2x_fastpath *fp = &bp->fp[j];
5120 for (i = 1; i <= NUM_TX_RINGS; i++) {
5121 struct eth_tx_next_bd *tx_next_bd =
5122 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5124 tx_next_bd->addr_hi =
5125 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5126 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5127 tx_next_bd->addr_lo =
5128 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5129 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5132 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5133 fp->tx_db.data.zero_fill1 = 0;
5134 fp->tx_db.data.prod = 0;
5136 fp->tx_pkt_prod = 0;
5137 fp->tx_pkt_cons = 0;
5140 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5144 /* clean tx statistics */
5145 for_each_rx_queue(bp, i)
5146 bnx2x_fp(bp, i, tx_pkt) = 0;
5149 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5151 int func = BP_FUNC(bp);
5153 spin_lock_init(&bp->spq_lock);
5155 bp->spq_left = MAX_SPQ_PENDING;
5156 bp->spq_prod_idx = 0;
5157 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5158 bp->spq_prod_bd = bp->spq;
5159 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5161 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5162 U64_LO(bp->spq_mapping));
5163 REG_WR(bp,
5164 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5165 U64_HI(bp->spq_mapping));
5167 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5168 bp->spq_prod_idx);
5171 static void bnx2x_init_context(struct bnx2x *bp)
5175 for_each_rx_queue(bp, i) {
5176 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5177 struct bnx2x_fastpath *fp = &bp->fp[i];
5178 u8 cl_id = fp->cl_id;
5180 context->ustorm_st_context.common.sb_index_numbers =
5181 BNX2X_RX_SB_INDEX_NUM;
5182 context->ustorm_st_context.common.clientId = cl_id;
5183 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5184 context->ustorm_st_context.common.flags =
5185 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5186 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5187 context->ustorm_st_context.common.statistics_counter_id =
5189 context->ustorm_st_context.common.mc_alignment_log_size =
5190 BNX2X_RX_ALIGN_SHIFT;
5191 context->ustorm_st_context.common.bd_buff_size =
5193 context->ustorm_st_context.common.bd_page_base_hi =
5194 U64_HI(fp->rx_desc_mapping);
5195 context->ustorm_st_context.common.bd_page_base_lo =
5196 U64_LO(fp->rx_desc_mapping);
5197 if (!fp->disable_tpa) {
5198 context->ustorm_st_context.common.flags |=
5199 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5200 context->ustorm_st_context.common.sge_buff_size =
5201 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5203 context->ustorm_st_context.common.sge_page_base_hi =
5204 U64_HI(fp->rx_sge_mapping);
5205 context->ustorm_st_context.common.sge_page_base_lo =
5206 U64_LO(fp->rx_sge_mapping);
5208 context->ustorm_st_context.common.max_sges_for_packet =
5209 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5210 context->ustorm_st_context.common.max_sges_for_packet =
5211 ((context->ustorm_st_context.common.
5212 max_sges_for_packet + PAGES_PER_SGE - 1) &
5213 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5216 context->ustorm_ag_context.cdu_usage =
5217 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5218 CDU_REGION_NUMBER_UCM_AG,
5219 ETH_CONNECTION_TYPE);
5221 context->xstorm_ag_context.cdu_reserved =
5222 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5223 CDU_REGION_NUMBER_XCM_AG,
5224 ETH_CONNECTION_TYPE);
5227 for_each_tx_queue(bp, i) {
5228 struct bnx2x_fastpath *fp = &bp->fp[i];
5229 struct eth_context *context =
5230 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5232 context->cstorm_st_context.sb_index_number =
5233 C_SB_ETH_TX_CQ_INDEX;
5234 context->cstorm_st_context.status_block_id = fp->sb_id;
5236 context->xstorm_st_context.tx_bd_page_base_hi =
5237 U64_HI(fp->tx_desc_mapping);
5238 context->xstorm_st_context.tx_bd_page_base_lo =
5239 U64_LO(fp->tx_desc_mapping);
5240 context->xstorm_st_context.statistics_data = (fp->cl_id |
5241 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5245 static void bnx2x_init_ind_table(struct bnx2x *bp)
5247 int func = BP_FUNC(bp);
5250 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5254 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5255 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5256 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5257 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5258 bp->fp->cl_id + (i % bp->num_rx_queues));
5261 static void bnx2x_set_client_config(struct bnx2x *bp)
5263 struct tstorm_eth_client_config tstorm_client = {0};
5264 int port = BP_PORT(bp);
5267 tstorm_client.mtu = bp->dev->mtu;
5268 tstorm_client.config_flags =
5269 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5270 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5272 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5273 tstorm_client.config_flags |=
5274 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5275 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5279 for_each_queue(bp, i) {
5280 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5282 REG_WR(bp, BAR_TSTRORM_INTMEM +
5283 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5284 ((u32 *)&tstorm_client)[0]);
5285 REG_WR(bp, BAR_TSTRORM_INTMEM +
5286 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5287 ((u32 *)&tstorm_client)[1]);
5290 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5291 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5294 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5296 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5297 int mode = bp->rx_mode;
5298 int mask = bp->rx_mode_cl_mask;
5299 int func = BP_FUNC(bp);
5300 int port = BP_PORT(bp);
5302 /* All but management unicast packets should pass to the host as well */
5303 u32 llh_mask =
5304 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5305 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5306 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5307 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5309 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5311 switch (mode) {
5312 case BNX2X_RX_MODE_NONE: /* no Rx */
5313 tstorm_mac_filter.ucast_drop_all = mask;
5314 tstorm_mac_filter.mcast_drop_all = mask;
5315 tstorm_mac_filter.bcast_drop_all = mask;
5318 case BNX2X_RX_MODE_NORMAL:
5319 tstorm_mac_filter.bcast_accept_all = mask;
5322 case BNX2X_RX_MODE_ALLMULTI:
5323 tstorm_mac_filter.mcast_accept_all = mask;
5324 tstorm_mac_filter.bcast_accept_all = mask;
5327 case BNX2X_RX_MODE_PROMISC:
5328 tstorm_mac_filter.ucast_accept_all = mask;
5329 tstorm_mac_filter.mcast_accept_all = mask;
5330 tstorm_mac_filter.bcast_accept_all = mask;
5331 /* pass management unicast packets as well */
5332 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5336 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5340 REG_WR(bp,
5341 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5342 llh_mask);
5344 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5345 REG_WR(bp, BAR_TSTRORM_INTMEM +
5346 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5347 ((u32 *)&tstorm_mac_filter)[i]);
5349 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5350 ((u32 *)&tstorm_mac_filter)[i]); */
5353 if (mode != BNX2X_RX_MODE_NONE)
5354 bnx2x_set_client_config(bp);
5357 static void bnx2x_init_internal_common(struct bnx2x *bp)
5361 /* Zero this manually as its initialization is
5362 currently missing in the initTool */
5363 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5364 REG_WR(bp, BAR_USTRORM_INTMEM +
5365 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5368 static void bnx2x_init_internal_port(struct bnx2x *bp)
5370 int port = BP_PORT(bp);
5372 REG_WR(bp,
5373 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5374 REG_WR(bp,
5375 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5376 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5377 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5380 static void bnx2x_init_internal_func(struct bnx2x *bp)
5382 struct tstorm_eth_function_common_config tstorm_config = {0};
5383 struct stats_indication_flags stats_flags = {0};
5384 int port = BP_PORT(bp);
5385 int func = BP_FUNC(bp);
5391 tstorm_config.config_flags = MULTI_FLAGS(bp);
5392 tstorm_config.rss_result_mask = MULTI_MASK;
5395 /* Enable TPA if needed */
5396 if (bp->flags & TPA_ENABLE_FLAG)
5397 tstorm_config.config_flags |=
5398 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5401 tstorm_config.config_flags |=
5402 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5404 tstorm_config.leading_client_id = BP_L_ID(bp);
5406 REG_WR(bp, BAR_TSTRORM_INTMEM +
5407 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5408 (*(u32 *)&tstorm_config));
5410 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5411 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5412 bnx2x_set_storm_rx_mode(bp);
5414 for_each_queue(bp, i) {
5415 u8 cl_id = bp->fp[i].cl_id;
5417 /* reset xstorm per client statistics */
5418 offset = BAR_XSTRORM_INTMEM +
5419 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5420 for (j = 0;
5421 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5422 REG_WR(bp, offset + j*4, 0);
5424 /* reset tstorm per client statistics */
5425 offset = BAR_TSTRORM_INTMEM +
5426 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5427 for (j = 0;
5428 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5429 REG_WR(bp, offset + j*4, 0);
5431 /* reset ustorm per client statistics */
5432 offset = BAR_USTRORM_INTMEM +
5433 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5434 for (j = 0;
5435 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5436 REG_WR(bp, offset + j*4, 0);
5439 /* Init statistics related context */
5440 stats_flags.collect_eth = 1;
5442 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5443 ((u32 *)&stats_flags)[0]);
5444 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5445 ((u32 *)&stats_flags)[1]);
5447 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5448 ((u32 *)&stats_flags)[0]);
5449 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5450 ((u32 *)&stats_flags)[1]);
5452 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5453 ((u32 *)&stats_flags)[0]);
5454 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5455 ((u32 *)&stats_flags)[1]);
5457 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5458 ((u32 *)&stats_flags)[0]);
5459 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5460 ((u32 *)&stats_flags)[1]);
5462 REG_WR(bp, BAR_XSTRORM_INTMEM +
5463 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5464 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5465 REG_WR(bp, BAR_XSTRORM_INTMEM +
5466 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5467 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5469 REG_WR(bp, BAR_TSTRORM_INTMEM +
5470 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5471 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5472 REG_WR(bp, BAR_TSTRORM_INTMEM +
5473 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5474 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5476 REG_WR(bp, BAR_USTRORM_INTMEM +
5477 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5478 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5479 REG_WR(bp, BAR_USTRORM_INTMEM +
5480 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5481 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5483 if (CHIP_IS_E1H(bp)) {
5484 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5485 IS_E1HMF(bp));
5486 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5487 IS_E1HMF(bp));
5488 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5489 IS_E1HMF(bp));
5490 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5491 IS_E1HMF(bp));
5493 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5494 bp->e1hov);
5497 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5499 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5500 SGE_PAGE_SIZE * PAGES_PER_SGE),
5502 for_each_rx_queue(bp, i) {
5503 struct bnx2x_fastpath *fp = &bp->fp[i];
5505 REG_WR(bp, BAR_USTRORM_INTMEM +
5506 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5507 U64_LO(fp->rx_comp_mapping));
5508 REG_WR(bp, BAR_USTRORM_INTMEM +
5509 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5510 U64_HI(fp->rx_comp_mapping));
5513 REG_WR(bp, BAR_USTRORM_INTMEM +
5514 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5515 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5516 REG_WR(bp, BAR_USTRORM_INTMEM +
5517 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5518 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5520 REG_WR16(bp, BAR_USTRORM_INTMEM +
5521 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5525 /* dropless flow control */
5526 if (CHIP_IS_E1H(bp)) {
5527 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5529 rx_pause.bd_thr_low = 250;
5530 rx_pause.cqe_thr_low = 250;
5532 rx_pause.sge_thr_low = 0;
5533 rx_pause.bd_thr_high = 350;
5534 rx_pause.cqe_thr_high = 350;
5535 rx_pause.sge_thr_high = 0;
5537 for_each_rx_queue(bp, i) {
5538 struct bnx2x_fastpath *fp = &bp->fp[i];
5540 if (!fp->disable_tpa) {
5541 rx_pause.sge_thr_low = 150;
5542 rx_pause.sge_thr_high = 250;
5546 offset = BAR_USTRORM_INTMEM +
5547 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5550 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5552 REG_WR(bp, offset + j*4,
5553 ((u32 *)&rx_pause)[j]);
5557 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5559 /* Init rate shaping and fairness contexts */
5563 /* During init there is no active link;
5564 until link is up, set the link rate to 10Gbps */
5565 bp->link_vars.line_speed = SPEED_10000;
5566 bnx2x_init_port_minmax(bp);
5569 bp->mf_config =
5570 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5571 bnx2x_calc_vn_weight_sum(bp);
5573 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5574 bnx2x_init_vn_minmax(bp, 2*vn + port);
5576 /* Enable rate shaping and fairness */
5577 bp->cmng.flags.cmng_enables |=
5578 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5581 /* rate shaping and fairness are disabled */
5583 "single function mode minmax will be disabled\n");
5587 /* Store it to internal memory */
5589 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5590 REG_WR(bp, BAR_XSTRORM_INTMEM +
5591 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5592 ((u32 *)(&bp->cmng))[i]);
5595 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5597 switch (load_code) {
5598 case FW_MSG_CODE_DRV_LOAD_COMMON:
5599 bnx2x_init_internal_common(bp);
5602 case FW_MSG_CODE_DRV_LOAD_PORT:
5603 bnx2x_init_internal_port(bp);
5606 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5607 bnx2x_init_internal_func(bp);
5611 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5616 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5620 for_each_queue(bp, i) {
5621 struct bnx2x_fastpath *fp = &bp->fp[i];
5624 fp->state = BNX2X_FP_STATE_CLOSED;
5626 fp->cl_id = BP_L_ID(bp) + i;
5628 fp->sb_id = fp->cl_id + 1;
5630 fp->sb_id = fp->cl_id;
5632 /* Suitable Rx and Tx SBs are served by the same client */
5633 if (i >= bp->num_rx_queues)
5634 fp->cl_id -= bp->num_rx_queues;
5636 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5637 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5638 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5640 bnx2x_update_fpsb_idx(fp);
5643 /* ensure status block indices were read */
5647 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5649 bnx2x_update_dsb_idx(bp);
5650 bnx2x_update_coalesce(bp);
5651 bnx2x_init_rx_rings(bp);
5652 bnx2x_init_tx_ring(bp);
5653 bnx2x_init_sp_ring(bp);
5654 bnx2x_init_context(bp);
5655 bnx2x_init_internal(bp, load_code);
5656 bnx2x_init_ind_table(bp);
5657 bnx2x_stats_init(bp);
5659 /* At this point, we are ready for interrupts */
5660 atomic_set(&bp->intr_sem, 0);
5662 /* flush all before enabling interrupts */
5666 bnx2x_int_enable(bp);
5668 /* Check for SPIO5 */
5669 bnx2x_attn_int_deasserted0(bp,
5670 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5671 AEU_INPUTS_ATTN_BITS_SPIO5);
5674 /* end of nic init */
5677 * gzip service functions
5680 static int bnx2x_gunzip_init(struct bnx2x *bp)
5682 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5683 &bp->gunzip_mapping);
5684 if (bp->gunzip_buf == NULL)
5687 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5688 if (bp->strm == NULL)
5691 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5693 if (bp->strm->workspace == NULL)
5703 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5704 bp->gunzip_mapping);
5705 bp->gunzip_buf = NULL;
5708 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5709 " decompression\n", bp->dev->name);
5713 static void bnx2x_gunzip_end(struct bnx2x *bp)
5715 kfree(bp->strm->workspace);
5720 if (bp->gunzip_buf) {
5721 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5722 bp->gunzip_mapping);
5723 bp->gunzip_buf = NULL;
5727 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5729 int n, rc;
5731 /* check gzip header */
5732 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5733 BNX2X_ERR("Bad gzip header\n");
5734 return -EINVAL;
5737 n = 10;
5739 #define FNAME 0x8
5741 if (zbuf[3] & FNAME)
5742 while ((zbuf[n++] != 0) && (n < len));
5744 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5745 bp->strm->avail_in = len - n;
5746 bp->strm->next_out = bp->gunzip_buf;
5747 bp->strm->avail_out = FW_BUF_SIZE;
5749 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5753 rc = zlib_inflate(bp->strm, Z_FINISH);
5754 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5755 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5756 bp->dev->name, bp->strm->msg);
5758 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5759 if (bp->gunzip_outlen & 0x3)
5760 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5761 " gunzip_outlen (%d) not aligned\n",
5762 bp->dev->name, bp->gunzip_outlen);
5763 bp->gunzip_outlen >>= 2;
5765 zlib_inflateEnd(bp->strm);
5767 if (rc == Z_STREAM_END)
5768 return 0;
5769 else
5770 return -1;
5773 /* nic load/unload */
5776 * General service functions
5779 /* send a NIG loopback debug packet */
5780 static void bnx2x_lb_pckt(struct bnx2x *bp)
5784 /* Ethernet source and destination addresses */
5785 wb_write[0] = 0x55555555;
5786 wb_write[1] = 0x55555555;
5787 wb_write[2] = 0x20; /* SOP */
5788 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5790 /* NON-IP protocol */
5791 wb_write[0] = 0x09000000;
5792 wb_write[1] = 0x55555555;
5793 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5794 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5797 /* some of the internal memories
5798 * are not directly readable from the driver;
5799 * to test them we send debug packets
5800 */
5801 static int bnx2x_int_mem_test(struct bnx2x *bp)
5807 if (CHIP_REV_IS_FPGA(bp))
5809 else if (CHIP_REV_IS_EMUL(bp))
5814 DP(NETIF_MSG_HW, "start part1\n");
5816 /* Disable inputs of parser neighbor blocks */
5817 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5818 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5819 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5820 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5822 /* Write 0 to parser credits for CFC search request */
5823 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5825 /* send Ethernet packet */
5828 /* TODO: should the NIG statistics be reset here? */
5829 /* Wait until NIG register shows 1 packet of size 0x10 */
5830 count = 1000 * factor;
5833 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5834 val = *bnx2x_sp(bp, wb_data[0]);
5842 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5846 /* Wait until PRS register shows 1 packet */
5847 count = 1000 * factor;
5849 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5857 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5861 /* Reset and init BRB, PRS */
5862 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5864 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5866 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5867 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5869 DP(NETIF_MSG_HW, "part2\n");
5871 /* Disable inputs of parser neighbor blocks */
5872 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5873 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5874 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5875 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5877 /* Write 0 to parser credits for CFC search request */
5878 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5880 /* send 10 Ethernet packets */
5881 for (i = 0; i < 10; i++)
5884 /* Wait until NIG register shows 10 + 1
5885 packets of size 11*0x10 = 0xb0 */
5886 count = 1000 * factor;
5889 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5890 val = *bnx2x_sp(bp, wb_data[0]);
5898 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5902 /* Wait until PRS register shows 2 packets */
5903 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5905 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5907 /* Write 1 to parser credits for CFC search request */
5908 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5910 /* Wait until PRS register shows 3 packets */
5911 msleep(10 * factor);
5912 /* Wait until NIG register shows 1 packet of size 0x10 */
5913 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5915 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5917 /* clear NIG EOP FIFO */
5918 for (i = 0; i < 11; i++)
5919 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5920 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5922 BNX2X_ERR("clear of NIG failed\n");
5926 /* Reset and init BRB, PRS, NIG */
5927 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5929 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5931 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5932 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5935 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5938 /* Enable inputs of parser neighbor blocks */
5939 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5940 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5941 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5942 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5944 DP(NETIF_MSG_HW, "done\n");
5949 static void enable_blocks_attention(struct bnx2x *bp)
5951 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5952 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5953 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5954 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5955 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5956 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5957 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5958 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5959 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5960 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5961 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5962 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5963 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5964 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5965 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5966 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5967 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5968 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5969 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5970 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5971 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5972 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5973 if (CHIP_REV_IS_FPGA(bp))
5974 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5976 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5977 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5978 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5979 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5980 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5981 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5982 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5983 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5984 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5985 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3 and 4 masked */
5989 static void bnx2x_reset_common(struct bnx2x *bp)
5992 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5994 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5997 static void bnx2x_init_pxp(struct bnx2x *bp)
6000 int r_order, w_order;
6002 pci_read_config_word(bp->pdev,
6003 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6004 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6005 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6007 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6009 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6013 bnx2x_init_pxp_arb(bp, r_order, w_order);
6016 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6022 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6023 SHARED_HW_CFG_FAN_FAILURE_MASK;
6025 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6026 is_required = 1;
6028 /*
6029 * The fan failure mechanism is usually related to the PHY type since
6030 * the power consumption of the board is affected by the PHY. Currently,
6031 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6032 */
6033 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6034 for (port = PORT_0; port < PORT_MAX; port++) {
6036 SHMEM_RD(bp, dev_info.port_hw_config[port].
6037 external_phy_config) &
6038 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6041 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6043 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6045 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6048 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6050 if (is_required == 0)
6053 /* Fan failure is indicated by SPIO 5 */
6054 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6055 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6057 /* set to active low mode */
6058 val = REG_RD(bp, MISC_REG_SPIO_INT);
6059 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6060 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6061 REG_WR(bp, MISC_REG_SPIO_INT, val);
6063 /* enable interrupt to signal the IGU */
6064 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6065 val |= (1 << MISC_REGISTERS_SPIO_5);
6066 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6069 static int bnx2x_init_common(struct bnx2x *bp)
6076 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6078 bnx2x_reset_common(bp);
6079 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6080 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6082 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6083 if (CHIP_IS_E1H(bp))
6084 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6086 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6088 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6090 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6091 if (CHIP_IS_E1(bp)) {
6092 /* enable HW interrupt from PXP on USDM overflow
6093 bit 16 on INT_MASK_0 */
6094 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6097 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6101 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6102 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6103 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6104 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6105 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6106 /* make sure this value is 0 */
6107 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6109 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6110 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6111 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6112 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6113 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6116 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6118 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6119 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6120 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6123 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6124 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6126 /* let the HW do its magic ... */
6128 /* finish PXP init */
6129 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6131 BNX2X_ERR("PXP2 CFG failed\n");
6134 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6136 BNX2X_ERR("PXP2 RD_INIT failed\n");
6140 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6141 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6143 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6145 /* clean the DMAE memory */
6147 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6149 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6150 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6151 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6152 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6154 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6155 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6156 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6157 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6159 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6164 for (i = 0; i < 64; i++) {
6165 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6166 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6168 if (CHIP_IS_E1H(bp)) {
6169 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6170 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6175 /* soft reset pulse */
6176 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6177 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6180 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6183 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6184 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6185 if (!CHIP_REV_IS_SLOW(bp)) {
6186 /* enable hw interrupt from doorbell Q */
6187 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6190 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6191 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6192 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6195 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6197 if (CHIP_IS_E1H(bp))
6198 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6200 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6201 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6202 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6203 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6205 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6206 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6207 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6208 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6210 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6211 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6212 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6213 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6216 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6218 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6221 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6222 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6223 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6225 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6226 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6227 REG_WR(bp, i, 0xc0cac01a);
6228 /* TODO: replace with something meaningful */
6230 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6232 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6233 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6234 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6235 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6236 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6237 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6238 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6239 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6240 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6241 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6243 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6245 if (sizeof(union cdu_context) != 1024)
6246 /* we currently assume that a context is 1024 bytes */
6247 printk(KERN_ALERT PFX "please adjust the size of"
6248 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6250 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6251 val = (4 << 24) + (0 << 12) + 1024;
6252 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6254 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6255 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6256 /* enable context validation interrupt from CFC */
6257 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6259 /* set the thresholds to prevent CFC/CDU race */
6260 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6262 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6263 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6265 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6266 /* Reset PCIE errors for debug */
6267 REG_WR(bp, 0x2814, 0xffffffff);
6268 REG_WR(bp, 0x3820, 0xffffffff);
6270 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6271 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6272 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6273 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6275 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6276 if (CHIP_IS_E1H(bp)) {
6277 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6278 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6281 if (CHIP_REV_IS_SLOW(bp))
6284 /* finish CFC init */
6285 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6287 BNX2X_ERR("CFC LL_INIT failed\n");
6290 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6292 BNX2X_ERR("CFC AC_INIT failed\n");
6295 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6297 BNX2X_ERR("CFC CAM_INIT failed\n");
6300 REG_WR(bp, CFC_REG_DEBUG0, 0);
6302 /* read NIG statistic
6303 to see if this is our first up since powerup */
6304 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6305 val = *bnx2x_sp(bp, wb_data[0]);
6307 /* do internal memory self test */
6308 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6309 BNX2X_ERR("internal mem self test failed\n");
6313 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6314 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6315 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6316 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6317 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6318 bp->port.need_hw_lock = 1;
6325 bnx2x_setup_fan_failure_detection(bp);
6327 /* clear PXP2 attentions */
6328 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6330 enable_blocks_attention(bp);
6332 if (!BP_NOMCP(bp)) {
6333 bnx2x_acquire_phy_lock(bp);
6334 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6335 bnx2x_release_phy_lock(bp);
6337 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6342 static int bnx2x_init_port(struct bnx2x *bp)
6344 int port = BP_PORT(bp);
6345 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6349 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6351 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6353 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6354 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6356 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6357 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6358 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6359 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6362 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6364 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6365 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6366 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6368 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6370 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6371 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6372 /* no pause for emulation and FPGA */
6377 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6378 else if (bp->dev->mtu > 4096) {
6379 if (bp->flags & ONE_PORT_FLAG)
6383 /* (24*1024 + val*4)/256 */
6384 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6387 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6388 high = low + 56; /* 14*1024/256 */
6390 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6391 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
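	/* Worked arithmetic for the thresholds above (a sketch; the
	 * 256-byte block granularity is inferred from the inline
	 * "(24*1024 + val*4)/256" and "14*1024/256" comments): for an MTU
	 * above 4096, low = 24*1024/256 + val*4/256 rounded up, i.e.
	 * 96 + val/64 (+1 on a remainder), and in every branch
	 * high = low + 14*1024/256 = low + 56 blocks. */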
6394 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6396 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6397 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6398 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6399 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6401 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6402 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6403 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6404 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6406 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6407 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6409 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6411 /* configure PBF to work without PAUSE mtu 9000 */
6412 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6414 /* update threshold */
6415 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6416 /* update init credit */
6417 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6420 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6422 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6425 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6427 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6428 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6430 if (CHIP_IS_E1(bp)) {
6431 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6432 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6434 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6436 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6437 /* init aeu_mask_attn_func_0/1:
6438 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6439 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6440 * bits 4-7 are used for "per vn group attention" */
6441 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6442 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6444 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6445 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6446 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6447 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6448 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6450 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6452 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6454 if (CHIP_IS_E1H(bp)) {
6455 /* 0x2 disable e1hov, 0x1 enable */
6456 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6457 (IS_E1HMF(bp) ? 0x1 : 0x2));
6460 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6461 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6462 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6466 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6467 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6469 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6470 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6472 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6474 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6475 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6477 /* The GPIO should be swapped if the swap register is set and active */
6479 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6480 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6482 /* Select function upon port-swap configuration */
6484 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6485 aeu_gpio_mask = (swap_val && swap_override) ?
6486 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6487 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6489 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6490 aeu_gpio_mask = (swap_val && swap_override) ?
6491 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6492 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6494 val = REG_RD(bp, offset);
6495 /* add GPIO3 to group */
6496 val |= aeu_gpio_mask;
6497 REG_WR(bp, offset, val);
6501 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6502 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6503 /* add SPIO 5 to group 0 */
6505 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6506 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6507 val = REG_RD(bp, reg_addr);
6508 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6509 REG_WR(bp, reg_addr, val);
6517 bnx2x__link_reset(bp);
6522 #define ILT_PER_FUNC (768/2)
6523 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6524 /* the phys address is shifted right 12 bits and has a
6525 1=valid bit added at the 53rd bit;
6526 since this is a wide register(TM)
6527 we split it into two 32-bit writes
6529 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6530 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6531 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6532 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
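/* Illustration of the split above: for a DMA address 'a', the ILT
 * entry holds the page number (a >> 12) plus a valid bit at bit 52 of
 * the 64-bit value.  ONCHIP_ADDR1() supplies bits 0..31 of that value
 * and ONCHIP_ADDR2() bits 32..63, where (1 << 20) in the high word is
 * precisely bit 32 + 20 = 52.  PXP_ONE_ILT() and PXP_ILT_RANGE() pack
 * a (first, last) ILT line pair into one register value, last in the
 * high bits and first in the low 10 bits. */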
6535 #define CNIC_ILT_LINES 127
6536 #define CNIC_CTX_PER_ILT 16
6538 #define CNIC_ILT_LINES 0
6541 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6545 if (CHIP_IS_E1H(bp))
6546 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6548 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6550 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6553 static int bnx2x_init_func(struct bnx2x *bp)
6555 int port = BP_PORT(bp);
6556 int func = BP_FUNC(bp);
6560 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6562 /* set MSI reconfigure capability */
6563 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6564 val = REG_RD(bp, addr);
6565 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6566 REG_WR(bp, addr, val);
6568 i = FUNC_ILT_BASE(func);
6570 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6571 if (CHIP_IS_E1H(bp)) {
6572 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6573 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6575 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6576 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6579 i += 1 + CNIC_ILT_LINES;
6580 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6582 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6584 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6585 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6589 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6591 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6593 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6594 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6598 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6600 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6602 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6603 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6606 /* tell the searcher where the T2 table is */
6607 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6609 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6610 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6612 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6613 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6614 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6616 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6619 if (CHIP_IS_E1H(bp)) {
6620 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6621 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6622 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6623 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6624 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6625 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6626 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6627 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6628 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6630 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6631 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6634 /* HC init per function */
6635 if (CHIP_IS_E1H(bp)) {
6636 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6638 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6639 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6641 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6643 /* Reset PCIE errors for debug */
6644 REG_WR(bp, 0x2114, 0xffffffff);
6645 REG_WR(bp, 0x2120, 0xffffffff);
6650 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6654 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6655 BP_FUNC(bp), load_code);
6658 mutex_init(&bp->dmae_mutex);
6659 rc = bnx2x_gunzip_init(bp);
6663 switch (load_code) {
6664 case FW_MSG_CODE_DRV_LOAD_COMMON:
6665 rc = bnx2x_init_common(bp);
6670 case FW_MSG_CODE_DRV_LOAD_PORT:
6672 rc = bnx2x_init_port(bp);
6677 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6679 rc = bnx2x_init_func(bp);
6685 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6689 if (!BP_NOMCP(bp)) {
6690 int func = BP_FUNC(bp);
6692 bp->fw_drv_pulse_wr_seq =
6693 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6694 DRV_PULSE_SEQ_MASK);
6695 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6698 /* this needs to be done before gunzip end */
6699 bnx2x_zero_def_sb(bp);
6700 for_each_queue(bp, i)
6701 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6703 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6707 bnx2x_gunzip_end(bp);
6712 static void bnx2x_free_mem(struct bnx2x *bp)
6715 #define BNX2X_PCI_FREE(x, y, size) \
6718 pci_free_consistent(bp->pdev, size, x, y); \
6724 #define BNX2X_FREE(x) \
6736 for_each_queue(bp, i) {
6739 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6740 bnx2x_fp(bp, i, status_blk_mapping),
6741 sizeof(struct host_status_block));
6744 for_each_rx_queue(bp, i) {
6746 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6747 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6748 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6749 bnx2x_fp(bp, i, rx_desc_mapping),
6750 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6752 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6753 bnx2x_fp(bp, i, rx_comp_mapping),
6754 sizeof(struct eth_fast_path_rx_cqe) *
6758 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6759 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6760 bnx2x_fp(bp, i, rx_sge_mapping),
6761 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6764 for_each_tx_queue(bp, i) {
6766 /* fastpath tx rings: tx_buf tx_desc */
6767 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6768 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6769 bnx2x_fp(bp, i, tx_desc_mapping),
6770 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6772 /* end of fastpath */
6774 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6775 sizeof(struct host_def_status_block));
6777 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6778 sizeof(struct bnx2x_slowpath));
6781 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6782 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6783 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6784 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6785 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6786 sizeof(struct host_status_block));
6788 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6790 #undef BNX2X_PCI_FREE
6794 static int bnx2x_alloc_mem(struct bnx2x *bp)
6797 #define BNX2X_PCI_ALLOC(x, y, size) \
6799 x = pci_alloc_consistent(bp->pdev, size, y); \
6801 goto alloc_mem_err; \
6802 memset(x, 0, size); \
6805 #define BNX2X_ALLOC(x, size) \
6807 x = vmalloc(size); \
6809 goto alloc_mem_err; \
6810 memset(x, 0, size); \
6817 for_each_queue(bp, i) {
6818 bnx2x_fp(bp, i, bp) = bp;
6821 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6822 &bnx2x_fp(bp, i, status_blk_mapping),
6823 sizeof(struct host_status_block));
6826 for_each_rx_queue(bp, i) {
6828 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6829 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6830 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6831 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6832 &bnx2x_fp(bp, i, rx_desc_mapping),
6833 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6835 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6836 &bnx2x_fp(bp, i, rx_comp_mapping),
6837 sizeof(struct eth_fast_path_rx_cqe) *
6841 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6842 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6843 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6844 &bnx2x_fp(bp, i, rx_sge_mapping),
6845 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6848 for_each_tx_queue(bp, i) {
6850 /* fastpath tx rings: tx_buf tx_desc */
6851 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6852 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6853 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6854 &bnx2x_fp(bp, i, tx_desc_mapping),
6855 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6857 /* end of fastpath */
6859 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6860 sizeof(struct host_def_status_block));
6862 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6863 sizeof(struct bnx2x_slowpath));
6866 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6868 /* allocate searcher T2 table
6869 we allocate 1/4 of alloc num for T2
6870 (which is not entered into the ILT) */
6871 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6873 /* Initialize T2 (for 1024 connections) */
6874 for (i = 0; i < 16*1024; i += 64)
6875 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
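	/* A reading of the loop above: it threads the 16KiB T2 table into
	 * a singly linked free list for the searcher -- each 64-byte
	 * element stores, at offset 56, the physical address of the next
	 * element.  With 16*1024/64 = 256 elements the last link points one
	 * element past the buffer; SRC_REG_LASTFREE0 (programmed in
	 * bnx2x_init_func()) tells the hardware where the list ends. */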
6877 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6878 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6880 /* QM queues (128*MAX_CONN) */
6881 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6883 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6884 sizeof(struct host_status_block));
6887 /* Slow path ring */
6888 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6896 #undef BNX2X_PCI_ALLOC
6900 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6904 for_each_tx_queue(bp, i) {
6905 struct bnx2x_fastpath *fp = &bp->fp[i];
6907 u16 bd_cons = fp->tx_bd_cons;
6908 u16 sw_prod = fp->tx_pkt_prod;
6909 u16 sw_cons = fp->tx_pkt_cons;
6911 while (sw_cons != sw_prod) {
6912 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6918 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6922 for_each_rx_queue(bp, j) {
6923 struct bnx2x_fastpath *fp = &bp->fp[j];
6925 for (i = 0; i < NUM_RX_BD; i++) {
6926 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6927 struct sk_buff *skb = rx_buf->skb;
6932 pci_unmap_single(bp->pdev,
6933 pci_unmap_addr(rx_buf, mapping),
6934 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6939 if (!fp->disable_tpa)
6940 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6941 ETH_MAX_AGGREGATION_QUEUES_E1 :
6942 ETH_MAX_AGGREGATION_QUEUES_E1H);
6946 static void bnx2x_free_skbs(struct bnx2x *bp)
6948 bnx2x_free_tx_skbs(bp);
6949 bnx2x_free_rx_skbs(bp);
6952 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6956 free_irq(bp->msix_table[0].vector, bp->dev);
6957 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6958 bp->msix_table[0].vector);
6963 for_each_queue(bp, i) {
6964 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6965 "state %x\n", i, bp->msix_table[i + offset].vector,
6966 bnx2x_fp(bp, i, state));
6968 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6972 static void bnx2x_free_irq(struct bnx2x *bp)
6974 if (bp->flags & USING_MSIX_FLAG) {
6975 bnx2x_free_msix_irqs(bp);
6976 pci_disable_msix(bp->pdev);
6977 bp->flags &= ~USING_MSIX_FLAG;
6979 } else if (bp->flags & USING_MSI_FLAG) {
6980 free_irq(bp->pdev->irq, bp->dev);
6981 pci_disable_msi(bp->pdev);
6982 bp->flags &= ~USING_MSI_FLAG;
6985 free_irq(bp->pdev->irq, bp->dev);
6988 static int bnx2x_enable_msix(struct bnx2x *bp)
6990 int i, rc, offset = 1;
6993 bp->msix_table[0].entry = igu_vec;
6994 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6997 igu_vec = BP_L_ID(bp) + offset;
6998 bp->msix_table[1].entry = igu_vec;
6999 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7002 for_each_queue(bp, i) {
7003 igu_vec = BP_L_ID(bp) + offset + i;
7004 bp->msix_table[i + offset].entry = igu_vec;
7005 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7006 "(fastpath #%u)\n", i + offset, igu_vec, i);
7009 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7010 BNX2X_NUM_QUEUES(bp) + offset);
7012 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7016 bp->flags |= USING_MSIX_FLAG;
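	/* Resulting MSI-X table layout (the CNIC entry only when compiled
	 * in): entry 0 carries the slowpath interrupt, an optional CNIC
	 * entry follows, and fastpath queue i maps to IGU vector
	 * BP_L_ID(bp) + offset + i, so queue vectors stay contiguous per
	 * function. */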
7021 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7023 int i, rc, offset = 1;
7025 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7026 bp->dev->name, bp->dev);
7028 BNX2X_ERR("request sp irq failed\n");
7035 for_each_queue(bp, i) {
7036 struct bnx2x_fastpath *fp = &bp->fp[i];
7038 if (i < bp->num_rx_queues)
7039 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
7041 sprintf(fp->name, "%s-tx-%d",
7042 bp->dev->name, i - bp->num_rx_queues);
7044 rc = request_irq(bp->msix_table[i + offset].vector,
7045 bnx2x_msix_fp_int, 0, fp->name, fp);
7047 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
7048 bnx2x_free_msix_irqs(bp);
7052 fp->state = BNX2X_FP_STATE_IRQ;
7055 i = BNX2X_NUM_QUEUES(bp);
7056 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
7058 bp->dev->name, bp->msix_table[0].vector,
7059 0, bp->msix_table[offset].vector,
7060 i - 1, bp->msix_table[offset + i - 1].vector);
7065 static int bnx2x_enable_msi(struct bnx2x *bp)
7069 rc = pci_enable_msi(bp->pdev);
7071 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7074 bp->flags |= USING_MSI_FLAG;
7079 static int bnx2x_req_irq(struct bnx2x *bp)
7081 unsigned long flags;
7084 if (bp->flags & USING_MSI_FLAG)
7087 flags = IRQF_SHARED;
7089 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7090 bp->dev->name, bp->dev);
7092 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7097 static void bnx2x_napi_enable(struct bnx2x *bp)
7101 for_each_rx_queue(bp, i)
7102 napi_enable(&bnx2x_fp(bp, i, napi));
7105 static void bnx2x_napi_disable(struct bnx2x *bp)
7109 for_each_rx_queue(bp, i)
7110 napi_disable(&bnx2x_fp(bp, i, napi));
7113 static void bnx2x_netif_start(struct bnx2x *bp)
7117 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7118 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7121 if (netif_running(bp->dev)) {
7122 bnx2x_napi_enable(bp);
7123 bnx2x_int_enable(bp);
7124 if (bp->state == BNX2X_STATE_OPEN)
7125 netif_tx_wake_all_queues(bp->dev);
7130 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7132 bnx2x_int_disable_sync(bp, disable_hw);
7133 bnx2x_napi_disable(bp);
7134 netif_tx_disable(bp->dev);
7135 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7139 * Init service functions
7143 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7145 * @param bp driver descriptor
7146 * @param set set or clear an entry (1 or 0)
7147 * @param mac pointer to a buffer containing a MAC
7148 * @param cl_bit_vec bit vector of clients to register a MAC for
7149 * @param cam_offset offset in a CAM to use
7150 * @param with_bcast set broadcast MAC as well
7152 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7153 u32 cl_bit_vec, u8 cam_offset,
7156 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7157 int port = BP_PORT(bp);
7160 * unicasts 0-31:port0 32-63:port1
7161 * multicast 64-127:port0 128-191:port1
7163 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7164 config->hdr.offset = cam_offset;
7165 config->hdr.client_id = 0xff;
7166 config->hdr.reserved1 = 0;
7169 config->config_table[0].cam_entry.msb_mac_addr =
7170 swab16(*(u16 *)&mac[0]);
7171 config->config_table[0].cam_entry.middle_mac_addr =
7172 swab16(*(u16 *)&mac[2]);
7173 config->config_table[0].cam_entry.lsb_mac_addr =
7174 swab16(*(u16 *)&mac[4]);
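	/* MAC packing example (swab16 because the host is little-endian
	 * while the CAM fields apparently hold big-endian 16-bit halves):
	 * for 00:11:22:33:44:55 the fields become msb = 0x0011,
	 * middle = 0x2233, lsb = 0x4455. */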
7175 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7177 config->config_table[0].target_table_entry.flags = 0;
7179 CAM_INVALIDATE(config->config_table[0]);
7180 config->config_table[0].target_table_entry.clients_bit_vector =
7181 cpu_to_le32(cl_bit_vec);
7182 config->config_table[0].target_table_entry.vlan_id = 0;
7184 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7185 (set ? "setting" : "clearing"),
7186 config->config_table[0].cam_entry.msb_mac_addr,
7187 config->config_table[0].cam_entry.middle_mac_addr,
7188 config->config_table[0].cam_entry.lsb_mac_addr);
7192 config->config_table[1].cam_entry.msb_mac_addr =
7193 cpu_to_le16(0xffff);
7194 config->config_table[1].cam_entry.middle_mac_addr =
7195 cpu_to_le16(0xffff);
7196 config->config_table[1].cam_entry.lsb_mac_addr =
7197 cpu_to_le16(0xffff);
7198 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7200 config->config_table[1].target_table_entry.flags =
7201 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7203 CAM_INVALIDATE(config->config_table[1]);
7204 config->config_table[1].target_table_entry.clients_bit_vector =
7205 cpu_to_le32(cl_bit_vec);
7206 config->config_table[1].target_table_entry.vlan_id = 0;
7209 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7210 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7211 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7215 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7217 * @param bp driver descriptor
7218 * @param set set or clear an entry (1 or 0)
7219 * @param mac pointer to a buffer containing a MAC
7220 * @param cl_bit_vec bit vector of clients to register a MAC for
7221 * @param cam_offset offset in a CAM to use
7223 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7224 u32 cl_bit_vec, u8 cam_offset)
7226 struct mac_configuration_cmd_e1h *config =
7227 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7229 config->hdr.length = 1;
7230 config->hdr.offset = cam_offset;
7231 config->hdr.client_id = 0xff;
7232 config->hdr.reserved1 = 0;
7235 config->config_table[0].msb_mac_addr =
7236 swab16(*(u16 *)&mac[0]);
7237 config->config_table[0].middle_mac_addr =
7238 swab16(*(u16 *)&mac[2]);
7239 config->config_table[0].lsb_mac_addr =
7240 swab16(*(u16 *)&mac[4]);
7241 config->config_table[0].clients_bit_vector =
7242 cpu_to_le32(cl_bit_vec);
7243 config->config_table[0].vlan_id = 0;
7244 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7246 config->config_table[0].flags = BP_PORT(bp);
7248 config->config_table[0].flags =
7249 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7251 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
7252 (set ? "setting" : "clearing"),
7253 config->config_table[0].msb_mac_addr,
7254 config->config_table[0].middle_mac_addr,
7255 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7257 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7258 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7259 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7262 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7263 int *state_p, int poll)
7265 /* can take a while if any port is running */
7268 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7269 poll ? "polling" : "waiting", state, idx);
7274 bnx2x_rx_int(bp->fp, 10);
7275 /* if index is different from 0
7276 * the reply for some commands will
7277 * be on the non default queue
7280 bnx2x_rx_int(&bp->fp[idx], 10);
7283 mb(); /* state is changed by bnx2x_sp_event() */
7284 if (*state_p == state) {
7285 #ifdef BNX2X_STOP_ON_ERROR
7286 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7298 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7299 poll ? "polling" : "waiting", state, idx);
7300 #ifdef BNX2X_STOP_ON_ERROR
7307 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7309 bp->set_mac_pending++;
7312 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7313 (1 << bp->fp->cl_id), BP_FUNC(bp));
7315 /* Wait for a completion */
7316 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7319 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7321 bp->set_mac_pending++;
7324 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7325 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7328 /* Wait for a completion */
7329 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
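/* Pattern shared by both helpers above: bump bp->set_mac_pending, post
 * the SET_MAC ramrod, then bnx2x_wait_ramrod() until the slowpath event
 * handler brings the counter back to 0.  When clearing an entry
 * (set == 0) the wait runs in polling mode, since interrupts may
 * already be on their way down. */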
7334 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7335 * MAC(s). This function will wait until the ramrod completion
7338 * @param bp driver handle
7339 * @param set set or clear the CAM entry
7341 * @return 0 if success, -ENODEV if ramrod doesn't return.
7343 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7345 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7347 bp->set_mac_pending++;
7350 /* Send a SET_MAC ramrod */
7352 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7353 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7356 /* CAM allocation for E1H
7357 * unicasts: by func number
7358 * multicast: 20+FUNC*20, 20 each
7360 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7361 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7363 /* Wait for a completion when setting */
7364 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7370 static int bnx2x_setup_leading(struct bnx2x *bp)
7374 /* reset IGU state */
7375 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7378 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7380 /* Wait for completion */
7381 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7386 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7388 struct bnx2x_fastpath *fp = &bp->fp[index];
7390 /* reset IGU state */
7391 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7394 fp->state = BNX2X_FP_STATE_OPENING;
7395 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7398 /* Wait for completion */
7399 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7403 static int bnx2x_poll(struct napi_struct *napi, int budget);
7405 static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7406 int *num_tx_queues_out)
7408 int _num_rx_queues = 0, _num_tx_queues = 0;
7410 switch (bp->multi_mode) {
7411 case ETH_RSS_MODE_DISABLED:
7416 case ETH_RSS_MODE_REGULAR:
7418 _num_rx_queues = min_t(u32, num_rx_queues,
7419 BNX2X_MAX_QUEUES(bp));
7421 _num_rx_queues = min_t(u32, num_online_cpus(),
7422 BNX2X_MAX_QUEUES(bp));
7425 _num_tx_queues = min_t(u32, num_tx_queues,
7426 BNX2X_MAX_QUEUES(bp));
7428 _num_tx_queues = min_t(u32, num_online_cpus(),
7429 BNX2X_MAX_QUEUES(bp));
7431 /* There must not be more Tx queues than Rx queues */
7432 if (_num_tx_queues > _num_rx_queues) {
7433 BNX2X_ERR("number of tx queues (%d) > "
7434 "number of rx queues (%d)"
7435 " defaulting to %d\n",
7436 _num_tx_queues, _num_rx_queues,
7438 _num_tx_queues = _num_rx_queues;
7449 *num_rx_queues_out = _num_rx_queues;
7450 *num_tx_queues_out = _num_tx_queues;
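	/* Example (a sketch; BNX2X_MAX_QUEUES() is defined elsewhere): with
	 * multi_mode=1 on an 8-CPU host and neither num_rx_queues nor
	 * num_tx_queues set, both counts default to
	 * min(num_online_cpus(), BNX2X_MAX_QUEUES(bp)); an explicit
	 * num_tx_queues larger than the Rx count is clamped down to it by
	 * the check above. */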
7453 static int bnx2x_set_int_mode(struct bnx2x *bp)
7460 bp->num_rx_queues = 1;
7461 bp->num_tx_queues = 1;
7462 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7467 /* Set interrupt mode according to bp->multi_mode value */
7468 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7469 &bp->num_tx_queues);
7471 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
7472 bp->num_rx_queues, bp->num_tx_queues);
7474 /* if we can't use MSI-X we only need one fp,
7475 * so try to enable MSI-X with the requested number of fp's
7476 * and fallback to MSI or legacy INTx with one fp
7478 rc = bnx2x_enable_msix(bp);
7480 /* failed to enable MSI-X */
7482 BNX2X_ERR("Multi requested but failed to "
7483 "enable MSI-X (rx %d tx %d), "
7484 "set number of queues to 1\n",
7485 bp->num_rx_queues, bp->num_tx_queues);
7486 bp->num_rx_queues = 1;
7487 bp->num_tx_queues = 1;
7491 bp->dev->real_num_tx_queues = bp->num_tx_queues;
7496 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7497 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7500 /* must be called with rtnl_lock */
7501 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7506 #ifdef BNX2X_STOP_ON_ERROR
7507 if (unlikely(bp->panic))
7511 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7513 rc = bnx2x_set_int_mode(bp);
7515 if (bnx2x_alloc_mem(bp))
7518 for_each_rx_queue(bp, i)
7519 bnx2x_fp(bp, i, disable_tpa) =
7520 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7522 for_each_rx_queue(bp, i)
7523 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7526 bnx2x_napi_enable(bp);
7528 if (bp->flags & USING_MSIX_FLAG) {
7529 rc = bnx2x_req_msix_irqs(bp);
7531 pci_disable_msix(bp->pdev);
7535 /* Fall back to INTx if MSI-X could not be enabled due to lack of
7536 memory (in bnx2x_set_int_mode()) */
7537 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7538 bnx2x_enable_msi(bp);
7540 rc = bnx2x_req_irq(bp);
7542 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7543 if (bp->flags & USING_MSI_FLAG)
7544 pci_disable_msi(bp->pdev);
7547 if (bp->flags & USING_MSI_FLAG) {
7548 bp->dev->irq = bp->pdev->irq;
7549 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7550 bp->dev->name, bp->pdev->irq);
7554 /* Send LOAD_REQUEST command to MCP
7555 Returns the type of LOAD command:
7556 if it is the first port to be initialized
7557 common blocks should be initialized, otherwise - not
7559 if (!BP_NOMCP(bp)) {
7560 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7562 BNX2X_ERR("MCP response failure, aborting\n");
7566 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7567 rc = -EBUSY; /* other port in diagnostic mode */
7572 int port = BP_PORT(bp);
7574 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7575 load_count[0], load_count[1], load_count[2]);
7577 load_count[1 + port]++;
7578 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7579 load_count[0], load_count[1], load_count[2]);
7580 if (load_count[0] == 1)
7581 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7582 else if (load_count[1 + port] == 1)
7583 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7585 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
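		/* No-MCP bookkeeping: load_count[0] counts all loaded
		 * functions and load_count[1 + port] those on this port, so
		 * the first function overall initializes the common blocks,
		 * the first on its port the port blocks, and everyone else
		 * only its own function. */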
7588 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7589 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7593 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7596 rc = bnx2x_init_hw(bp, load_code);
7598 BNX2X_ERR("HW init failed, aborting\n");
7602 /* Setup NIC internals and enable interrupts */
7603 bnx2x_nic_init(bp, load_code);
7605 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7606 (bp->common.shmem2_base))
7607 SHMEM2_WR(bp, dcc_support,
7608 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7609 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7611 /* Send LOAD_DONE command to MCP */
7612 if (!BP_NOMCP(bp)) {
7613 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7615 BNX2X_ERR("MCP response failure, aborting\n");
7621 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7623 rc = bnx2x_setup_leading(bp);
7625 BNX2X_ERR("Setup leading failed!\n");
7626 #ifndef BNX2X_STOP_ON_ERROR
7634 if (CHIP_IS_E1H(bp))
7635 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7636 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7637 bp->flags |= MF_FUNC_DIS;
7640 if (bp->state == BNX2X_STATE_OPEN) {
7642 /* Enable Timer scan */
7643 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7645 for_each_nondefault_queue(bp, i) {
7646 rc = bnx2x_setup_multi(bp, i);
7656 bnx2x_set_eth_mac_addr_e1(bp, 1);
7658 bnx2x_set_eth_mac_addr_e1h(bp, 1);
7660 /* Set iSCSI L2 MAC */
7661 mutex_lock(&bp->cnic_mutex);
7662 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7663 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7664 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7666 mutex_unlock(&bp->cnic_mutex);
7671 bnx2x_initial_phy_init(bp, load_mode);
7673 /* Start fast path */
7674 switch (load_mode) {
7676 if (bp->state == BNX2X_STATE_OPEN) {
7677 /* Tx queue should be only reenabled */
7678 netif_tx_wake_all_queues(bp->dev);
7680 /* Initialize the receive filter. */
7681 bnx2x_set_rx_mode(bp->dev);
7685 netif_tx_start_all_queues(bp->dev);
7686 if (bp->state != BNX2X_STATE_OPEN)
7687 netif_tx_disable(bp->dev);
7688 /* Initialize the receive filter. */
7689 bnx2x_set_rx_mode(bp->dev);
7693 /* Initialize the receive filter. */
7694 bnx2x_set_rx_mode(bp->dev);
7695 bp->state = BNX2X_STATE_DIAG;
7703 bnx2x__link_status_update(bp);
7705 /* start the timer */
7706 mod_timer(&bp->timer, jiffies + bp->current_interval);
7709 bnx2x_setup_cnic_irq_info(bp);
7710 if (bp->state == BNX2X_STATE_OPEN)
7711 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7718 /* Disable Timer scan */
7719 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7722 bnx2x_int_disable_sync(bp, 1);
7723 if (!BP_NOMCP(bp)) {
7724 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7725 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7728 /* Free SKBs, SGEs, TPA pool and driver internals */
7729 bnx2x_free_skbs(bp);
7730 for_each_rx_queue(bp, i)
7731 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7736 bnx2x_napi_disable(bp);
7737 for_each_rx_queue(bp, i)
7738 netif_napi_del(&bnx2x_fp(bp, i, napi));
7744 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7746 struct bnx2x_fastpath *fp = &bp->fp[index];
7749 /* halt the connection */
7750 fp->state = BNX2X_FP_STATE_HALTING;
7751 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7753 /* Wait for completion */
7754 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7756 if (rc) /* timeout */
7759 /* delete cfc entry */
7760 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7762 /* Wait for completion */
7763 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7768 static int bnx2x_stop_leading(struct bnx2x *bp)
7770 __le16 dsb_sp_prod_idx;
7771 /* if the other port is handling traffic,
7772 this can take a lot of time */
7778 /* Send HALT ramrod */
7779 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7780 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7782 /* Wait for completion */
7783 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7784 &(bp->fp[0].state), 1);
7785 if (rc) /* timeout */
7788 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7790 /* Send PORT_DELETE ramrod */
7791 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7793 /* Wait for completion to arrive on default status block
7794 we are going to reset the chip anyway
7795 so there is not much to do if this times out
7797 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7799 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7800 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7801 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7802 #ifdef BNX2X_STOP_ON_ERROR
7810 rmb(); /* Refresh the dsb_sp_prod */
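	/* PORT_DEL has no per-queue state to poll, so completion is
	 * detected by snapshotting the default status block producer before
	 * posting the ramrod and spinning until it moves (hence the rmb()
	 * refresh above). */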
7812 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7813 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7818 static void bnx2x_reset_func(struct bnx2x *bp)
7820 int port = BP_PORT(bp);
7821 int func = BP_FUNC(bp);
7825 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7826 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7829 /* Disable Timer scan */
7830 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7832 * Wait for at least 10ms and up to 2 seconds for the timers scan to
7835 for (i = 0; i < 200; i++) {
7837 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7842 base = FUNC_ILT_BASE(func);
7843 for (i = base; i < base + ILT_PER_FUNC; i++)
7844 bnx2x_ilt_wr(bp, i, 0);
7847 static void bnx2x_reset_port(struct bnx2x *bp)
7849 int port = BP_PORT(bp);
7852 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7854 /* Do not rcv packets to BRB */
7855 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7856 /* Do not direct rcv packets that are not for MCP to the BRB */
7857 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7858 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7861 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7864 /* Check for BRB port occupancy */
7865 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7867 DP(NETIF_MSG_IFDOWN,
7868 "BRB1 is not empty %d blocks are occupied\n", val);
7870 /* TODO: Close Doorbell port? */
7873 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7875 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7876 BP_FUNC(bp), reset_code);
7878 switch (reset_code) {
7879 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7880 bnx2x_reset_port(bp);
7881 bnx2x_reset_func(bp);
7882 bnx2x_reset_common(bp);
7885 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7886 bnx2x_reset_port(bp);
7887 bnx2x_reset_func(bp);
7890 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7891 bnx2x_reset_func(bp);
7895 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7900 /* must be called with rtnl_lock */
7901 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7903 int port = BP_PORT(bp);
7908 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7910 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7912 /* Set "drop all" */
7913 bp->rx_mode = BNX2X_RX_MODE_NONE;
7914 bnx2x_set_storm_rx_mode(bp);
7916 /* Disable HW interrupts, NAPI and Tx */
7917 bnx2x_netif_stop(bp, 1);
7919 del_timer_sync(&bp->timer);
7920 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7921 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7922 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7927 /* Wait until tx fastpath tasks complete */
7928 for_each_tx_queue(bp, i) {
7929 struct bnx2x_fastpath *fp = &bp->fp[i];
7932 while (bnx2x_has_tx_work_unload(fp)) {
7936 BNX2X_ERR("timeout waiting for queue[%d]\n",
7938 #ifdef BNX2X_STOP_ON_ERROR
7949 /* Give HW time to discard old tx messages */
7952 if (CHIP_IS_E1(bp)) {
7953 struct mac_configuration_cmd *config =
7954 bnx2x_sp(bp, mcast_config);
7956 bnx2x_set_eth_mac_addr_e1(bp, 0);
7958 for (i = 0; i < config->hdr.length; i++)
7959 CAM_INVALIDATE(config->config_table[i]);
7961 config->hdr.length = i;
7962 if (CHIP_REV_IS_SLOW(bp))
7963 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7965 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7966 config->hdr.client_id = bp->fp->cl_id;
7967 config->hdr.reserved1 = 0;
7969 bp->set_mac_pending++;
7972 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7973 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7974 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7977 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7979 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7981 for (i = 0; i < MC_HASH_SIZE; i++)
7982 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7984 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7987 /* Clear iSCSI L2 MAC */
7988 mutex_lock(&bp->cnic_mutex);
7989 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7990 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7991 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7993 mutex_unlock(&bp->cnic_mutex);
7996 if (unload_mode == UNLOAD_NORMAL)
7997 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7999 else if (bp->flags & NO_WOL_FLAG)
8000 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8003 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8004 u8 *mac_addr = bp->dev->dev_addr;
8006 /* The mac address is written to entries 1-4 to
8007 preserve entry 0 which is used by the PMF */
8008 u8 entry = (BP_E1HVN(bp) + 1)*8;
8010 val = (mac_addr[0] << 8) | mac_addr[1];
8011 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8013 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8014 (mac_addr[4] << 8) | mac_addr[5];
8015 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
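		/* WoL match illustration: MAC_MATCH entries are 8 bytes
		 * apart and (BP_E1HVN(bp) + 1)*8 skips entry 0, which the
		 * PMF owns.  The first register takes the two high MAC
		 * bytes, the second the remaining four, so
		 * 00:11:22:33:44:55 is written as 0x00000011 and
		 * 0x22334455. */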
8017 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8020 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8022 /* Close multi and leading connections
8023 Completions for ramrods are collected in a synchronous way */
8024 for_each_nondefault_queue(bp, i)
8025 if (bnx2x_stop_multi(bp, i))
8028 rc = bnx2x_stop_leading(bp);
8030 BNX2X_ERR("Stop leading failed!\n");
8031 #ifdef BNX2X_STOP_ON_ERROR
8040 reset_code = bnx2x_fw_command(bp, reset_code);
8042 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
8043 load_count[0], load_count[1], load_count[2]);
8045 load_count[1 + port]--;
8046 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
8047 load_count[0], load_count[1], load_count[2]);
8048 if (load_count[0] == 0)
8049 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8050 else if (load_count[1 + port] == 0)
8051 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8053 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8056 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8057 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8058 bnx2x__link_reset(bp);
8060 /* Reset the chip */
8061 bnx2x_reset_chip(bp, reset_code);
8063 /* Report UNLOAD_DONE to MCP */
8065 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8069 /* Free SKBs, SGEs, TPA pool and driver internals */
8070 bnx2x_free_skbs(bp);
8071 for_each_rx_queue(bp, i)
8072 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8073 for_each_rx_queue(bp, i)
8074 netif_napi_del(&bnx2x_fp(bp, i, napi));
8077 bp->state = BNX2X_STATE_CLOSED;
8079 netif_carrier_off(bp->dev);
8084 static void bnx2x_reset_task(struct work_struct *work)
8086 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8088 #ifdef BNX2X_STOP_ON_ERROR
8089 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8090 " so reset not done to allow debug dump,\n"
8091 " you will need to reboot when done\n");
8097 if (!netif_running(bp->dev))
8098 goto reset_task_exit;
8100 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8101 bnx2x_nic_load(bp, LOAD_NORMAL);
8107 /* end of nic load/unload */
8112 * Init service functions
8115 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8118 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8119 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8120 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8121 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8122 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8123 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8124 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8125 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8127 BNX2X_ERR("Unsupported function index: %d\n", func);
8132 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8134 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8136 /* Flush all outstanding writes */
8139 /* Pretend to be function 0 */
8141 /* Flush the GRC transaction (in the chip) */
8142 new_val = REG_RD(bp, reg);
8144 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8149 /* From now we are in the "like-E1" mode */
8150 bnx2x_int_disable(bp);
8152 /* Flush all outstanding writes */
8155 /* Restore the original function settings */
8156 REG_WR(bp, reg, orig_func);
8157 new_val = REG_RD(bp, reg);
8158 if (new_val != orig_func) {
8159 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8160 orig_func, new_val);
8165 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8167 if (CHIP_IS_E1H(bp))
8168 bnx2x_undi_int_disable_e1h(bp, func);
8170 bnx2x_int_disable(bp);
8173 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8177 /* Check if there is any driver already loaded */
8178 val = REG_RD(bp, MISC_REG_UNPREPARED);
8180 /* Check if it is the UNDI driver
8181 * UNDI driver initializes CID offset for normal bell to 0x7
8183 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8184 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8186 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8188 int func = BP_FUNC(bp);
8192 /* clear the UNDI indication */
8193 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8195 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8197 /* try unload UNDI on port 0 */
8200 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8201 DRV_MSG_SEQ_NUMBER_MASK);
8202 reset_code = bnx2x_fw_command(bp, reset_code);
8204 /* if UNDI is loaded on the other port */
8205 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8207 /* send "DONE" for previous unload */
8208 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8210 /* unload UNDI on port 1 */
8213 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8214 DRV_MSG_SEQ_NUMBER_MASK);
8215 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8217 bnx2x_fw_command(bp, reset_code);
8220 /* now it's safe to release the lock */
8221 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8223 bnx2x_undi_int_disable(bp, func);
8225 /* close input traffic and wait for it */
8226 /* Do not rcv packets to BRB */
8228 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8229 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8230 /* Do not direct rcv packets that are not for MCP to
8233 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8234 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8237 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8238 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8241 /* save NIG port swap info */
8242 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8243 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8246 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8249 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8251 /* take the NIG out of reset and restore swap values */
8253 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8254 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8255 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8256 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8258 /* send unload done to the MCP */
8259 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8261 /* restore our func and fw_seq */
8264 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8265 DRV_MSG_SEQ_NUMBER_MASK);
8268 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8272 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8274 u32 val, val2, val3, val4, id;
8277 /* Get the chip revision id and number. */
8278 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8279 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8280 id = ((val & 0xffff) << 16);
8281 val = REG_RD(bp, MISC_REG_CHIP_REV);
8282 id |= ((val & 0xf) << 12);
8283 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8284 id |= ((val & 0xff) << 4);
8285 val = REG_RD(bp, MISC_REG_BOND_ID);
8287 bp->common.chip_id = id;
8288 bp->link_params.chip_id = bp->common.chip_id;
8289 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8291 val = (REG_RD(bp, 0x2874) & 0x55);
8292 if ((bp->common.chip_id & 0x1) ||
8293 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8294 bp->flags |= ONE_PORT_FLAG;
8295 BNX2X_DEV_INFO("single port device\n");
8298 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8299 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8300 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8301 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8302 bp->common.flash_size, bp->common.flash_size);
8304 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8305 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8306 bp->link_params.shmem_base = bp->common.shmem_base;
8307 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8308 bp->common.shmem_base, bp->common.shmem2_base);
8310 if (!bp->common.shmem_base ||
8311 (bp->common.shmem_base < 0xA0000) ||
8312 (bp->common.shmem_base >= 0xC0000)) {
8313 BNX2X_DEV_INFO("MCP not active\n");
8314 bp->flags |= NO_MCP_FLAG;
8318 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8319 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8320 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8321 BNX2X_ERR("BAD MCP validity signature\n");
8323 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8324 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8326 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8327 SHARED_HW_CFG_LED_MODE_MASK) >>
8328 SHARED_HW_CFG_LED_MODE_SHIFT);
8330 bp->link_params.feature_config_flags = 0;
8331 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8332 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8333 bp->link_params.feature_config_flags |=
8334 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8336 bp->link_params.feature_config_flags &=
8337 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8339 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8340 bp->common.bc_ver = val;
8341 BNX2X_DEV_INFO("bc_ver %X\n", val);
8342 if (val < BNX2X_BC_VER) {
8343 /* for now only warn
8344 * later we might need to enforce this */
8345 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8346 " please upgrade BC\n", BNX2X_BC_VER, val);
8348 bp->link_params.feature_config_flags |=
8349 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8350 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8352 if (BP_E1HVN(bp) == 0) {
8353 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8354 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8356 /* no WOL capability for E1HVN != 0 */
8357 bp->flags |= NO_WOL_FLAG;
8359 BNX2X_DEV_INFO("%sWoL capable\n",
8360 (bp->flags & NO_WOL_FLAG) ? "not " : "");
8362 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8363 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8364 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8365 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8367 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8368 val, val2, val3, val4);
8371 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8374 int port = BP_PORT(bp);
8377 switch (switch_cfg) {
8379 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8382 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8383 switch (ext_phy_type) {
8384 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8385 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8388 bp->port.supported |= (SUPPORTED_10baseT_Half |
8389 SUPPORTED_10baseT_Full |
8390 SUPPORTED_100baseT_Half |
8391 SUPPORTED_100baseT_Full |
8392 SUPPORTED_1000baseT_Full |
8393 SUPPORTED_2500baseX_Full |
8398 SUPPORTED_Asym_Pause);
8401 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8402 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8405 bp->port.supported |= (SUPPORTED_10baseT_Half |
8406 SUPPORTED_10baseT_Full |
8407 SUPPORTED_100baseT_Half |
8408 SUPPORTED_100baseT_Full |
8409 SUPPORTED_1000baseT_Full |
8414 SUPPORTED_Asym_Pause);
8418 BNX2X_ERR("NVRAM config error. "
8419 "BAD SerDes ext_phy_config 0x%x\n",
8420 bp->link_params.ext_phy_config);
8424 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8426 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8429 case SWITCH_CFG_10G:
8430 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8433 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8434 switch (ext_phy_type) {
8435 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8436 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8439 bp->port.supported |= (SUPPORTED_10baseT_Half |
8440 SUPPORTED_10baseT_Full |
8441 SUPPORTED_100baseT_Half |
8442 SUPPORTED_100baseT_Full |
8443 SUPPORTED_1000baseT_Full |
8444 SUPPORTED_2500baseX_Full |
8445 SUPPORTED_10000baseT_Full |
8450 SUPPORTED_Asym_Pause);
8453 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8454 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8457 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8458 SUPPORTED_1000baseT_Full |
8462 SUPPORTED_Asym_Pause);
8465 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8466 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8469 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8470 SUPPORTED_2500baseX_Full |
8471 SUPPORTED_1000baseT_Full |
8475 SUPPORTED_Asym_Pause);
8478 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8479 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8482 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8485 SUPPORTED_Asym_Pause);
8488 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8489 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8492 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8493 SUPPORTED_1000baseT_Full |
8496 SUPPORTED_Asym_Pause);
8499 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8500 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8503 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8504 SUPPORTED_1000baseT_Full |
8508 SUPPORTED_Asym_Pause);
8511 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8512 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8515 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8516 SUPPORTED_1000baseT_Full |
8520 SUPPORTED_Asym_Pause);
8523 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8524 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8527 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8531 SUPPORTED_Asym_Pause);
8534 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8535 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8538 bp->port.supported |= (SUPPORTED_10baseT_Half |
8539 SUPPORTED_10baseT_Full |
8540 SUPPORTED_100baseT_Half |
8541 SUPPORTED_100baseT_Full |
8542 SUPPORTED_1000baseT_Full |
8543 SUPPORTED_10000baseT_Full |
8547 SUPPORTED_Asym_Pause);
8550 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8551 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8552 bp->link_params.ext_phy_config);
8556 BNX2X_ERR("NVRAM config error. "
8557 "BAD XGXS ext_phy_config 0x%x\n",
8558 bp->link_params.ext_phy_config);
8562 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8564 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8569 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8570 bp->port.link_config);
8573 bp->link_params.phy_addr = bp->port.phy_addr;
8575 /* mask what we support according to speed_cap_mask */
8576 if (!(bp->link_params.speed_cap_mask &
8577 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8578 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8580 if (!(bp->link_params.speed_cap_mask &
8581 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8582 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8584 if (!(bp->link_params.speed_cap_mask &
8585 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8586 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8588 if (!(bp->link_params.speed_cap_mask &
8589 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8590 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8592 if (!(bp->link_params.speed_cap_mask &
8593 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8594 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8595 SUPPORTED_1000baseT_Full);
8597 if (!(bp->link_params.speed_cap_mask &
8598 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8599 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8601 if (!(bp->link_params.speed_cap_mask &
8602 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8603 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8605 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8608 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8610 bp->link_params.req_duplex = DUPLEX_FULL;
8612 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8613 case PORT_FEATURE_LINK_SPEED_AUTO:
8614 if (bp->port.supported & SUPPORTED_Autoneg) {
8615 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8616 bp->port.advertising = bp->port.supported;
8619 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8621 if ((ext_phy_type ==
8622 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8624 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8625 /* force 10G, no AN */
8626 bp->link_params.req_line_speed = SPEED_10000;
8627 bp->port.advertising =
8628 (ADVERTISED_10000baseT_Full |
8632 BNX2X_ERR("NVRAM config error. "
8633 "Invalid link_config 0x%x"
8634 " Autoneg not supported\n",
8635 bp->port.link_config);
8640 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8641 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8642 bp->link_params.req_line_speed = SPEED_10;
8643 bp->port.advertising = (ADVERTISED_10baseT_Full |
8646 BNX2X_ERR("NVRAM config error. "
8647 "Invalid link_config 0x%x"
8648 " speed_cap_mask 0x%x\n",
8649 bp->port.link_config,
8650 bp->link_params.speed_cap_mask);
8655 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8656 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8657 bp->link_params.req_line_speed = SPEED_10;
8658 bp->link_params.req_duplex = DUPLEX_HALF;
8659 bp->port.advertising = (ADVERTISED_10baseT_Half |
8662 BNX2X_ERR("NVRAM config error. "
8663 "Invalid link_config 0x%x"
8664 " speed_cap_mask 0x%x\n",
8665 bp->port.link_config,
8666 bp->link_params.speed_cap_mask);
8671 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8672 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8673 bp->link_params.req_line_speed = SPEED_100;
8674 bp->port.advertising = (ADVERTISED_100baseT_Full |
8677 BNX2X_ERR("NVRAM config error. "
8678 "Invalid link_config 0x%x"
8679 " speed_cap_mask 0x%x\n",
8680 bp->port.link_config,
8681 bp->link_params.speed_cap_mask);
8686 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8687 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8688 bp->link_params.req_line_speed = SPEED_100;
8689 bp->link_params.req_duplex = DUPLEX_HALF;
8690 bp->port.advertising = (ADVERTISED_100baseT_Half |
8693 BNX2X_ERR("NVRAM config error. "
8694 "Invalid link_config 0x%x"
8695 " speed_cap_mask 0x%x\n",
8696 bp->port.link_config,
8697 bp->link_params.speed_cap_mask);
8702 case PORT_FEATURE_LINK_SPEED_1G:
8703 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8704 bp->link_params.req_line_speed = SPEED_1000;
8705 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8708 BNX2X_ERR("NVRAM config error. "
8709 "Invalid link_config 0x%x"
8710 " speed_cap_mask 0x%x\n",
8711 bp->port.link_config,
8712 bp->link_params.speed_cap_mask);
8717 case PORT_FEATURE_LINK_SPEED_2_5G:
8718 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8719 bp->link_params.req_line_speed = SPEED_2500;
8720 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8723 BNX2X_ERR("NVRAM config error. "
8724 "Invalid link_config 0x%x"
8725 " speed_cap_mask 0x%x\n",
8726 bp->port.link_config,
8727 bp->link_params.speed_cap_mask);
8732 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8733 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8734 case PORT_FEATURE_LINK_SPEED_10G_KR:
8735 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8736 bp->link_params.req_line_speed = SPEED_10000;
8737 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8740 BNX2X_ERR("NVRAM config error. "
8741 "Invalid link_config 0x%x"
8742 " speed_cap_mask 0x%x\n",
8743 bp->port.link_config,
8744 bp->link_params.speed_cap_mask);
8750 BNX2X_ERR("NVRAM config error. "
8751 "BAD link speed link_config 0x%x\n",
8752 bp->port.link_config);
8753 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8754 bp->port.advertising = bp->port.supported;
8758 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8759 PORT_FEATURE_FLOW_CONTROL_MASK);
8760 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8761 !(bp->port.supported & SUPPORTED_Autoneg))
8762 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8764 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8765 " advertising 0x%x\n",
8766 bp->link_params.req_line_speed,
8767 bp->link_params.req_duplex,
8768 bp->link_params.req_flow_ctrl, bp->port.advertising);
8771 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8773 mac_hi = cpu_to_be16(mac_hi);
8774 mac_lo = cpu_to_be32(mac_lo);
8775 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8776 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
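/* Worked example (illustrative values): with mac_hi = 0x0010 and
 * mac_lo = 0x18421000, the conversions and memcpy()s above yield
 *
 *	mac_buf[] = { 0x00, 0x10, 0x18, 0x42, 0x10, 0x00 }
 *
 * i.e. 00:10:18:42:10:00 - the 16 MSBs of the address come from
 * mac_hi and the lower 32 bits from mac_lo, each made big-endian
 * first so the buffer ends up in on-wire byte order.
 */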
8779 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8781 int port = BP_PORT(bp);
8787 bp->link_params.bp = bp;
8788 bp->link_params.port = port;
8790 bp->link_params.lane_config =
8791 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8792 bp->link_params.ext_phy_config =
8794 dev_info.port_hw_config[port].external_phy_config);
8795 /* BCM8727_NOC => BCM8727 with no over-current detection */
8796 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8797 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8798 bp->link_params.ext_phy_config &=
8799 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8800 bp->link_params.ext_phy_config |=
8801 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8802 bp->link_params.feature_config_flags |=
8803 FEATURE_CONFIG_BCM8727_NOC;
8806 bp->link_params.speed_cap_mask =
8808 dev_info.port_hw_config[port].speed_capability_mask);
8810 bp->port.link_config =
8811 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8813 /* Get the RX and TX XGXS configuration for the 4 lanes */
8814 for (i = 0; i < 2; i++) {
8816 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8817 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8818 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8821 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8822 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8823 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8826 /* If the device is capable of WoL, set the default state according to the HW capability */
8829 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8830 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8831 (config & PORT_FEATURE_WOL_ENABLED));
8833 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8834 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8835 bp->link_params.lane_config,
8836 bp->link_params.ext_phy_config,
8837 bp->link_params.speed_cap_mask, bp->port.link_config);
8839 bp->link_params.switch_cfg |= (bp->port.link_config &
8840 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8841 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8843 bnx2x_link_settings_requested(bp);
8846 /* If connected directly, work with the internal PHY; otherwise, work
8847 * with the external PHY */
8849 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8850 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8851 bp->mdio.prtad = bp->link_params.phy_addr;
8853 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8854 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8856 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8858 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8859 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8860 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8861 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8862 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8865 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8866 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8867 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8871 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8873 int func = BP_FUNC(bp);
8877 bnx2x_get_common_hwinfo(bp);
8881 if (CHIP_IS_E1H(bp)) {
8883 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8885 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8886 FUNC_MF_CFG_E1HOV_TAG_MASK);
8887 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8889 BNX2X_DEV_INFO("%s function mode\n",
8890 IS_E1HMF(bp) ? "multi" : "single");
8893 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8895 FUNC_MF_CFG_E1HOV_TAG_MASK);
8896 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8898 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8900 func, bp->e1hov, bp->e1hov);
8902 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8903 " aborting\n", func);
8908 BNX2X_ERR("!!! VN %d in single function mode,"
8909 " aborting\n", BP_E1HVN(bp));
8915 if (!BP_NOMCP(bp)) {
8916 bnx2x_get_port_hwinfo(bp);
8918 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8919 DRV_MSG_SEQ_NUMBER_MASK);
8920 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8924 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8925 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8926 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8927 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8928 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8929 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8930 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8931 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8932 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8933 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8934 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8936 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8944 /* only supposed to happen on emulation/FPGA */
8945 BNX2X_ERR("warning: random MAC workaround active\n");
8946 random_ether_addr(bp->dev->dev_addr);
8947 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8953 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8955 int func = BP_FUNC(bp);
8959 /* Disable interrupt handling until HW is initialized */
8960 atomic_set(&bp->intr_sem, 1);
8961 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8963 mutex_init(&bp->port.phy_mutex);
8964 mutex_init(&bp->fw_mb_mutex);
8966 mutex_init(&bp->cnic_mutex);
8969 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8970 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8972 rc = bnx2x_get_hwinfo(bp);
8974 /* need to reset chip if undi was active */
8976 bnx2x_undi_unload(bp);
8978 if (CHIP_REV_IS_FPGA(bp))
8979 printk(KERN_ERR PFX "FPGA detected\n");
8981 if (BP_NOMCP(bp) && (func == 0))
8982 printk(KERN_ERR PFX
8983 "MCP disabled, must load devices in order!\n");
8985 /* Set multi queue mode */
8986 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8987 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8989 "Multi disabled since int_mode requested is not MSI-X\n");
8990 multi_mode = ETH_RSS_MODE_DISABLED;
8992 bp->multi_mode = multi_mode;
8997 bp->flags &= ~TPA_ENABLE_FLAG;
8998 bp->dev->features &= ~NETIF_F_LRO;
9000 bp->flags |= TPA_ENABLE_FLAG;
9001 bp->dev->features |= NETIF_F_LRO;
9005 bp->dropless_fc = 0;
9007 bp->dropless_fc = dropless_fc;
9011 bp->tx_ring_size = MAX_TX_AVAIL;
9012 bp->rx_ring_size = MAX_RX_AVAIL;
9019 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9020 bp->current_interval = (poll ? poll : timer_interval);
9022 init_timer(&bp->timer);
9023 bp->timer.expires = jiffies + bp->current_interval;
9024 bp->timer.data = (unsigned long) bp;
9025 bp->timer.function = bnx2x_timer;
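/* A minimal sketch of the same init using the setup_timer() helper
 * (equivalent, assuming that helper is available in this tree):
 *
 *	setup_timer(&bp->timer, bnx2x_timer, (unsigned long)bp);
 *	bp->timer.expires = jiffies + bp->current_interval;
 *
 * either way the timer is only armed later (add_timer()/mod_timer())
 * once the NIC is actually loaded.
 */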
9030 /****************************************************************************
9031 * ethtool service functions
9032 ****************************************************************************/
9034 /* All ethtool functions called with rtnl_lock */
9036 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9038 struct bnx2x *bp = netdev_priv(dev);
9040 cmd->supported = bp->port.supported;
9041 cmd->advertising = bp->port.advertising;
9043 if ((bp->state == BNX2X_STATE_OPEN) &&
9044 !(bp->flags & MF_FUNC_DIS) &&
9045 (bp->link_vars.link_up)) {
9046 cmd->speed = bp->link_vars.line_speed;
9047 cmd->duplex = bp->link_vars.duplex;
9052 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
9053 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
9054 if (vn_max_rate < cmd->speed)
9055 cmd->speed = vn_max_rate;
9062 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9064 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9066 switch (ext_phy_type) {
9067 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9068 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9069 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9070 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9071 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9072 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9073 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9074 cmd->port = PORT_FIBRE;
9077 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9078 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9079 cmd->port = PORT_TP;
9082 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9083 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9084 bp->link_params.ext_phy_config);
9088 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9089 bp->link_params.ext_phy_config);
9093 cmd->port = PORT_TP;
9095 cmd->phy_address = bp->mdio.prtad;
9096 cmd->transceiver = XCVR_INTERNAL;
9098 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9099 cmd->autoneg = AUTONEG_ENABLE;
9101 cmd->autoneg = AUTONEG_DISABLE;
9106 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9107 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9108 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9109 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9110 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9111 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9112 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
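/* For reference, a user-space sketch of how this handler is reached
 * ("eth0" and the omitted error handling are placeholders):
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
 *	struct ifreq ifr;
 *
 *	strcpy(ifr.ifr_name, "eth0");
 *	ifr.ifr_data = (void *)&ecmd;
 *	ioctl(fd, SIOCETHTOOL, &ifr);
 *
 * the ethtool core then invokes .get_settings above under rtnl_lock,
 * which is why no extra locking is needed here.
 */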
9117 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9119 struct bnx2x *bp = netdev_priv(dev);
9125 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9126 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9127 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9128 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9129 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9130 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9131 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9133 if (cmd->autoneg == AUTONEG_ENABLE) {
9134 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9135 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
9139 /* advertise the requested speed and duplex if supported */
9140 cmd->advertising &= bp->port.supported;
9142 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9143 bp->link_params.req_duplex = DUPLEX_FULL;
9144 bp->port.advertising |= (ADVERTISED_Autoneg |
9147 } else { /* forced speed */
9148 /* advertise the requested speed and duplex if supported */
9149 switch (cmd->speed) {
9151 if (cmd->duplex == DUPLEX_FULL) {
9152 if (!(bp->port.supported &
9153 SUPPORTED_10baseT_Full)) {
9155 "10M full not supported\n");
9159 advertising = (ADVERTISED_10baseT_Full |
9162 if (!(bp->port.supported &
9163 SUPPORTED_10baseT_Half)) {
9165 "10M half not supported\n");
9169 advertising = (ADVERTISED_10baseT_Half |
9175 if (cmd->duplex == DUPLEX_FULL) {
9176 if (!(bp->port.supported &
9177 SUPPORTED_100baseT_Full)) {
9179 "100M full not supported\n");
9183 advertising = (ADVERTISED_100baseT_Full |
9186 if (!(bp->port.supported &
9187 SUPPORTED_100baseT_Half)) {
9189 "100M half not supported\n");
9193 advertising = (ADVERTISED_100baseT_Half |
9199 if (cmd->duplex != DUPLEX_FULL) {
9200 DP(NETIF_MSG_LINK, "1G half not supported\n");
9204 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
9205 DP(NETIF_MSG_LINK, "1G full not supported\n");
9209 advertising = (ADVERTISED_1000baseT_Full |
9214 if (cmd->duplex != DUPLEX_FULL) {
9216 "2.5G half not supported\n");
9220 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9222 "2.5G full not supported\n");
9226 advertising = (ADVERTISED_2500baseX_Full |
9231 if (cmd->duplex != DUPLEX_FULL) {
9232 DP(NETIF_MSG_LINK, "10G half not supported\n");
9236 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
9237 DP(NETIF_MSG_LINK, "10G full not supported\n");
9241 advertising = (ADVERTISED_10000baseT_Full |
9246 DP(NETIF_MSG_LINK, "Unsupported speed\n");
9250 bp->link_params.req_line_speed = cmd->speed;
9251 bp->link_params.req_duplex = cmd->duplex;
9252 bp->port.advertising = advertising;
9255 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
9256 DP_LEVEL " req_duplex %d advertising 0x%x\n",
9257 bp->link_params.req_line_speed, bp->link_params.req_duplex,
9258 bp->port.advertising);
9260 if (netif_running(dev)) {
9261 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9268 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9269 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9271 static int bnx2x_get_regs_len(struct net_device *dev)
9273 struct bnx2x *bp = netdev_priv(dev);
9274 int regdump_len = 0;
9277 if (CHIP_IS_E1(bp)) {
9278 for (i = 0; i < REGS_COUNT; i++)
9279 if (IS_E1_ONLINE(reg_addrs[i].info))
9280 regdump_len += reg_addrs[i].size;
9282 for (i = 0; i < WREGS_COUNT_E1; i++)
9283 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9284 regdump_len += wreg_addrs_e1[i].size *
9285 (1 + wreg_addrs_e1[i].read_regs_count);
9288 for (i = 0; i < REGS_COUNT; i++)
9289 if (IS_E1H_ONLINE(reg_addrs[i].info))
9290 regdump_len += reg_addrs[i].size;
9292 for (i = 0; i < WREGS_COUNT_E1H; i++)
9293 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9294 regdump_len += wreg_addrs_e1h[i].size *
9295 (1 + wreg_addrs_e1h[i].read_regs_count);
9298 regdump_len += sizeof(struct dump_hdr);
9303 static void bnx2x_get_regs(struct net_device *dev,
9304 struct ethtool_regs *regs, void *_p)
9307 struct bnx2x *bp = netdev_priv(dev);
9308 struct dump_hdr dump_hdr = {0};
9311 memset(p, 0, regs->len);
9313 if (!netif_running(bp->dev))
9316 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9317 dump_hdr.dump_sign = dump_sign_all;
9318 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9319 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9320 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9321 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9322 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9324 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9325 p += dump_hdr.hdr_size + 1;
9327 if (CHIP_IS_E1(bp)) {
9328 for (i = 0; i < REGS_COUNT; i++)
9329 if (IS_E1_ONLINE(reg_addrs[i].info))
9330 for (j = 0; j < reg_addrs[i].size; j++)
9332 reg_addrs[i].addr + j*4);
9335 for (i = 0; i < REGS_COUNT; i++)
9336 if (IS_E1H_ONLINE(reg_addrs[i].info))
9337 for (j = 0; j < reg_addrs[i].size; j++)
9339 reg_addrs[i].addr + j*4);
9343 #define PHY_FW_VER_LEN 10
9345 static void bnx2x_get_drvinfo(struct net_device *dev,
9346 struct ethtool_drvinfo *info)
9348 struct bnx2x *bp = netdev_priv(dev);
9349 u8 phy_fw_ver[PHY_FW_VER_LEN];
9351 strcpy(info->driver, DRV_MODULE_NAME);
9352 strcpy(info->version, DRV_MODULE_VERSION);
9354 phy_fw_ver[0] = '\0';
9356 bnx2x_acquire_phy_lock(bp);
9357 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9358 (bp->state != BNX2X_STATE_CLOSED),
9359 phy_fw_ver, PHY_FW_VER_LEN);
9360 bnx2x_release_phy_lock(bp);
9363 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9364 (bp->common.bc_ver & 0xff0000) >> 16,
9365 (bp->common.bc_ver & 0xff00) >> 8,
9366 (bp->common.bc_ver & 0xff),
9367 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9368 strcpy(info->bus_info, pci_name(bp->pdev));
9369 info->n_stats = BNX2X_NUM_STATS;
9370 info->testinfo_len = BNX2X_NUM_TESTS;
9371 info->eedump_len = bp->common.flash_size;
9372 info->regdump_len = bnx2x_get_regs_len(dev);
9375 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9377 struct bnx2x *bp = netdev_priv(dev);
9379 if (bp->flags & NO_WOL_FLAG) {
9383 wol->supported = WAKE_MAGIC;
9385 wol->wolopts = WAKE_MAGIC;
9389 memset(&wol->sopass, 0, sizeof(wol->sopass));
9392 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9394 struct bnx2x *bp = netdev_priv(dev);
9396 if (wol->wolopts & ~WAKE_MAGIC)
9399 if (wol->wolopts & WAKE_MAGIC) {
9400 if (bp->flags & NO_WOL_FLAG)
9410 static u32 bnx2x_get_msglevel(struct net_device *dev)
9412 struct bnx2x *bp = netdev_priv(dev);
9414 return bp->msglevel;
9417 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9419 struct bnx2x *bp = netdev_priv(dev);
9421 if (capable(CAP_NET_ADMIN))
9422 bp->msglevel = level;
9425 static int bnx2x_nway_reset(struct net_device *dev)
9427 struct bnx2x *bp = netdev_priv(dev);
9432 if (netif_running(dev)) {
9433 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9440 static u32 bnx2x_get_link(struct net_device *dev)
9442 struct bnx2x *bp = netdev_priv(dev);
9444 if (bp->flags & MF_FUNC_DIS)
9447 return bp->link_vars.link_up;
9450 static int bnx2x_get_eeprom_len(struct net_device *dev)
9452 struct bnx2x *bp = netdev_priv(dev);
9454 return bp->common.flash_size;
9457 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9459 int port = BP_PORT(bp);
9463 /* adjust timeout for emulation/FPGA */
9464 count = NVRAM_TIMEOUT_COUNT;
9465 if (CHIP_REV_IS_SLOW(bp))
9468 /* request access to nvram interface */
9469 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9470 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9472 for (i = 0; i < count*10; i++) {
9473 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9474 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9480 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9481 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9488 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9490 int port = BP_PORT(bp);
9494 /* adjust timeout for emulation/FPGA */
9495 count = NVRAM_TIMEOUT_COUNT;
9496 if (CHIP_REV_IS_SLOW(bp))
9499 /* relinquish nvram interface */
9500 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9501 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9503 for (i = 0; i < count*10; i++) {
9504 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9505 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9511 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9512 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9519 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9523 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9525 /* enable both bits, even on read */
9526 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9527 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9528 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9531 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9535 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9537 /* disable both bits, even after read */
9538 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9539 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9540 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
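/* The four helpers above always travel in pairs. A typical sequence,
 * as used by bnx2x_nvram_read()/bnx2x_nvram_write() below (sketch,
 * error paths omitted):
 *
 *	rc = bnx2x_acquire_nvram_lock(bp);	- per-port SW arbitration
 *	bnx2x_enable_nvram_access(bp);		- ungate the interface
 *	... MCPR_NVM_COMMAND/ADDR/READ traffic ...
 *	bnx2x_disable_nvram_access(bp);
 *	bnx2x_release_nvram_lock(bp);
 *
 * forgetting the release would starve the other port of NVRAM access.
 */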
9543 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9549 /* build the command word */
9550 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9552 /* need to clear DONE bit separately */
9553 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9555 /* address of the NVRAM to read from */
9556 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9557 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9559 /* issue a read command */
9560 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9562 /* adjust timeout for emulation/FPGA */
9563 count = NVRAM_TIMEOUT_COUNT;
9564 if (CHIP_REV_IS_SLOW(bp))
9567 /* wait for completion */
9570 for (i = 0; i < count; i++) {
9572 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9574 if (val & MCPR_NVM_COMMAND_DONE) {
9575 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9576 /* we read nvram data in cpu order
9577 * but ethtool sees it as an array of bytes;
9578 * converting to big-endian will do the work */
9579 *ret_val = cpu_to_be32(val);
9588 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9595 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9597 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9602 if (offset + buf_size > bp->common.flash_size) {
9603 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9604 " buf_size (0x%x) > flash_size (0x%x)\n",
9605 offset, buf_size, bp->common.flash_size);
9609 /* request access to nvram interface */
9610 rc = bnx2x_acquire_nvram_lock(bp);
9614 /* enable access to nvram interface */
9615 bnx2x_enable_nvram_access(bp);
9617 /* read the first word(s) */
9618 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9619 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9620 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9621 memcpy(ret_buf, &val, 4);
9623 /* advance to the next dword */
9624 offset += sizeof(u32);
9625 ret_buf += sizeof(u32);
9626 buf_size -= sizeof(u32);
9631 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9632 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9633 memcpy(ret_buf, &val, 4);
9636 /* disable access to nvram interface */
9637 bnx2x_disable_nvram_access(bp);
9638 bnx2x_release_nvram_lock(bp);
9643 static int bnx2x_get_eeprom(struct net_device *dev,
9644 struct ethtool_eeprom *eeprom, u8 *eebuf)
9646 struct bnx2x *bp = netdev_priv(dev);
9649 if (!netif_running(dev))
9652 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9653 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9654 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9655 eeprom->len, eeprom->len);
9657 /* parameters already validated in ethtool_get_eeprom */
9659 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9664 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9669 /* build the command word */
9670 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9672 /* need to clear DONE bit separately */
9673 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9675 /* write the data */
9676 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9678 /* address of the NVRAM to write to */
9679 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9680 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9682 /* issue the write command */
9683 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9685 /* adjust timeout for emulation/FPGA */
9686 count = NVRAM_TIMEOUT_COUNT;
9687 if (CHIP_REV_IS_SLOW(bp))
9690 /* wait for completion */
9692 for (i = 0; i < count; i++) {
9694 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9695 if (val & MCPR_NVM_COMMAND_DONE) {
9704 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
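/* Example: for offset = 0x102, BYTE_OFFSET(0x102) = 8 * 2 = 16, so
 * bnx2x_nvram_write1() below does a read-modify-write of bits 23:16
 * of the dword fetched from the aligned offset 0x100 - on a
 * little-endian host that is exactly the byte at 0x102.
 */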
9706 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9714 if (offset + buf_size > bp->common.flash_size) {
9715 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9716 " buf_size (0x%x) > flash_size (0x%x)\n",
9717 offset, buf_size, bp->common.flash_size);
9721 /* request access to nvram interface */
9722 rc = bnx2x_acquire_nvram_lock(bp);
9726 /* enable access to nvram interface */
9727 bnx2x_enable_nvram_access(bp);
9729 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9730 align_offset = (offset & ~0x03);
9731 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9734 val &= ~(0xff << BYTE_OFFSET(offset));
9735 val |= (*data_buf << BYTE_OFFSET(offset));
9737 /* nvram data is returned as an array of bytes;
9738 * convert it back to cpu order */
9739 val = be32_to_cpu(val);
9741 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9745 /* disable access to nvram interface */
9746 bnx2x_disable_nvram_access(bp);
9747 bnx2x_release_nvram_lock(bp);
9752 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9760 if (buf_size == 1) /* ethtool */
9761 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9763 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9765 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9770 if (offset + buf_size > bp->common.flash_size) {
9771 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9772 " buf_size (0x%x) > flash_size (0x%x)\n",
9773 offset, buf_size, bp->common.flash_size);
9777 /* request access to nvram interface */
9778 rc = bnx2x_acquire_nvram_lock(bp);
9782 /* enable access to nvram interface */
9783 bnx2x_enable_nvram_access(bp);
9786 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9787 while ((written_so_far < buf_size) && (rc == 0)) {
9788 if (written_so_far == (buf_size - sizeof(u32)))
9789 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9790 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9791 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9792 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9793 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9795 memcpy(&val, data_buf, 4);
9797 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9799 /* advance to the next dword */
9800 offset += sizeof(u32);
9801 data_buf += sizeof(u32);
9802 written_so_far += sizeof(u32);
9806 /* disable access to nvram interface */
9807 bnx2x_disable_nvram_access(bp);
9808 bnx2x_release_nvram_lock(bp);
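/* Example of the FIRST/LAST selection above (assuming NVRAM_PAGE_SIZE
 * is a power of two, e.g. 256): writing 16 bytes at offset 0xf8 is
 * split into two commands, each bracketed by FIRST and LAST:
 *
 *	0xf8  FIRST,  0xfc  LAST   - (offset + 4) hits the page end
 *	0x100 FIRST,  0x104 LAST   - new page, then the final dword
 *
 * so a single command never crosses an NVRAM page boundary.
 */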
9813 static int bnx2x_set_eeprom(struct net_device *dev,
9814 struct ethtool_eeprom *eeprom, u8 *eebuf)
9816 struct bnx2x *bp = netdev_priv(dev);
9817 int port = BP_PORT(bp);
9820 if (!netif_running(dev))
9823 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9824 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9825 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9826 eeprom->len, eeprom->len);
9828 /* parameters already validated in ethtool_set_eeprom */
9830 /* PHY eeprom can be accessed only by the PMF */
9831 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9835 if (eeprom->magic == 0x50485950) {
9836 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9837 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9839 bnx2x_acquire_phy_lock(bp);
9840 rc |= bnx2x_link_reset(&bp->link_params,
9842 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9843 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9844 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9845 MISC_REGISTERS_GPIO_HIGH, port);
9846 bnx2x_release_phy_lock(bp);
9847 bnx2x_link_report(bp);
9849 } else if (eeprom->magic == 0x50485952) {
9850 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9851 if (bp->state == BNX2X_STATE_OPEN) {
9852 bnx2x_acquire_phy_lock(bp);
9853 rc |= bnx2x_link_reset(&bp->link_params,
9856 rc |= bnx2x_phy_init(&bp->link_params,
9858 bnx2x_release_phy_lock(bp);
9859 bnx2x_calc_fc_adv(bp);
9861 } else if (eeprom->magic == 0x53985943) {
9862 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9863 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9864 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9866 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9868 /* DSP Remove Download Mode */
9869 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9870 MISC_REGISTERS_GPIO_LOW, port);
9872 bnx2x_acquire_phy_lock(bp);
9874 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9876 /* wait 0.5 sec to allow it to run */
9877 msleep(500);
9878 bnx2x_ext_phy_hw_reset(bp, port);
9880 bnx2x_release_phy_lock(bp);
9883 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9888 static int bnx2x_get_coalesce(struct net_device *dev,
9889 struct ethtool_coalesce *coal)
9891 struct bnx2x *bp = netdev_priv(dev);
9893 memset(coal, 0, sizeof(struct ethtool_coalesce));
9895 coal->rx_coalesce_usecs = bp->rx_ticks;
9896 coal->tx_coalesce_usecs = bp->tx_ticks;
9901 #define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
9902 static int bnx2x_set_coalesce(struct net_device *dev,
9903 struct ethtool_coalesce *coal)
9905 struct bnx2x *bp = netdev_priv(dev);
9907 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9908 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9909 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9911 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9912 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9913 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9915 if (netif_running(dev))
9916 bnx2x_update_coalesce(bp);
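/* Worked numbers: BNX2X_MAX_COALES_TOUT = 0xf0 * 12 = 240 * 12 =
 * 2880us, so e.g. "ethtool -C ethX rx-usecs 5000" is silently clamped
 * to 2880 while "rx-usecs 25" is applied as-is ("ethX" being a
 * placeholder name); the values only reach the HW through
 * bnx2x_update_coalesce() when the interface is up.
 */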
9921 static void bnx2x_get_ringparam(struct net_device *dev,
9922 struct ethtool_ringparam *ering)
9924 struct bnx2x *bp = netdev_priv(dev);
9926 ering->rx_max_pending = MAX_RX_AVAIL;
9927 ering->rx_mini_max_pending = 0;
9928 ering->rx_jumbo_max_pending = 0;
9930 ering->rx_pending = bp->rx_ring_size;
9931 ering->rx_mini_pending = 0;
9932 ering->rx_jumbo_pending = 0;
9934 ering->tx_max_pending = MAX_TX_AVAIL;
9935 ering->tx_pending = bp->tx_ring_size;
9938 static int bnx2x_set_ringparam(struct net_device *dev,
9939 struct ethtool_ringparam *ering)
9941 struct bnx2x *bp = netdev_priv(dev);
9944 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9945 (ering->tx_pending > MAX_TX_AVAIL) ||
9946 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9947 return -EINVAL;
9949 bp->rx_ring_size = ering->rx_pending;
9950 bp->tx_ring_size = ering->tx_pending;
9952 if (netif_running(dev)) {
9953 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9954 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9960 static void bnx2x_get_pauseparam(struct net_device *dev,
9961 struct ethtool_pauseparam *epause)
9963 struct bnx2x *bp = netdev_priv(dev);
9965 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9966 BNX2X_FLOW_CTRL_AUTO) &&
9967 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9969 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9970 BNX2X_FLOW_CTRL_RX);
9971 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9972 BNX2X_FLOW_CTRL_TX);
9974 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9975 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9976 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9979 static int bnx2x_set_pauseparam(struct net_device *dev,
9980 struct ethtool_pauseparam *epause)
9982 struct bnx2x *bp = netdev_priv(dev);
9987 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9988 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9989 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9991 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9993 if (epause->rx_pause)
9994 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9996 if (epause->tx_pause)
9997 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9999 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10000 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
10002 if (epause->autoneg) {
10003 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10004 DP(NETIF_MSG_LINK, "autoneg not supported\n");
10008 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10009 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10013 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10015 if (netif_running(dev)) {
10016 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10017 bnx2x_link_set(bp);
10023 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10025 struct bnx2x *bp = netdev_priv(dev);
10029 /* TPA requires Rx CSUM offloading */
10030 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10031 if (!(dev->features & NETIF_F_LRO)) {
10032 dev->features |= NETIF_F_LRO;
10033 bp->flags |= TPA_ENABLE_FLAG;
10037 } else if (dev->features & NETIF_F_LRO) {
10038 dev->features &= ~NETIF_F_LRO;
10039 bp->flags &= ~TPA_ENABLE_FLAG;
10043 if (changed && netif_running(dev)) {
10044 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10045 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10051 static u32 bnx2x_get_rx_csum(struct net_device *dev)
10053 struct bnx2x *bp = netdev_priv(dev);
10055 return bp->rx_csum;
10058 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10060 struct bnx2x *bp = netdev_priv(dev);
10063 bp->rx_csum = data;
10065 /* Disable TPA when Rx CSUM is disabled; otherwise all
10066 TPA'ed packets will be discarded due to a wrong TCP CSUM */
10068 u32 flags = ethtool_op_get_flags(dev);
10070 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10076 static int bnx2x_set_tso(struct net_device *dev, u32 data)
10079 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10080 dev->features |= NETIF_F_TSO6;
10082 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10083 dev->features &= ~NETIF_F_TSO6;
10089 static const struct {
10090 char string[ETH_GSTRING_LEN];
10091 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10092 { "register_test (offline)" },
10093 { "memory_test (offline)" },
10094 { "loopback_test (offline)" },
10095 { "nvram_test (online)" },
10096 { "interrupt_test (online)" },
10097 { "link_test (online)" },
10098 { "idle check (online)" }
10101 static int bnx2x_test_registers(struct bnx2x *bp)
10103 int idx, i, rc = -ENODEV;
10105 int port = BP_PORT(bp);
10106 static const struct {
10111 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10112 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
10113 { HC_REG_AGG_INT_0, 4, 0x000003ff },
10114 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
10115 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
10116 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
10117 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
10118 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10119 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
10120 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10121 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
10122 { QM_REG_CONNNUM_0, 4, 0x000fffff },
10123 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
10124 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
10125 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
10126 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10127 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
10128 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
10129 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
10130 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
10131 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
10132 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
10133 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
10134 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
10135 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
10136 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
10137 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
10138 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
10139 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
10140 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
10141 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
10142 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
10143 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
10144 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10145 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
10146 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10147 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
10149 { 0xffffffff, 0, 0x00000000 }
10152 if (!netif_running(bp->dev))
10155 /* Repeat the test twice:
10156 * first by writing 0x00000000, then by writing 0xffffffff */
10157 for (idx = 0; idx < 2; idx++) {
10164 wr_val = 0xffffffff;
10168 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10169 u32 offset, mask, save_val, val;
10171 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10172 mask = reg_tbl[i].mask;
10174 save_val = REG_RD(bp, offset);
10176 REG_WR(bp, offset, wr_val);
10177 val = REG_RD(bp, offset);
10179 /* Restore the original register's value */
10180 REG_WR(bp, offset, save_val);
10182 /* verify that the value read back is as expected */
10183 if ((val & mask) != (wr_val & mask))
10184 goto test_reg_exit;
10194 static int bnx2x_test_memory(struct bnx2x *bp)
10196 int i, j, rc = -ENODEV;
10198 static const struct {
10202 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
10203 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10204 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
10205 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
10206 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
10207 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
10208 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
10212 static const struct {
10218 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10219 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10220 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10221 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10222 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10223 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
10225 { NULL, 0xffffffff, 0, 0 }
10228 if (!netif_running(bp->dev))
10231 /* Go through all the memories */
10232 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10233 for (j = 0; j < mem_tbl[i].size; j++)
10234 REG_RD(bp, mem_tbl[i].offset + j*4);
10236 /* Check the parity status */
10237 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10238 val = REG_RD(bp, prty_tbl[i].offset);
10239 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10240 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10242 "%s is 0x%x\n", prty_tbl[i].name, val);
10243 goto test_mem_exit;
10253 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10258 while (bnx2x_link_test(bp) && cnt--)
10259 msleep(10);
10262 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10264 unsigned int pkt_size, num_pkts, i;
10265 struct sk_buff *skb;
10266 unsigned char *packet;
10267 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10268 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
10269 u16 tx_start_idx, tx_idx;
10270 u16 rx_start_idx, rx_idx;
10271 u16 pkt_prod, bd_prod;
10272 struct sw_tx_bd *tx_buf;
10273 struct eth_tx_start_bd *tx_start_bd;
10274 struct eth_tx_parse_bd *pbd = NULL;
10275 dma_addr_t mapping;
10276 union eth_rx_cqe *cqe;
10278 struct sw_rx_bd *rx_buf;
10282 /* check the loopback mode */
10283 switch (loopback_mode) {
10284 case BNX2X_PHY_LOOPBACK:
10285 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10288 case BNX2X_MAC_LOOPBACK:
10289 bp->link_params.loopback_mode = LOOPBACK_BMAC;
10290 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10296 /* prepare the loopback packet */
10297 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10298 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10299 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10302 goto test_loopback_exit;
10304 packet = skb_put(skb, pkt_size);
10305 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10306 memset(packet + ETH_ALEN, 0, ETH_ALEN);
10307 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10308 for (i = ETH_HLEN; i < pkt_size; i++)
10309 packet[i] = (unsigned char) (i & 0xff);
10311 /* send the loopback packet */
10313 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10314 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10316 pkt_prod = fp_tx->tx_pkt_prod++;
10317 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10318 tx_buf->first_bd = fp_tx->tx_bd_prod;
10322 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10323 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10324 mapping = pci_map_single(bp->pdev, skb->data,
10325 skb_headlen(skb), PCI_DMA_TODEVICE);
10326 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10327 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10328 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10329 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10330 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10331 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10332 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10333 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10335 /* turn on parsing and get a BD */
10336 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10337 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10339 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10343 fp_tx->tx_db.data.prod += 2;
10345 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
10350 fp_tx->tx_bd_prod += 2; /* start + pbd */
10351 bp->dev->trans_start = jiffies;
10355 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10356 if (tx_idx != tx_start_idx + num_pkts)
10357 goto test_loopback_exit;
10359 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10360 if (rx_idx != rx_start_idx + num_pkts)
10361 goto test_loopback_exit;
10363 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10364 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10365 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10366 goto test_loopback_rx_exit;
10368 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10369 if (len != pkt_size)
10370 goto test_loopback_rx_exit;
10372 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10374 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10375 for (i = ETH_HLEN; i < pkt_size; i++)
10376 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10377 goto test_loopback_rx_exit;
10381 test_loopback_rx_exit:
10383 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10384 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10385 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10386 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10388 /* Update producers */
10389 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10390 fp_rx->rx_sge_prod);
10392 test_loopback_exit:
10393 bp->link_params.loopback_mode = LOOPBACK_NONE;
10398 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10402 if (!netif_running(bp->dev))
10403 return BNX2X_LOOPBACK_FAILED;
10405 bnx2x_netif_stop(bp, 1);
10406 bnx2x_acquire_phy_lock(bp);
10408 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10410 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10411 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10414 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10416 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10417 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10420 bnx2x_release_phy_lock(bp);
10421 bnx2x_netif_start(bp);
10426 #define CRC32_RESIDUAL 0xdebb20e3
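/* Why 0xdebb20e3: each NVRAM block is stored with its (inverted,
 * little-endian) CRC32 appended, and running CRC32 over data plus
 * appended CRC always leaves this well-known residual when the data
 * is intact, so no per-block expected value has to be stored:
 *
 *	if (ether_crc_le(size, data) != CRC32_RESIDUAL)
 *		the block is corrupt;
 *
 * (sketch of the check performed in bnx2x_test_nvram() below).
 */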
10428 static int bnx2x_test_nvram(struct bnx2x *bp)
10430 static const struct {
10434 { 0, 0x14 }, /* bootstrap */
10435 { 0x14, 0xec }, /* dir */
10436 { 0x100, 0x350 }, /* manuf_info */
10437 { 0x450, 0xf0 }, /* feature_info */
10438 { 0x640, 0x64 }, /* upgrade_key_info */
10440 { 0x708, 0x70 }, /* manuf_key_info */
10444 __be32 buf[0x350 / 4];
10445 u8 *data = (u8 *)buf;
10449 rc = bnx2x_nvram_read(bp, 0, data, 4);
10451 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10452 goto test_nvram_exit;
10455 magic = be32_to_cpu(buf[0]);
10456 if (magic != 0x669955aa) {
10457 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10459 goto test_nvram_exit;
10462 for (i = 0; nvram_tbl[i].size; i++) {
10464 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10465 nvram_tbl[i].size);
10467 DP(NETIF_MSG_PROBE,
10468 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10469 goto test_nvram_exit;
10472 crc = ether_crc_le(nvram_tbl[i].size, data);
10473 if (crc != CRC32_RESIDUAL) {
10474 DP(NETIF_MSG_PROBE,
10475 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
10477 goto test_nvram_exit;
10485 static int bnx2x_test_intr(struct bnx2x *bp)
10487 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10490 if (!netif_running(bp->dev))
10493 config->hdr.length = 0;
10494 if (CHIP_IS_E1(bp))
10495 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10497 config->hdr.offset = BP_FUNC(bp);
10498 config->hdr.client_id = bp->fp->cl_id;
10499 config->hdr.reserved1 = 0;
10501 bp->set_mac_pending++;
10503 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10504 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10505 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10507 for (i = 0; i < 10; i++) {
10508 if (!bp->set_mac_pending)
10511 msleep_interruptible(10);
10520 static void bnx2x_self_test(struct net_device *dev,
10521 struct ethtool_test *etest, u64 *buf)
10523 struct bnx2x *bp = netdev_priv(dev);
10525 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10527 if (!netif_running(dev))
10530 /* offline tests are not supported in MF mode */
10532 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10534 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10535 int port = BP_PORT(bp);
10539 /* save current value of input enable for TX port IF */
10540 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10541 /* disable input for TX port IF */
10542 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10544 link_up = (bnx2x_link_test(bp) == 0);
10545 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10546 bnx2x_nic_load(bp, LOAD_DIAG);
10547 /* wait until link state is restored */
10548 bnx2x_wait_for_link(bp, link_up);
10550 if (bnx2x_test_registers(bp) != 0) {
10552 etest->flags |= ETH_TEST_FL_FAILED;
10554 if (bnx2x_test_memory(bp) != 0) {
10556 etest->flags |= ETH_TEST_FL_FAILED;
10558 buf[2] = bnx2x_test_loopback(bp, link_up);
10560 etest->flags |= ETH_TEST_FL_FAILED;
10562 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10564 /* restore input for TX port IF */
10565 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10567 bnx2x_nic_load(bp, LOAD_NORMAL);
10568 /* wait until link state is restored */
10569 bnx2x_wait_for_link(bp, link_up);
10571 if (bnx2x_test_nvram(bp) != 0) {
10573 etest->flags |= ETH_TEST_FL_FAILED;
10575 if (bnx2x_test_intr(bp) != 0) {
10577 etest->flags |= ETH_TEST_FL_FAILED;
10580 if (bnx2x_link_test(bp) != 0) {
10582 etest->flags |= ETH_TEST_FL_FAILED;
10585 #ifdef BNX2X_EXTRA_DEBUG
10586 bnx2x_panic_dump(bp);
10590 static const struct {
10593 u8 string[ETH_GSTRING_LEN];
10594 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10595 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10596 { Q_STATS_OFFSET32(error_bytes_received_hi),
10597 8, "[%d]: rx_error_bytes" },
10598 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10599 8, "[%d]: rx_ucast_packets" },
10600 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10601 8, "[%d]: rx_mcast_packets" },
10602 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10603 8, "[%d]: rx_bcast_packets" },
10604 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10605 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10606 4, "[%d]: rx_phy_ip_err_discards"},
10607 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10608 4, "[%d]: rx_skb_alloc_discard" },
10609 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10611 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10612 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10613 8, "[%d]: tx_packets" }
10616 static const struct {
10620 #define STATS_FLAGS_PORT 1
10621 #define STATS_FLAGS_FUNC 2
10622 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10623 u8 string[ETH_GSTRING_LEN];
10624 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10625 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10626 8, STATS_FLAGS_BOTH, "rx_bytes" },
10627 { STATS_OFFSET32(error_bytes_received_hi),
10628 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10629 { STATS_OFFSET32(total_unicast_packets_received_hi),
10630 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10631 { STATS_OFFSET32(total_multicast_packets_received_hi),
10632 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10633 { STATS_OFFSET32(total_broadcast_packets_received_hi),
10634 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10635 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10636 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10637 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10638 8, STATS_FLAGS_PORT, "rx_align_errors" },
10639 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10640 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10641 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10642 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10643 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10644 8, STATS_FLAGS_PORT, "rx_fragments" },
10645 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10646 8, STATS_FLAGS_PORT, "rx_jabbers" },
10647 { STATS_OFFSET32(no_buff_discard_hi),
10648 8, STATS_FLAGS_BOTH, "rx_discards" },
10649 { STATS_OFFSET32(mac_filter_discard),
10650 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10651 { STATS_OFFSET32(xxoverflow_discard),
10652 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10653 { STATS_OFFSET32(brb_drop_hi),
10654 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10655 { STATS_OFFSET32(brb_truncate_hi),
10656 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10657 { STATS_OFFSET32(pause_frames_received_hi),
10658 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10659 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10660 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10661 { STATS_OFFSET32(nig_timer_max),
10662 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10663 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10664 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10665 { STATS_OFFSET32(rx_skb_alloc_failed),
10666 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10667 { STATS_OFFSET32(hw_csum_err),
10668 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10670 { STATS_OFFSET32(total_bytes_transmitted_hi),
10671 8, STATS_FLAGS_BOTH, "tx_bytes" },
10672 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10673 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10674 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10675 8, STATS_FLAGS_BOTH, "tx_packets" },
10676 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10677 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10678 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10679 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10680 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10681 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10682 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10683 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10684 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10685 8, STATS_FLAGS_PORT, "tx_deferred" },
10686 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10687 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10688 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10689 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10690 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10691 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10692 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10693 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10694 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10695 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10696 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10697 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10698 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10699 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10700 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10701 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10702 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10703 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10704 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10705 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10706 { STATS_OFFSET32(pause_frames_sent_hi),
10707 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10710 #define IS_PORT_STAT(i) \
10711 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10712 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10713 #define IS_E1HMF_MODE_STAT(bp) \
10714 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10716 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10718 struct bnx2x *bp = netdev_priv(dev);
10721 switch(stringset) {
10723 if (is_multi(bp)) {
10724 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10725 if (!IS_E1HMF_MODE_STAT(bp))
10726 num_stats += BNX2X_NUM_STATS;
10728 if (IS_E1HMF_MODE_STAT(bp)) {
10730 for (i = 0; i < BNX2X_NUM_STATS; i++)
10731 if (IS_FUNC_STAT(i))
10734 num_stats = BNX2X_NUM_STATS;
10739 return BNX2X_NUM_TESTS;
10746 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10748 struct bnx2x *bp = netdev_priv(dev);
10751 switch (stringset) {
10753 if (is_multi(bp)) {
10755 for_each_rx_queue(bp, i) {
10756 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10757 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10758 bnx2x_q_stats_arr[j].string, i);
10759 k += BNX2X_NUM_Q_STATS;
10761 if (IS_E1HMF_MODE_STAT(bp))
10763 for (j = 0; j < BNX2X_NUM_STATS; j++)
10764 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10765 bnx2x_stats_arr[j].string);
10767 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10768 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10770 strcpy(buf + j*ETH_GSTRING_LEN,
10771 bnx2x_stats_arr[i].string);
10778 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10783 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10784 struct ethtool_stats *stats, u64 *buf)
10786 struct bnx2x *bp = netdev_priv(dev);
10787 u32 *hw_stats, *offset;
10790 if (is_multi(bp)) {
10792 for_each_rx_queue(bp, i) {
10793 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10794 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10795 if (bnx2x_q_stats_arr[j].size == 0) {
10796 /* skip this counter */
10800 offset = (hw_stats +
10801 bnx2x_q_stats_arr[j].offset);
10802 if (bnx2x_q_stats_arr[j].size == 4) {
10803 /* 4-byte counter */
10804 buf[k + j] = (u64) *offset;
10807 /* 8-byte counter */
10808 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10810 k += BNX2X_NUM_Q_STATS;
10812 if (IS_E1HMF_MODE_STAT(bp))
10814 hw_stats = (u32 *)&bp->eth_stats;
10815 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10816 if (bnx2x_stats_arr[j].size == 0) {
10817 /* skip this counter */
10821 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10822 if (bnx2x_stats_arr[j].size == 4) {
10823 /* 4-byte counter */
10824 buf[k + j] = (u64) *offset;
10827 /* 8-byte counter */
10828 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10831 hw_stats = (u32 *)&bp->eth_stats;
10832 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10833 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10835 if (bnx2x_stats_arr[i].size == 0) {
10836 /* skip this counter */
10841 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10842 if (bnx2x_stats_arr[i].size == 4) {
10843 /* 4-byte counter */
10844 buf[j] = (u64) *offset;
10848 /* 8-byte counter */
10849 buf[j] = HILO_U64(*offset, *(offset + 1));
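/* The firmware exports 64-bit counters as two consecutive 32-bit
 * words, "_hi" word first; HILO_U64() glues them back together, e.g.
 * (illustrative values):
 *
 *	*offset = 0x00000001, *(offset + 1) = 0x23456789
 *	HILO_U64(*offset, *(offset + 1)) = 0x0000000123456789
 *
 * which is why each 8-byte entry in the stats tables names only its
 * _hi half and implicitly takes the _lo word right after it.
 */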
10855 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10857 struct bnx2x *bp = netdev_priv(dev);
10858 int port = BP_PORT(bp);
10861 if (!netif_running(dev))
10870 for (i = 0; i < (data * 2); i++) {
10872 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10873 bp->link_params.hw_led_mode,
10874 bp->link_params.chip_id);
10876 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10877 bp->link_params.hw_led_mode,
10878 bp->link_params.chip_id);
10880 msleep_interruptible(500);
10881 if (signal_pending(current))
10885 if (bp->link_vars.link_up)
10886 bnx2x_set_led(bp, port, LED_MODE_OPER,
10887 bp->link_vars.line_speed,
10888 bp->link_params.hw_led_mode,
10889 bp->link_params.chip_id);
10894 static const struct ethtool_ops bnx2x_ethtool_ops = {
10895 .get_settings = bnx2x_get_settings,
10896 .set_settings = bnx2x_set_settings,
10897 .get_drvinfo = bnx2x_get_drvinfo,
10898 .get_regs_len = bnx2x_get_regs_len,
10899 .get_regs = bnx2x_get_regs,
10900 .get_wol = bnx2x_get_wol,
10901 .set_wol = bnx2x_set_wol,
10902 .get_msglevel = bnx2x_get_msglevel,
10903 .set_msglevel = bnx2x_set_msglevel,
10904 .nway_reset = bnx2x_nway_reset,
10905 .get_link = bnx2x_get_link,
10906 .get_eeprom_len = bnx2x_get_eeprom_len,
10907 .get_eeprom = bnx2x_get_eeprom,
10908 .set_eeprom = bnx2x_set_eeprom,
10909 .get_coalesce = bnx2x_get_coalesce,
10910 .set_coalesce = bnx2x_set_coalesce,
10911 .get_ringparam = bnx2x_get_ringparam,
10912 .set_ringparam = bnx2x_set_ringparam,
10913 .get_pauseparam = bnx2x_get_pauseparam,
10914 .set_pauseparam = bnx2x_set_pauseparam,
10915 .get_rx_csum = bnx2x_get_rx_csum,
10916 .set_rx_csum = bnx2x_set_rx_csum,
10917 .get_tx_csum = ethtool_op_get_tx_csum,
10918 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10919 .set_flags = bnx2x_set_flags,
10920 .get_flags = ethtool_op_get_flags,
10921 .get_sg = ethtool_op_get_sg,
10922 .set_sg = ethtool_op_set_sg,
10923 .get_tso = ethtool_op_get_tso,
10924 .set_tso = bnx2x_set_tso,
10925 .self_test = bnx2x_self_test,
10926 .get_sset_count = bnx2x_get_sset_count,
10927 .get_strings = bnx2x_get_strings,
10928 .phys_id = bnx2x_phys_id,
10929 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10932 /* end of ethtool_ops */
10934 /****************************************************************************
10935 * General service functions
10936 ****************************************************************************/
10938 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10942 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10946 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10947 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10948 PCI_PM_CTRL_PME_STATUS));
10950 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10951 /* delay required during transition out of D3hot */
10956 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10960 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10962 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10965 /* No more memory access after this point until
10966 * device is brought back to D0.
10976 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10980 /* Tell compiler that status block fields can change */
10982 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10983 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10984 rx_cons_sb++;
10985 return (fp->rx_comp_cons != rx_cons_sb);
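/* Editor's note: the MAX_RCQ_DESC_CNT test above exists because the last
 * entry of each RCQ page is a next-page link element rather than a real
 * completion, so a status-block consumer value that lands on that slot is
 * bumped past it before being compared with rx_comp_cons.
 */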
10989 * net_device service functions
10992 static int bnx2x_poll(struct napi_struct *napi, int budget)
10994 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10996 struct bnx2x *bp = fp->bp;
10999 #ifdef BNX2X_STOP_ON_ERROR
11000 if (unlikely(bp->panic))
11001 return 0;
11004 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
11005 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
11007 bnx2x_update_fpsb_idx(fp);
11009 if (bnx2x_has_rx_work(fp)) {
11010 work_done = bnx2x_rx_int(fp, budget);
11012 /* must not complete if we consumed full budget */
11013 if (work_done >= budget)
11017 /* bnx2x_has_rx_work() reads the status block, thus we need to
11018 * ensure that the status block indices have actually been read
11019 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
11020 * so that we won't write the "newer" value of the status block to IGU
11021 * (if there was a DMA right after bnx2x_has_rx_work and
11022 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
11023 * may be postponed to right before bnx2x_ack_sb). In this case
11024 * there will never be another interrupt until there is another update
11025 * of the status block, while there is still unhandled work.
11029 if (!bnx2x_has_rx_work(fp)) {
11030 #ifdef BNX2X_STOP_ON_ERROR
11033 napi_complete(napi);
11035 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
11036 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
11037 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
11038 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
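/* Editor's note: the two bnx2x_ack_sb() calls above write both updated
 * status-block indices back to the IGU, but only the second one
 * (IGU_INT_ENABLE) re-arms the interrupt line, so interrupts are
 * re-enabled exactly once per completed poll.
 */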
11046 /* we split the first BD into header and data BDs
11047 * to ease the pain of our fellow microcode engineers;
11048 * we use one mapping for both BDs.
11049 * So far this has only been observed to happen
11050 * in Other Operating Systems(TM)
11052 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
11053 struct bnx2x_fastpath *fp,
11054 struct sw_tx_bd *tx_buf,
11055 struct eth_tx_start_bd **tx_bd, u16 hlen,
11056 u16 bd_prod, int nbd)
11058 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
11059 struct eth_tx_bd *d_tx_bd;
11060 dma_addr_t mapping;
11061 int old_len = le16_to_cpu(h_tx_bd->nbytes);
11063 /* first fix first BD */
11064 h_tx_bd->nbd = cpu_to_le16(nbd);
11065 h_tx_bd->nbytes = cpu_to_le16(hlen);
11067 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11068 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11069 h_tx_bd->addr_lo, h_tx_bd->nbd);
11071 /* now get a new data BD
11072 * (after the pbd) and fill it */
11073 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11074 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11076 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11077 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11079 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11080 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11081 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
11083 /* this marks the BD as one that has no individual mapping */
11084 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11086 DP(NETIF_MSG_TX_QUEUED,
11087 "TSO split data size is %d (%x:%x)\n",
11088 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11091 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
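/* Editor's note (worked example): the split reuses the single DMA mapping
 * of the original first BD. With a mapping of old_len bytes:
 *
 *	header BD: addr = mapping,        nbytes = hlen
 *	data BD:   addr = mapping + hlen, nbytes = old_len - hlen
 *
 * No second mapping is created, and BNX2X_TSO_SPLIT_BD tells the
 * completion path not to unmap the extra data BD individually.
 */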
11096 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11099 csum = (u16) ~csum_fold(csum_sub(csum,
11100 csum_partial(t_header - fix, fix, 0)));
11103 csum = (u16) ~csum_fold(csum_add(csum,
11104 csum_partial(t_header, -fix, 0)));
11106 return swab16(csum);
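/* Editor's sketch (illustrative, not driver code): one's-complement sums
 * are linear, so bnx2x_csum_fix() can move the start of the checksummed
 * region by 'fix' bytes without a full recompute: for fix > 0 it subtracts
 * the sum of the bytes in front of the transport header, for fix < 0 it
 * adds the missing bytes back, and swab16() yields the byte order the
 * parsing BD expects. A hypothetical call fixing a sum that started two
 * bytes before the transport header:
 *
 *	csum = bnx2x_csum_fix(skb_transport_header(skb), csum, 2);
 */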
11109 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11113 if (skb->ip_summed != CHECKSUM_PARTIAL)
11117 if (skb->protocol == htons(ETH_P_IPV6)) {
11119 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11120 rc |= XMIT_CSUM_TCP;
11124 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11125 rc |= XMIT_CSUM_TCP;
11129 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11132 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11138 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11139 /* check if a packet requires linearization (the packet is too fragmented);
11140 no need to check fragmentation if page size > 8K (there will be no
11141 violation of FW restrictions) */
11142 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11147 int first_bd_sz = 0;
11149 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11150 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11152 if (xmit_type & XMIT_GSO) {
11153 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11154 /* Check if LSO packet needs to be copied:
11155 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11156 int wnd_size = MAX_FETCH_BD - 3;
11157 /* Number of windows to check */
11158 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11163 /* Headers length */
11164 hlen = (int)(skb_transport_header(skb) - skb->data) +
11165 tcp_hdrlen(skb);
11167 /* Amount of data (w/o headers) on the linear part of the SKB */
11168 first_bd_sz = skb_headlen(skb) - hlen;
11170 wnd_sum = first_bd_sz;
11172 /* Calculate the first sum - it's special */
11173 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11174 wnd_sum +=
11175 skb_shinfo(skb)->frags[frag_idx].size;
11177 /* If there was data on linear skb data - check it */
11178 if (first_bd_sz > 0) {
11179 if (unlikely(wnd_sum < lso_mss)) {
11184 wnd_sum -= first_bd_sz;
11187 /* Others are easier: run through the frag list and
11188 check all windows */
11189 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11190 wnd_sum +=
11191 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11193 if (unlikely(wnd_sum < lso_mss)) {
11194 to_copy = 1;
11195 break;
11196 }
11197 wnd_sum -=
11198 skb_shinfo(skb)->frags[wnd_idx].size;
11201 /* in the non-LSO case a packet this fragmented must always
11202 be linearized */
11208 if (unlikely(to_copy))
11209 DP(NETIF_MSG_TX_QUEUED,
11210 "Linearization IS REQUIRED for %s packet. "
11211 "num_frags %d hlen %d first_bd_sz %d\n",
11212 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11213 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
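/* Editor's sketch (illustrative, not driver code): the FW window rule
 * checked above, reduced to its core. Every wnd_size consecutive BDs must
 * carry at least lso_mss bytes; the sum is maintained incrementally by
 * adding the frag entering the window and dropping the one leaving it.
 * The flat frag_sz[] array is a stand-in for skb_shinfo(skb)->frags[].
 */
static int example_needs_linearization(const unsigned int *frag_sz, int nfrags,
				       int wnd_size, unsigned int lso_mss)
{
	unsigned int wnd_sum = 0;
	int i;

	for (i = 0; i < nfrags; i++) {
		wnd_sum += frag_sz[i];
		if (i >= wnd_size)
			wnd_sum -= frag_sz[i - wnd_size];	/* frag leaving the window */
		if (i >= wnd_size - 1 && wnd_sum < lso_mss)
			return 1;	/* some window holds less than one MSS */
	}
	return 0;
}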
11219 /* called with netif_tx_lock
11220 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
11221 * netif_wake_queue()
11223 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11225 struct bnx2x *bp = netdev_priv(dev);
11226 struct bnx2x_fastpath *fp, *fp_stat;
11227 struct netdev_queue *txq;
11228 struct sw_tx_bd *tx_buf;
11229 struct eth_tx_start_bd *tx_start_bd;
11230 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
11231 struct eth_tx_parse_bd *pbd = NULL;
11232 u16 pkt_prod, bd_prod;
11234 dma_addr_t mapping;
11235 u32 xmit_type = bnx2x_xmit_type(bp, skb);
11238 __le16 pkt_size = 0;
11240 #ifdef BNX2X_STOP_ON_ERROR
11241 if (unlikely(bp->panic))
11242 return NETDEV_TX_BUSY;
11245 fp_index = skb_get_queue_mapping(skb);
11246 txq = netdev_get_tx_queue(dev, fp_index);
11248 fp = &bp->fp[fp_index + bp->num_rx_queues];
11249 fp_stat = &bp->fp[fp_index];
11251 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
11252 fp_stat->eth_q_stats.driver_xoff++;
11253 netif_tx_stop_queue(txq);
11254 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11255 return NETDEV_TX_BUSY;
11258 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
11259 " gso type %x xmit_type %x\n",
11260 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11261 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11263 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11264 /* First, check if we need to linearize the skb (due to FW
11265 restrictions). No need to check fragmentation if page size > 8K
11266 (there will be no violation of FW restrictions) */
11267 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11268 /* Statistics of linearization */
11270 if (skb_linearize(skb) != 0) {
11271 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11272 "silently dropping this SKB\n");
11273 dev_kfree_skb_any(skb);
11274 return NETDEV_TX_OK;
11280 Please read carefully. First we use one BD which we mark as start,
11281 then we have a parsing info BD (used for TSO or xsum),
11282 and only then we have the rest of the TSO BDs.
11283 (don't forget to mark the last one as last,
11284 and to unmap only AFTER you write to the BD ...)
11285 And above all, all PBD sizes are in words - NOT DWORDS!
11288 pkt_prod = fp->tx_pkt_prod++;
11289 bd_prod = TX_BD(fp->tx_bd_prod);
11291 /* get a tx_buf and first BD */
11292 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
11293 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11295 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11296 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11297 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11299 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11301 /* remember the first BD of the packet */
11302 tx_buf->first_bd = fp->tx_bd_prod;
11306 DP(NETIF_MSG_TX_QUEUED,
11307 "sending pkt %u @%p next_idx %u bd %u @%p\n",
11308 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
11311 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11312 (bp->flags & HW_VLAN_TX_FLAG)) {
11313 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11314 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
11317 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11319 /* turn on parsing and get a BD */
11320 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11321 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11323 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11325 if (xmit_type & XMIT_CSUM) {
11326 hlen = (skb_network_header(skb) - skb->data) / 2;
11328 /* for now NS flag is not used in Linux */
11329 pbd->global_data =
11330 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11331 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11333 pbd->ip_hlen = (skb_transport_header(skb) -
11334 skb_network_header(skb)) / 2;
11336 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11338 pbd->total_hlen = cpu_to_le16(hlen);
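/* Editor's note (worked example): the parsing-BD lengths are in 16-bit
 * words, hence the "/ 2" above: a 14-byte Ethernet header, a 20-byte IPv4
 * header and a 20-byte TCP header give total_hlen = (14 + 20 + 20) / 2 = 27.
 */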
11341 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11343 if (xmit_type & XMIT_CSUM_V4)
11344 tx_start_bd->bd_flags.as_bitfield |=
11345 ETH_TX_BD_FLAGS_IP_CSUM;
11347 tx_start_bd->bd_flags.as_bitfield |=
11348 ETH_TX_BD_FLAGS_IPV6;
11350 if (xmit_type & XMIT_CSUM_TCP) {
11351 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11354 s8 fix = SKB_CS_OFF(skb); /* signed! */
11356 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11358 DP(NETIF_MSG_TX_QUEUED,
11359 "hlen %d fix %d csum before fix %x\n",
11360 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11362 /* HW bug: fixup the CSUM */
11363 pbd->tcp_pseudo_csum =
11364 bnx2x_csum_fix(skb_transport_header(skb),
11365 SKB_CS(skb), fix);
11367 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11368 pbd->tcp_pseudo_csum);
11372 mapping = pci_map_single(bp->pdev, skb->data,
11373 skb_headlen(skb), PCI_DMA_TODEVICE);
11375 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11376 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11377 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11378 tx_start_bd->nbd = cpu_to_le16(nbd);
11379 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11380 pkt_size = tx_start_bd->nbytes;
11382 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
11383 " nbytes %d flags %x vlan %x\n",
11384 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11385 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11386 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11388 if (xmit_type & XMIT_GSO) {
11390 DP(NETIF_MSG_TX_QUEUED,
11391 "TSO packet len %d hlen %d total len %d tso size %d\n",
11392 skb->len, hlen, skb_headlen(skb),
11393 skb_shinfo(skb)->gso_size);
11395 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11397 if (unlikely(skb_headlen(skb) > hlen))
11398 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11399 hlen, bd_prod, ++nbd);
11401 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11402 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11403 pbd->tcp_flags = pbd_tcp_flags(skb);
11405 if (xmit_type & XMIT_GSO_V4) {
11406 pbd->ip_id = swab16(ip_hdr(skb)->id);
11407 pbd->tcp_pseudo_csum =
11408 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11409 ip_hdr(skb)->daddr,
11410 0, IPPROTO_TCP, 0));
11413 pbd->tcp_pseudo_csum =
11414 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11415 &ipv6_hdr(skb)->daddr,
11416 0, IPPROTO_TCP, 0));
11418 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11420 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11422 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11423 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11425 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11426 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11427 if (total_pkt_bd == NULL)
11428 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11430 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11431 frag->size, PCI_DMA_TODEVICE);
11433 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11434 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11435 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11436 le16_add_cpu(&pkt_size, frag->size);
11438 DP(NETIF_MSG_TX_QUEUED,
11439 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11440 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11441 le16_to_cpu(tx_data_bd->nbytes));
11444 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11446 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11448 /* now send a tx doorbell, counting the next BD
11449 * if the packet contains or ends with it
11451 if (TX_BD_POFF(bd_prod) < nbd)
11452 nbd++;
11454 if (total_pkt_bd != NULL)
11455 total_pkt_bd->total_pkt_bytes = pkt_size;
11458 DP(NETIF_MSG_TX_QUEUED,
11459 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11460 " tcp_flags %x xsum %x seq %u hlen %u\n",
11461 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11462 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11463 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11465 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
11468 * Make sure that the BD data is updated before updating the producer
11469 * since FW might read the BD right after the producer is updated.
11470 * This is only applicable for weak-ordered memory model archs such
11471 * as IA-64. The following barrier is also mandatory since FW will
11472 * assume packets must have BDs.
11476 fp->tx_db.data.prod += nbd;
11478 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
11482 fp->tx_bd_prod += nbd;
11484 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11485 netif_tx_stop_queue(txq);
11486 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11487 if we put Tx into XOFF state. */
11489 fp_stat->eth_q_stats.driver_xoff++;
11490 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11491 netif_tx_wake_queue(txq);
11495 return NETDEV_TX_OK;
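/* Editor's note: the tail of bnx2x_start_xmit() above follows the usual
 * descriptor-ring publish pattern: write the BDs, issue a write barrier so
 * they are visible before the doorbell, ring the doorbell, and after
 * stopping the queue on a full ring re-check bnx2x_tx_avail() so a
 * completion that raced with netif_tx_stop_queue() cannot leave the queue
 * stopped forever.
 */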
11498 /* called with rtnl_lock */
11499 static int bnx2x_open(struct net_device *dev)
11501 struct bnx2x *bp = netdev_priv(dev);
11503 netif_carrier_off(dev);
11505 bnx2x_set_power_state(bp, PCI_D0);
11507 return bnx2x_nic_load(bp, LOAD_OPEN);
11510 /* called with rtnl_lock */
11511 static int bnx2x_close(struct net_device *dev)
11513 struct bnx2x *bp = netdev_priv(dev);
11515 /* Unload the driver, release IRQs */
11516 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11517 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11518 if (!CHIP_REV_IS_SLOW(bp))
11519 bnx2x_set_power_state(bp, PCI_D3hot);
11524 /* called with netif_tx_lock from dev_mcast.c */
11525 static void bnx2x_set_rx_mode(struct net_device *dev)
11527 struct bnx2x *bp = netdev_priv(dev);
11528 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11529 int port = BP_PORT(bp);
11531 if (bp->state != BNX2X_STATE_OPEN) {
11532 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11533 return;
11534 }
11536 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11538 if (dev->flags & IFF_PROMISC)
11539 rx_mode = BNX2X_RX_MODE_PROMISC;
11541 else if ((dev->flags & IFF_ALLMULTI) ||
11542 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11543 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11545 else { /* some multicasts */
11546 if (CHIP_IS_E1(bp)) {
11547 int i, old, offset;
11548 struct dev_mc_list *mclist;
11549 struct mac_configuration_cmd *config =
11550 bnx2x_sp(bp, mcast_config);
11552 for (i = 0, mclist = dev->mc_list;
11553 mclist && (i < dev->mc_count);
11554 i++, mclist = mclist->next) {
11556 config->config_table[i].
11557 cam_entry.msb_mac_addr =
11558 swab16(*(u16 *)&mclist->dmi_addr[0]);
11559 config->config_table[i].
11560 cam_entry.middle_mac_addr =
11561 swab16(*(u16 *)&mclist->dmi_addr[2]);
11562 config->config_table[i].
11563 cam_entry.lsb_mac_addr =
11564 swab16(*(u16 *)&mclist->dmi_addr[4]);
11565 config->config_table[i].cam_entry.flags =
11566 cpu_to_le16(port);
11567 config->config_table[i].
11568 target_table_entry.flags = 0;
11569 config->config_table[i].target_table_entry.
11570 clients_bit_vector =
11571 cpu_to_le32(1 << BP_L_ID(bp));
11572 config->config_table[i].
11573 target_table_entry.vlan_id = 0;
11576 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11577 config->config_table[i].
11578 cam_entry.msb_mac_addr,
11579 config->config_table[i].
11580 cam_entry.middle_mac_addr,
11581 config->config_table[i].
11582 cam_entry.lsb_mac_addr);
11584 old = config->hdr.length;
11586 for (; i < old; i++) {
11587 if (CAM_IS_INVALID(config->
11588 config_table[i])) {
11589 /* already invalidated */
11590 break;
11591 }
11592 /* invalidate */
11593 CAM_INVALIDATE(config->
11594 config_table[i]);
11595 }
11598 if (CHIP_REV_IS_SLOW(bp))
11599 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11601 offset = BNX2X_MAX_MULTICAST*(1 + port);
11603 config->hdr.length = i;
11604 config->hdr.offset = offset;
11605 config->hdr.client_id = bp->fp->cl_id;
11606 config->hdr.reserved1 = 0;
11608 bp->set_mac_pending++;
11611 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11612 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11613 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11616 /* Accept one or more multicasts */
11617 struct dev_mc_list *mclist;
11618 u32 mc_filter[MC_HASH_SIZE];
11619 u32 crc, bit, regidx;
11622 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11624 for (i = 0, mclist = dev->mc_list;
11625 mclist && (i < dev->mc_count);
11626 i++, mclist = mclist->next) {
11628 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11631 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11632 bit = (crc >> 24) & 0xff;
11633 regidx = bit >> 5;
11634 bit &= 0x1f;
11635 mc_filter[regidx] |= (1 << bit);
11638 for (i = 0; i < MC_HASH_SIZE; i++)
11639 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11640 mc_filter[i]);
11644 bp->rx_mode = rx_mode;
11645 bnx2x_set_storm_rx_mode(bp);
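/* Editor's sketch (illustrative, not driver code): how the non-E1 branch
 * above maps a multicast MAC to one bit of the 8x32-bit MC hash table,
 * mirroring the crc32c_le() math in bnx2x_set_rx_mode():
 */
static inline void example_mc_hash_pos(const u8 *mac, u32 *regidx, u32 *bit)
{
	u32 crc = crc32c_le(0, mac, ETH_ALEN);	/* same CRC as the driver */

	*bit = (crc >> 24) & 0xff;	/* 0..255: one of 256 filter bits */
	*regidx = *bit >> 5;		/* which of the MC_HASH_SIZE registers */
	*bit &= 0x1f;			/* bit position within that register */
}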
11648 /* called with rtnl_lock */
11649 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11651 struct sockaddr *addr = p;
11652 struct bnx2x *bp = netdev_priv(dev);
11654 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11655 return -EINVAL;
11657 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11658 if (netif_running(dev)) {
11659 if (CHIP_IS_E1(bp))
11660 bnx2x_set_eth_mac_addr_e1(bp, 1);
11662 bnx2x_set_eth_mac_addr_e1h(bp, 1);
11668 /* called with rtnl_lock */
11669 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11670 int devad, u16 addr)
11672 struct bnx2x *bp = netdev_priv(netdev);
11675 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11677 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11678 prtad, devad, addr);
11680 if (prtad != bp->mdio.prtad) {
11681 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11682 prtad, bp->mdio.prtad);
11683 return -EINVAL;
11684 }
11686 /* The HW expects different devad if CL22 is used */
11687 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11689 bnx2x_acquire_phy_lock(bp);
11690 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11691 devad, addr, &value);
11692 bnx2x_release_phy_lock(bp);
11693 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11700 /* called with rtnl_lock */
11701 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11702 u16 addr, u16 value)
11704 struct bnx2x *bp = netdev_priv(netdev);
11705 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11708 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11709 " value 0x%x\n", prtad, devad, addr, value);
11711 if (prtad != bp->mdio.prtad) {
11712 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11713 prtad, bp->mdio.prtad);
11714 return -EINVAL;
11715 }
11717 /* The HW expects different devad if CL22 is used */
11718 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11720 bnx2x_acquire_phy_lock(bp);
11721 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11722 devad, addr, value);
11723 bnx2x_release_phy_lock(bp);
11727 /* called with rtnl_lock */
11728 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11730 struct bnx2x *bp = netdev_priv(dev);
11731 struct mii_ioctl_data *mdio = if_mii(ifr);
11733 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11734 mdio->phy_id, mdio->reg_num, mdio->val_in);
11736 if (!netif_running(dev))
11737 return -EAGAIN;
11739 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11742 /* called with rtnl_lock */
11743 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11745 struct bnx2x *bp = netdev_priv(dev);
11748 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11749 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11750 return -EINVAL;
11752 /* This does not race with packet allocation
11753 * because the actual alloc size is
11754 * only updated as part of load
11756 dev->mtu = new_mtu;
11758 if (netif_running(dev)) {
11759 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11760 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11766 static void bnx2x_tx_timeout(struct net_device *dev)
11768 struct bnx2x *bp = netdev_priv(dev);
11770 #ifdef BNX2X_STOP_ON_ERROR
11774 /* This allows the netif to be shut down gracefully before resetting */
11775 schedule_work(&bp->reset_task);
11779 /* called with rtnl_lock */
11780 static void bnx2x_vlan_rx_register(struct net_device *dev,
11781 struct vlan_group *vlgrp)
11783 struct bnx2x *bp = netdev_priv(dev);
11787 /* Set flags according to the required capabilities */
11788 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11790 if (dev->features & NETIF_F_HW_VLAN_TX)
11791 bp->flags |= HW_VLAN_TX_FLAG;
11793 if (dev->features & NETIF_F_HW_VLAN_RX)
11794 bp->flags |= HW_VLAN_RX_FLAG;
11796 if (netif_running(dev))
11797 bnx2x_set_client_config(bp);
11802 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11803 static void poll_bnx2x(struct net_device *dev)
11805 struct bnx2x *bp = netdev_priv(dev);
11807 disable_irq(bp->pdev->irq);
11808 bnx2x_interrupt(bp->pdev->irq, dev);
11809 enable_irq(bp->pdev->irq);
11813 static const struct net_device_ops bnx2x_netdev_ops = {
11814 .ndo_open = bnx2x_open,
11815 .ndo_stop = bnx2x_close,
11816 .ndo_start_xmit = bnx2x_start_xmit,
11817 .ndo_set_multicast_list = bnx2x_set_rx_mode,
11818 .ndo_set_mac_address = bnx2x_change_mac_addr,
11819 .ndo_validate_addr = eth_validate_addr,
11820 .ndo_do_ioctl = bnx2x_ioctl,
11821 .ndo_change_mtu = bnx2x_change_mtu,
11822 .ndo_tx_timeout = bnx2x_tx_timeout,
11824 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11826 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11827 .ndo_poll_controller = poll_bnx2x,
11831 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11832 struct net_device *dev)
11837 SET_NETDEV_DEV(dev, &pdev->dev);
11838 bp = netdev_priv(dev);
11843 bp->func = PCI_FUNC(pdev->devfn);
11845 rc = pci_enable_device(pdev);
11847 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11851 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11852 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11855 goto err_out_disable;
11858 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11859 printk(KERN_ERR PFX "Cannot find second PCI device"
11860 " base address, aborting\n");
11862 goto err_out_disable;
11865 if (atomic_read(&pdev->enable_cnt) == 1) {
11866 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11868 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11870 goto err_out_disable;
11873 pci_set_master(pdev);
11874 pci_save_state(pdev);
11877 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11878 if (bp->pm_cap == 0) {
11879 printk(KERN_ERR PFX "Cannot find power management"
11880 " capability, aborting\n");
11882 goto err_out_release;
11885 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11886 if (bp->pcie_cap == 0) {
11887 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11890 goto err_out_release;
11893 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11894 bp->flags |= USING_DAC_FLAG;
11895 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11896 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11897 " failed, aborting\n");
11899 goto err_out_release;
11902 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11903 printk(KERN_ERR PFX "System does not support DMA,"
11906 goto err_out_release;
11909 dev->mem_start = pci_resource_start(pdev, 0);
11910 dev->base_addr = dev->mem_start;
11911 dev->mem_end = pci_resource_end(pdev, 0);
11913 dev->irq = pdev->irq;
11915 bp->regview = pci_ioremap_bar(pdev, 0);
11916 if (!bp->regview) {
11917 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11919 goto err_out_release;
11922 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11923 min_t(u64, BNX2X_DB_SIZE,
11924 pci_resource_len(pdev, 2)));
11925 if (!bp->doorbells) {
11926 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11928 goto err_out_unmap;
11931 bnx2x_set_power_state(bp, PCI_D0);
11933 /* clean indirect addresses */
11934 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11935 PCICFG_VENDOR_ID_OFFSET);
11936 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11937 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11938 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11939 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11941 dev->watchdog_timeo = TX_TIMEOUT;
11943 dev->netdev_ops = &bnx2x_netdev_ops;
11944 dev->ethtool_ops = &bnx2x_ethtool_ops;
11945 dev->features |= NETIF_F_SG;
11946 dev->features |= NETIF_F_HW_CSUM;
11947 if (bp->flags & USING_DAC_FLAG)
11948 dev->features |= NETIF_F_HIGHDMA;
11949 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11950 dev->features |= NETIF_F_TSO6;
11952 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11953 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11955 dev->vlan_features |= NETIF_F_SG;
11956 dev->vlan_features |= NETIF_F_HW_CSUM;
11957 if (bp->flags & USING_DAC_FLAG)
11958 dev->vlan_features |= NETIF_F_HIGHDMA;
11959 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11960 dev->vlan_features |= NETIF_F_TSO6;
11963 /* get_port_hwinfo() will set prtad and mmds properly */
11964 bp->mdio.prtad = MDIO_PRTAD_NONE;
11966 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11967 bp->mdio.dev = dev;
11968 bp->mdio.mdio_read = bnx2x_mdio_read;
11969 bp->mdio.mdio_write = bnx2x_mdio_write;
11975 iounmap(bp->regview);
11976 bp->regview = NULL;
11978 if (bp->doorbells) {
11979 iounmap(bp->doorbells);
11980 bp->doorbells = NULL;
11984 if (atomic_read(&pdev->enable_cnt) == 1)
11985 pci_release_regions(pdev);
11988 pci_disable_device(pdev);
11989 pci_set_drvdata(pdev, NULL);
11995 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11996 int *width, int *speed)
11998 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
12000 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
12002 /* return value of 1=2.5GHz 2=5GHz */
12003 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
12006 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
12008 const struct firmware *firmware = bp->firmware;
12009 struct bnx2x_fw_file_hdr *fw_hdr;
12010 struct bnx2x_fw_file_section *sections;
12011 u32 offset, len, num_ops;
12016 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
12019 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12020 sections = (struct bnx2x_fw_file_section *)fw_hdr;
12022 /* Make sure none of the offsets and sizes make us read beyond
12023 * the end of the firmware data */
12024 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12025 offset = be32_to_cpu(sections[i].offset);
12026 len = be32_to_cpu(sections[i].len);
12027 if (offset + len > firmware->size) {
12028 printk(KERN_ERR PFX "Section %d length is out of "
12034 /* Likewise for the init_ops offsets */
12035 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
12036 ops_offsets = (u16 *)(firmware->data + offset);
12037 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12039 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12040 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
12041 printk(KERN_ERR PFX "Section offset %d is out of "
12047 /* Check FW version */
12048 offset = be32_to_cpu(fw_hdr->fw_version.offset);
12049 fw_ver = firmware->data + offset;
12050 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
12051 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
12052 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
12053 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
12054 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
12055 " Should be %d.%d.%d.%d\n",
12056 fw_ver[0], fw_ver[1], fw_ver[2],
12057 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
12058 BCM_5710_FW_MINOR_VERSION,
12059 BCM_5710_FW_REVISION_VERSION,
12060 BCM_5710_FW_ENGINEERING_VERSION);
12067 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12069 const __be32 *source = (const __be32 *)_source;
12070 u32 *target = (u32 *)_target;
12073 for (i = 0; i < n/4; i++)
12074 target[i] = be32_to_cpu(source[i]);
12078 Ops array is stored in the following format:
12079 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12081 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12083 const __be32 *source = (const __be32 *)_source;
12084 struct raw_op *target = (struct raw_op *)_target;
12085 u32 i, j, tmp;
12087 for (i = 0, j = 0; i < n/8; i++, j += 2) {
12088 tmp = be32_to_cpu(source[j]);
12089 target[i].op = (tmp >> 24) & 0xff;
12090 target[i].offset = tmp & 0xffffff;
12091 target[i].raw_data = be32_to_cpu(source[j+1]);
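/* Editor's note (worked example, hypothetical values): a raw big-endian
 * pair {0x05001234, 0xdeadbeef} unpacks as op = 0x05, offset = 0x001234
 * and raw_data = 0xdeadbeef, matching the layout documented above.
 */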
12095 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12097 const __be16 *source = (const __be16 *)_source;
12098 u16 *target = (u16 *)_target;
12101 for (i = 0; i < n/2; i++)
12102 target[i] = be16_to_cpu(source[i]);
12105 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
12107 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12108 bp->arr = kmalloc(len, GFP_KERNEL); \
12109 if (!bp->arr) { \
12110 printk(KERN_ERR PFX "Failed to allocate %d bytes " \
12111 "for "#arr"\n", len); \
12114 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12115 (u8 *)bp->arr, len); \
12118 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12120 char fw_file_name[40] = {0};
12121 struct bnx2x_fw_file_hdr *fw_hdr;
12124 /* Create a FW file name */
12125 if (CHIP_IS_E1(bp))
12126 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
12128 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
12130 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
12131 BCM_5710_FW_MAJOR_VERSION,
12132 BCM_5710_FW_MINOR_VERSION,
12133 BCM_5710_FW_REVISION_VERSION,
12134 BCM_5710_FW_ENGINEERING_VERSION);
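/* Editor's note: the result is FW_FILE_PREFIX_E1 or FW_FILE_PREFIX_E1H
 * followed by the four dot-separated BCM_5710_FW_*_VERSION numbers and a
 * ".fw" suffix, i.e. "<prefix><major>.<minor>.<rev>.<eng>.fw".
 */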
12136 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
12138 rc = request_firmware(&bp->firmware, fw_file_name, dev);
12140 printk(KERN_ERR PFX "Can't load firmware file %s\n",
12142 goto request_firmware_exit;
12145 rc = bnx2x_check_firmware(bp);
12147 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
12148 goto request_firmware_exit;
12151 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12153 /* Initialize the pointers to the init arrays */
12155 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12158 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12161 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12164 /* STORMs firmware */
12165 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12166 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12167 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
12168 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12169 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12170 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12171 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
12172 be32_to_cpu(fw_hdr->usem_pram_data.offset);
12173 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12174 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12175 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
12176 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12177 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12178 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12179 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
12180 be32_to_cpu(fw_hdr->csem_pram_data.offset);
12184 init_offsets_alloc_err:
12185 kfree(bp->init_ops);
12186 init_ops_alloc_err:
12187 kfree(bp->init_data);
12188 request_firmware_exit:
12189 release_firmware(bp->firmware);
12195 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12196 const struct pci_device_id *ent)
12198 struct net_device *dev = NULL;
12200 int pcie_width, pcie_speed;
12203 /* dev zeroed in init_etherdev */
12204 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
12206 printk(KERN_ERR PFX "Cannot allocate net device\n");
12210 bp = netdev_priv(dev);
12211 bp->msglevel = debug;
12213 pci_set_drvdata(pdev, dev);
12215 rc = bnx2x_init_dev(pdev, dev);
12221 rc = bnx2x_init_bp(bp);
12223 goto init_one_exit;
12225 /* Set init arrays */
12226 rc = bnx2x_init_firmware(bp, &pdev->dev);
12228 printk(KERN_ERR PFX "Error loading firmware\n");
12229 goto init_one_exit;
12232 rc = register_netdev(dev);
12234 dev_err(&pdev->dev, "Cannot register net device\n");
12235 goto init_one_exit;
12238 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12239 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
12240 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
12241 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12242 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
12243 dev->base_addr, bp->pdev->irq);
12244 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
12250 iounmap(bp->regview);
12253 iounmap(bp->doorbells);
12257 if (atomic_read(&pdev->enable_cnt) == 1)
12258 pci_release_regions(pdev);
12260 pci_disable_device(pdev);
12261 pci_set_drvdata(pdev, NULL);
12266 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12268 struct net_device *dev = pci_get_drvdata(pdev);
12272 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12275 bp = netdev_priv(dev);
12277 unregister_netdev(dev);
12279 kfree(bp->init_ops_offsets);
12280 kfree(bp->init_ops);
12281 kfree(bp->init_data);
12282 release_firmware(bp->firmware);
12285 iounmap(bp->regview);
12288 iounmap(bp->doorbells);
12292 if (atomic_read(&pdev->enable_cnt) == 1)
12293 pci_release_regions(pdev);
12295 pci_disable_device(pdev);
12296 pci_set_drvdata(pdev, NULL);
12299 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12301 struct net_device *dev = pci_get_drvdata(pdev);
12305 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12308 bp = netdev_priv(dev);
12312 pci_save_state(pdev);
12314 if (!netif_running(dev)) {
12319 netif_device_detach(dev);
12321 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12323 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
12330 static int bnx2x_resume(struct pci_dev *pdev)
12332 struct net_device *dev = pci_get_drvdata(pdev);
12337 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12340 bp = netdev_priv(dev);
12344 pci_restore_state(pdev);
12346 if (!netif_running(dev)) {
12351 bnx2x_set_power_state(bp, PCI_D0);
12352 netif_device_attach(dev);
12354 rc = bnx2x_nic_load(bp, LOAD_OPEN);
12361 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12365 bp->state = BNX2X_STATE_ERROR;
12367 bp->rx_mode = BNX2X_RX_MODE_NONE;
12369 bnx2x_netif_stop(bp, 0);
12371 del_timer_sync(&bp->timer);
12372 bp->stats_state = STATS_STATE_DISABLED;
12373 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12376 bnx2x_free_irq(bp);
12378 if (CHIP_IS_E1(bp)) {
12379 struct mac_configuration_cmd *config =
12380 bnx2x_sp(bp, mcast_config);
12382 for (i = 0; i < config->hdr.length; i++)
12383 CAM_INVALIDATE(config->config_table[i]);
12386 /* Free SKBs, SGEs, TPA pool and driver internals */
12387 bnx2x_free_skbs(bp);
12388 for_each_rx_queue(bp, i)
12389 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12390 for_each_rx_queue(bp, i)
12391 netif_napi_del(&bnx2x_fp(bp, i, napi));
12392 bnx2x_free_mem(bp);
12394 bp->state = BNX2X_STATE_CLOSED;
12396 netif_carrier_off(bp->dev);
12401 static void bnx2x_eeh_recover(struct bnx2x *bp)
12405 mutex_init(&bp->port.phy_mutex);
12407 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12408 bp->link_params.shmem_base = bp->common.shmem_base;
12409 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12411 if (!bp->common.shmem_base ||
12412 (bp->common.shmem_base < 0xA0000) ||
12413 (bp->common.shmem_base >= 0xC0000)) {
12414 BNX2X_DEV_INFO("MCP not active\n");
12415 bp->flags |= NO_MCP_FLAG;
12419 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12420 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12421 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12422 BNX2X_ERR("BAD MCP validity signature\n");
12424 if (!BP_NOMCP(bp)) {
12425 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12426 & DRV_MSG_SEQ_NUMBER_MASK);
12427 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12432 * bnx2x_io_error_detected - called when PCI error is detected
12433 * @pdev: Pointer to PCI device
12434 * @state: The current pci connection state
12436 * This function is called after a PCI bus error affecting
12437 * this device has been detected.
12439 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12440 pci_channel_state_t state)
12442 struct net_device *dev = pci_get_drvdata(pdev);
12443 struct bnx2x *bp = netdev_priv(dev);
12447 netif_device_detach(dev);
12449 if (state == pci_channel_io_perm_failure) {
12451 return PCI_ERS_RESULT_DISCONNECT;
12454 if (netif_running(dev))
12455 bnx2x_eeh_nic_unload(bp);
12457 pci_disable_device(pdev);
12461 /* Request a slot reset */
12462 return PCI_ERS_RESULT_NEED_RESET;
12466 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12467 * @pdev: Pointer to PCI device
12469 * Restart the card from scratch, as if from a cold boot.
12471 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12473 struct net_device *dev = pci_get_drvdata(pdev);
12474 struct bnx2x *bp = netdev_priv(dev);
12478 if (pci_enable_device(pdev)) {
12479 dev_err(&pdev->dev,
12480 "Cannot re-enable PCI device after reset\n");
12482 return PCI_ERS_RESULT_DISCONNECT;
12485 pci_set_master(pdev);
12486 pci_restore_state(pdev);
12488 if (netif_running(dev))
12489 bnx2x_set_power_state(bp, PCI_D0);
12493 return PCI_ERS_RESULT_RECOVERED;
12497 * bnx2x_io_resume - called when traffic can start flowing again
12498 * @pdev: Pointer to PCI device
12500 * This callback is called when the error recovery driver tells us that
12501 * it's OK to resume normal operation.
12503 static void bnx2x_io_resume(struct pci_dev *pdev)
12505 struct net_device *dev = pci_get_drvdata(pdev);
12506 struct bnx2x *bp = netdev_priv(dev);
12510 bnx2x_eeh_recover(bp);
12512 if (netif_running(dev))
12513 bnx2x_nic_load(bp, LOAD_NORMAL);
12515 netif_device_attach(dev);
12520 static struct pci_error_handlers bnx2x_err_handler = {
12521 .error_detected = bnx2x_io_error_detected,
12522 .slot_reset = bnx2x_io_slot_reset,
12523 .resume = bnx2x_io_resume,
12526 static struct pci_driver bnx2x_pci_driver = {
12527 .name = DRV_MODULE_NAME,
12528 .id_table = bnx2x_pci_tbl,
12529 .probe = bnx2x_init_one,
12530 .remove = __devexit_p(bnx2x_remove_one),
12531 .suspend = bnx2x_suspend,
12532 .resume = bnx2x_resume,
12533 .err_handler = &bnx2x_err_handler,
12536 static int __init bnx2x_init(void)
12540 printk(KERN_INFO "%s", version);
12542 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12543 if (bnx2x_wq == NULL) {
12544 printk(KERN_ERR PFX "Cannot create workqueue\n");
12548 ret = pci_register_driver(&bnx2x_pci_driver);
12550 printk(KERN_ERR PFX "Cannot register driver\n");
12551 destroy_workqueue(bnx2x_wq);
12556 static void __exit bnx2x_cleanup(void)
12558 pci_unregister_driver(&bnx2x_pci_driver);
12560 destroy_workqueue(bnx2x_wq);
12563 module_init(bnx2x_init);
12564 module_exit(bnx2x_cleanup);
12568 /* count denotes the number of new completions we have seen */
12569 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12571 struct eth_spe *spe;
12573 #ifdef BNX2X_STOP_ON_ERROR
12574 if (unlikely(bp->panic))
12575 return;
12578 spin_lock_bh(&bp->spq_lock);
12579 bp->cnic_spq_pending -= count;
12581 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
12582 bp->cnic_spq_pending++) {
12584 if (!bp->cnic_kwq_pending)
12587 spe = bnx2x_sp_get_next(bp);
12588 *spe = *bp->cnic_kwq_cons;
12590 bp->cnic_kwq_pending--;
12592 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
12593 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12595 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12596 bp->cnic_kwq_cons = bp->cnic_kwq;
12598 bp->cnic_kwq_cons++;
12600 bnx2x_sp_prod_update(bp);
12601 spin_unlock_bh(&bp->spq_lock);
12604 static int bnx2x_cnic_sp_queue(struct net_device *dev,
12605 struct kwqe_16 *kwqes[], u32 count)
12607 struct bnx2x *bp = netdev_priv(dev);
12610 #ifdef BNX2X_STOP_ON_ERROR
12611 if (unlikely(bp->panic))
12612 return -EIO;
12615 spin_lock_bh(&bp->spq_lock);
12617 for (i = 0; i < count; i++) {
12618 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12620 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12623 *bp->cnic_kwq_prod = *spe;
12625 bp->cnic_kwq_pending++;
12627 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
12628 spe->hdr.conn_and_cmd_data, spe->hdr.type,
12629 spe->data.mac_config_addr.hi,
12630 spe->data.mac_config_addr.lo,
12631 bp->cnic_kwq_pending);
12633 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12634 bp->cnic_kwq_prod = bp->cnic_kwq;
12636 bp->cnic_kwq_prod++;
12639 spin_unlock_bh(&bp->spq_lock);
12641 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12642 bnx2x_cnic_sp_post(bp, 0);
12647 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12649 struct cnic_ops *c_ops;
12652 mutex_lock(&bp->cnic_mutex);
12653 c_ops = bp->cnic_ops;
12655 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12656 mutex_unlock(&bp->cnic_mutex);
12661 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12663 struct cnic_ops *c_ops;
12667 c_ops = rcu_dereference(bp->cnic_ops);
12669 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12676 * for commands that have no data
12678 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12680 struct cnic_ctl_info ctl = {0};
12684 return bnx2x_cnic_ctl_send(bp, &ctl);
12687 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12689 struct cnic_ctl_info ctl;
12691 /* first we tell CNIC and only then we count this as a completion */
12692 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12693 ctl.data.comp.cid = cid;
12695 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12696 bnx2x_cnic_sp_post(bp, 1);
12699 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12701 struct bnx2x *bp = netdev_priv(dev);
12704 switch (ctl->cmd) {
12705 case DRV_CTL_CTXTBL_WR_CMD: {
12706 u32 index = ctl->data.io.offset;
12707 dma_addr_t addr = ctl->data.io.dma_addr;
12709 bnx2x_ilt_wr(bp, index, addr);
12713 case DRV_CTL_COMPLETION_CMD: {
12714 int count = ctl->data.comp.comp_count;
12716 bnx2x_cnic_sp_post(bp, count);
12720 /* rtnl_lock is held. */
12721 case DRV_CTL_START_L2_CMD: {
12722 u32 cli = ctl->data.ring.client_id;
12724 bp->rx_mode_cl_mask |= (1 << cli);
12725 bnx2x_set_storm_rx_mode(bp);
12729 /* rtnl_lock is held. */
12730 case DRV_CTL_STOP_L2_CMD: {
12731 u32 cli = ctl->data.ring.client_id;
12733 bp->rx_mode_cl_mask &= ~(1 << cli);
12734 bnx2x_set_storm_rx_mode(bp);
12739 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12746 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12748 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12750 if (bp->flags & USING_MSIX_FLAG) {
12751 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12752 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12753 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12755 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12756 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12758 cp->irq_arr[0].status_blk = bp->cnic_sb;
12759 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
12760 cp->irq_arr[1].status_blk = bp->def_status_blk;
12761 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12766 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12769 struct bnx2x *bp = netdev_priv(dev);
12770 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12775 if (atomic_read(&bp->intr_sem) != 0)
12778 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12782 bp->cnic_kwq_cons = bp->cnic_kwq;
12783 bp->cnic_kwq_prod = bp->cnic_kwq;
12784 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
12786 bp->cnic_spq_pending = 0;
12787 bp->cnic_kwq_pending = 0;
12789 bp->cnic_data = data;
12792 cp->drv_state = CNIC_DRV_STATE_REGD;
12794 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
12796 bnx2x_setup_cnic_irq_info(bp);
12797 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
12798 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
12799 rcu_assign_pointer(bp->cnic_ops, ops);
12804 static int bnx2x_unregister_cnic(struct net_device *dev)
12806 struct bnx2x *bp = netdev_priv(dev);
12807 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12809 mutex_lock(&bp->cnic_mutex);
12810 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
12811 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
12812 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
12815 rcu_assign_pointer(bp->cnic_ops, NULL);
12816 mutex_unlock(&bp->cnic_mutex);
12818 kfree(bp->cnic_kwq);
12819 bp->cnic_kwq = NULL;
12824 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12826 struct bnx2x *bp = netdev_priv(dev);
12827 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12829 cp->drv_owner = THIS_MODULE;
12830 cp->chip_id = CHIP_ID(bp);
12831 cp->pdev = bp->pdev;
12832 cp->io_base = bp->regview;
12833 cp->io_base2 = bp->doorbells;
12834 cp->max_kwqe_pending = 8;
12835 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12836 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12837 cp->ctx_tbl_len = CNIC_ILT_LINES;
12838 cp->starting_cid = BCM_CNIC_CID_START;
12839 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12840 cp->drv_ctl = bnx2x_drv_ctl;
12841 cp->drv_register_cnic = bnx2x_register_cnic;
12842 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12846 EXPORT_SYMBOL(bnx2x_cnic_probe);
12848 #endif /* BCM_CNIC */