1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
53 #include "bnx2x_reg.h"
54 #include "bnx2x_fw_defs.h"
55 #include "bnx2x_hsi.h"
56 #include "bnx2x_link.h"
58 #include "bnx2x_init.h"
60 #define DRV_MODULE_VERSION "1.45.24"
61 #define DRV_MODULE_RELDATE "2009/01/14"
62 #define BNX2X_BC_VER 0x040200
64 /* Time in jiffies before concluding the transmitter is hung */
65 #define TX_TIMEOUT (5*HZ)
67 static char version[] __devinitdata =
68 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
69 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
71 MODULE_AUTHOR("Eliezer Tamir");
72 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
73 MODULE_LICENSE("GPL");
74 MODULE_VERSION(DRV_MODULE_VERSION);
76 static int disable_tpa;
80 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
83 module_param(disable_tpa, int, 0);
84 module_param(use_inta, int, 0);
85 module_param(poll, int, 0);
86 module_param(debug, int, 0);
87 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
88 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
89 MODULE_PARM_DESC(poll, "use polling (for debug)");
90 MODULE_PARM_DESC(debug, "default debug msglevel");
93 module_param(use_multi, int, 0);
94 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
96 static struct workqueue_struct *bnx2x_wq;
98 enum bnx2x_board_type {
104 /* indexed by board_type, above */
107 } board_info[] __devinitdata = {
108 { "Broadcom NetXtreme II BCM57710 XGb" },
109 { "Broadcom NetXtreme II BCM57711 XGb" },
110 { "Broadcom NetXtreme II BCM57711E XGb" }
114 static const struct pci_device_id bnx2x_pci_tbl[] = {
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
124 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
126 /****************************************************************************
127 * General service functions
128 ****************************************************************************/
131 * locking is done by mcp
133 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
135 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
136 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
138 PCICFG_VENDOR_ID_OFFSET);
141 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
145 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
146 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
148 PCICFG_VENDOR_ID_OFFSET);
153 static const u32 dmae_reg_go_c[] = {
154 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
155 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
156 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
157 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
160 /* copy command into DMAE command memory and set DMAE command go */
161 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
167 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
168 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
169 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
171 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
172 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
174 REG_WR(bp, dmae_reg_go_c[idx], 1);
177 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
180 struct dmae_command *dmae = &bp->init_dmae;
181 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
184 if (!bp->dmae_ready) {
185 u32 *data = bnx2x_sp(bp, wb_data[0]);
187 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
188 " using indirect\n", dst_addr, len32);
189 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
193 mutex_lock(&bp->dmae_mutex);
195 memset(dmae, 0, sizeof(struct dmae_command));
197 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
198 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
199 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
201 DMAE_CMD_ENDIANITY_B_DW_SWAP |
203 DMAE_CMD_ENDIANITY_DW_SWAP |
205 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
206 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
207 dmae->src_addr_lo = U64_LO(dma_addr);
208 dmae->src_addr_hi = U64_HI(dma_addr);
209 dmae->dst_addr_lo = dst_addr >> 2;
210 dmae->dst_addr_hi = 0;
212 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
213 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
214 dmae->comp_val = DMAE_COMP_VAL;
216 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
217 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
218 "dst_addr [%x:%08x (%08x)]\n"
219 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
220 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
221 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
222 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
223 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
224 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
225 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
229 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
233 while (*wb_comp != DMAE_COMP_VAL) {
234 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
237 BNX2X_ERR("dmae timeout!\n");
241 /* adjust delay for emulation/FPGA */
242 if (CHIP_REV_IS_SLOW(bp))
248 mutex_unlock(&bp->dmae_mutex);
251 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
253 struct dmae_command *dmae = &bp->init_dmae;
254 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
257 if (!bp->dmae_ready) {
258 u32 *data = bnx2x_sp(bp, wb_data[0]);
261 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
262 " using indirect\n", src_addr, len32);
263 for (i = 0; i < len32; i++)
264 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
268 mutex_lock(&bp->dmae_mutex);
270 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
271 memset(dmae, 0, sizeof(struct dmae_command));
273 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
274 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
275 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
277 DMAE_CMD_ENDIANITY_B_DW_SWAP |
279 DMAE_CMD_ENDIANITY_DW_SWAP |
281 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
282 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
283 dmae->src_addr_lo = src_addr >> 2;
284 dmae->src_addr_hi = 0;
285 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
286 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
288 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
289 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
290 dmae->comp_val = DMAE_COMP_VAL;
292 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
293 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
294 "dst_addr [%x:%08x (%08x)]\n"
295 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
296 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
297 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
298 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
302 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
306 while (*wb_comp != DMAE_COMP_VAL) {
309 BNX2X_ERR("dmae timeout!\n");
313 /* adjust delay for emulation/FPGA */
314 if (CHIP_REV_IS_SLOW(bp))
319 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
320 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
321 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
323 mutex_unlock(&bp->dmae_mutex);
326 /* used only for slowpath so not inlined */
327 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
331 wb_write[0] = val_hi;
332 wb_write[1] = val_lo;
333 REG_WR_DMAE(bp, reg, wb_write, 2);
337 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
341 REG_RD_DMAE(bp, reg, wb_data, 2);
343 return HILO_U64(wb_data[0], wb_data[1]);
347 static int bnx2x_mc_assert(struct bnx2x *bp)
351 u32 row0, row1, row2, row3;
354 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
355 XSTORM_ASSERT_LIST_INDEX_OFFSET);
357 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
359 /* print the asserts */
360 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
362 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
363 XSTORM_ASSERT_LIST_OFFSET(i));
364 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
366 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
368 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
371 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
372 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
373 " 0x%08x 0x%08x 0x%08x\n",
374 i, row3, row2, row1, row0);
382 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
383 TSTORM_ASSERT_LIST_INDEX_OFFSET);
385 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
387 /* print the asserts */
388 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
390 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
391 TSTORM_ASSERT_LIST_OFFSET(i));
392 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
394 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
396 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
399 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
400 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
401 " 0x%08x 0x%08x 0x%08x\n",
402 i, row3, row2, row1, row0);
410 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
411 CSTORM_ASSERT_LIST_INDEX_OFFSET);
413 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
415 /* print the asserts */
416 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
418 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
419 CSTORM_ASSERT_LIST_OFFSET(i));
420 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
422 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
424 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
427 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
428 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
429 " 0x%08x 0x%08x 0x%08x\n",
430 i, row3, row2, row1, row0);
438 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
439 USTORM_ASSERT_LIST_INDEX_OFFSET);
441 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
443 /* print the asserts */
444 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
446 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
447 USTORM_ASSERT_LIST_OFFSET(i));
448 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
449 USTORM_ASSERT_LIST_OFFSET(i) + 4);
450 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 USTORM_ASSERT_LIST_OFFSET(i) + 8);
452 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i) + 12);
455 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
456 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
457 " 0x%08x 0x%08x 0x%08x\n",
458 i, row3, row2, row1, row0);
468 static void bnx2x_fw_dump(struct bnx2x *bp)
474 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
475 mark = ((mark + 0x3) & ~0x3);
476 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
478 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
479 for (word = 0; word < 8; word++)
480 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
483 printk(KERN_CONT "%s", (char *)data);
485 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
486 for (word = 0; word < 8; word++)
487 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
490 printk(KERN_CONT "%s", (char *)data);
492 printk("\n" KERN_ERR PFX "end of fw dump\n");
495 static void bnx2x_panic_dump(struct bnx2x *bp)
500 bp->stats_state = STATS_STATE_DISABLED;
501 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
503 BNX2X_ERR("begin crash dump -----------------\n");
505 for_each_queue(bp, i) {
506 struct bnx2x_fastpath *fp = &bp->fp[i];
507 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
509 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
510 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
511 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
512 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
513 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
514 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
515 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
516 fp->rx_bd_prod, fp->rx_bd_cons,
517 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
518 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
519 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
520 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
521 " *sb_u_idx(%x) bd data(%x,%x)\n",
522 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
523 fp->status_blk->c_status_block.status_block_index,
525 fp->status_blk->u_status_block.status_block_index,
526 hw_prods->packets_prod, hw_prods->bds_prod);
528 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
529 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
530 for (j = start; j < end; j++) {
531 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
533 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
534 sw_bd->skb, sw_bd->first_bd);
537 start = TX_BD(fp->tx_bd_cons - 10);
538 end = TX_BD(fp->tx_bd_cons + 254);
539 for (j = start; j < end; j++) {
540 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
542 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
543 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
546 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
547 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
548 for (j = start; j < end; j++) {
549 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
550 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
552 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
553 j, rx_bd[1], rx_bd[0], sw_bd->skb);
556 start = RX_SGE(fp->rx_sge_prod);
557 end = RX_SGE(fp->last_max_sge);
558 for (j = start; j < end; j++) {
559 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
560 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
562 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
563 j, rx_sge[1], rx_sge[0], sw_page->page);
566 start = RCQ_BD(fp->rx_comp_cons - 10);
567 end = RCQ_BD(fp->rx_comp_cons + 503);
568 for (j = start; j < end; j++) {
569 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
571 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
572 j, cqe[0], cqe[1], cqe[2], cqe[3]);
576 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
577 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
578 " spq_prod_idx(%u)\n",
579 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
580 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
584 BNX2X_ERR("end crash dump -----------------\n");
587 static void bnx2x_int_enable(struct bnx2x *bp)
589 int port = BP_PORT(bp);
590 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
591 u32 val = REG_RD(bp, addr);
592 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
595 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
596 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
597 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
599 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
600 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
601 HC_CONFIG_0_REG_INT_LINE_EN_0 |
602 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
604 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
605 val, port, addr, msix);
607 REG_WR(bp, addr, val);
609 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
612 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
613 val, port, addr, msix);
615 REG_WR(bp, addr, val);
617 if (CHIP_IS_E1H(bp)) {
618 /* init leading/trailing edge */
620 val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
622 /* enable nig attention */
627 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
628 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
632 static void bnx2x_int_disable(struct bnx2x *bp)
634 int port = BP_PORT(bp);
635 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
636 u32 val = REG_RD(bp, addr);
638 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
639 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
640 HC_CONFIG_0_REG_INT_LINE_EN_0 |
641 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
643 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
646 REG_WR(bp, addr, val);
647 if (REG_RD(bp, addr) != val)
648 BNX2X_ERR("BUG! proper val not read from IGU!\n");
651 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
653 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
656 /* disable interrupt handling */
657 atomic_inc(&bp->intr_sem);
659 /* prevent the HW from sending interrupts */
660 bnx2x_int_disable(bp);
662 /* make sure all ISRs are done */
664 for_each_queue(bp, i)
665 synchronize_irq(bp->msix_table[i].vector);
667 /* one more for the Slow Path IRQ */
668 synchronize_irq(bp->msix_table[i].vector);
670 synchronize_irq(bp->pdev->irq);
672 /* make sure sp_task is not running */
673 cancel_delayed_work(&bp->sp_task);
674 flush_workqueue(bnx2x_wq);
680 * General service functions
683 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
684 u8 storm, u16 index, u8 op, u8 update)
686 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
687 COMMAND_REG_INT_ACK);
688 struct igu_ack_register igu_ack;
690 igu_ack.status_block_index = index;
691 igu_ack.sb_id_and_flags =
692 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
693 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
694 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
695 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
697 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
698 (*(u32 *)&igu_ack), hc_addr);
699 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
702 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
704 struct host_status_block *fpsb = fp->status_blk;
707 barrier(); /* status block is written to by the chip */
708 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
709 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
712 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
713 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
719 static u16 bnx2x_ack_int(struct bnx2x *bp)
721 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
722 COMMAND_REG_SIMD_MASK);
723 u32 result = REG_RD(bp, hc_addr);
725 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
733 * fast path service functions
736 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
740 /* Tell compiler that status block fields can change */
742 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
743 return ((fp->tx_pkt_prod != tx_cons_sb) ||
744 (fp->tx_pkt_prod != fp->tx_pkt_cons));
747 /* free skb in the packet ring at pos idx
748 * return idx of last bd freed
750 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
753 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
754 struct eth_tx_bd *tx_bd;
755 struct sk_buff *skb = tx_buf->skb;
756 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
759 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
763 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
764 tx_bd = &fp->tx_desc_ring[bd_idx];
765 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
766 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
768 nbd = le16_to_cpu(tx_bd->nbd) - 1;
769 new_cons = nbd + tx_buf->first_bd;
770 #ifdef BNX2X_STOP_ON_ERROR
771 if (nbd > (MAX_SKB_FRAGS + 2)) {
772 BNX2X_ERR("BAD nbd!\n");
777 /* Skip a parse bd and the TSO split header bd
778 since they have no mapping */
780 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
782 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
783 ETH_TX_BD_FLAGS_TCP_CSUM |
784 ETH_TX_BD_FLAGS_SW_LSO)) {
786 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
787 tx_bd = &fp->tx_desc_ring[bd_idx];
788 /* is this a TSO split header bd? */
789 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
791 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
798 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
799 tx_bd = &fp->tx_desc_ring[bd_idx];
800 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
801 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
803 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
809 tx_buf->first_bd = 0;
815 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
821 barrier(); /* Tell compiler that prod and cons can change */
822 prod = fp->tx_bd_prod;
823 cons = fp->tx_bd_cons;
825 /* NUM_TX_RINGS = number of "next-page" entries
826 It will be used as a threshold */
827 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
829 #ifdef BNX2X_STOP_ON_ERROR
831 WARN_ON(used > fp->bp->tx_ring_size);
832 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
835 return (s16)(fp->bp->tx_ring_size) - used;
838 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
840 struct bnx2x *bp = fp->bp;
841 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
844 #ifdef BNX2X_STOP_ON_ERROR
845 if (unlikely(bp->panic))
849 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
850 sw_cons = fp->tx_pkt_cons;
852 while (sw_cons != hw_cons) {
855 pkt_cons = TX_BD(sw_cons);
857 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
859 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
860 hw_cons, sw_cons, pkt_cons);
862 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
864 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
867 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
875 fp->tx_pkt_cons = sw_cons;
876 fp->tx_bd_cons = bd_cons;
878 /* Need to make the tx_cons update visible to start_xmit()
879 * before checking for netif_queue_stopped(). Without the
880 * memory barrier, there is a small possibility that start_xmit()
881 * will miss it and cause the queue to be stopped forever.
885 /* TBD need a thresh? */
886 if (unlikely(netif_queue_stopped(bp->dev))) {
888 netif_tx_lock(bp->dev);
890 if (netif_queue_stopped(bp->dev) &&
891 (bp->state == BNX2X_STATE_OPEN) &&
892 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
893 netif_wake_queue(bp->dev);
895 netif_tx_unlock(bp->dev);
900 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
901 union eth_rx_cqe *rr_cqe)
903 struct bnx2x *bp = fp->bp;
904 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
905 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
908 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
909 FP_IDX(fp), cid, command, bp->state,
910 rr_cqe->ramrod_cqe.ramrod_type);
915 switch (command | fp->state) {
916 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
917 BNX2X_FP_STATE_OPENING):
918 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
920 fp->state = BNX2X_FP_STATE_OPEN;
923 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
924 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
926 fp->state = BNX2X_FP_STATE_HALTED;
930 BNX2X_ERR("unexpected MC reply (%d) "
931 "fp->state is %x\n", command, fp->state);
934 mb(); /* force bnx2x_wait_ramrod() to see the change */
938 switch (command | bp->state) {
939 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
940 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
941 bp->state = BNX2X_STATE_OPEN;
944 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
945 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
946 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
947 fp->state = BNX2X_FP_STATE_HALTED;
950 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
951 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
952 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
956 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
957 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
958 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
959 bp->set_mac_pending = 0;
962 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
963 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
967 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
971 mb(); /* force bnx2x_wait_ramrod() to see the change */
974 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
975 struct bnx2x_fastpath *fp, u16 index)
977 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
978 struct page *page = sw_buf->page;
979 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
981 /* Skip "next page" elements */
985 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
986 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
987 __free_pages(page, PAGES_PER_SGE_SHIFT);
994 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
995 struct bnx2x_fastpath *fp, int last)
999 for (i = 0; i < last; i++)
1000 bnx2x_free_rx_sge(bp, fp, i);
1003 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1004 struct bnx2x_fastpath *fp, u16 index)
1006 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1007 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1008 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1011 if (unlikely(page == NULL))
1014 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1015 PCI_DMA_FROMDEVICE);
1016 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1017 __free_pages(page, PAGES_PER_SGE_SHIFT);
1021 sw_buf->page = page;
1022 pci_unmap_addr_set(sw_buf, mapping, mapping);
1024 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1025 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1030 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1031 struct bnx2x_fastpath *fp, u16 index)
1033 struct sk_buff *skb;
1034 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1035 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1038 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1039 if (unlikely(skb == NULL))
1042 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1043 PCI_DMA_FROMDEVICE);
1044 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1050 pci_unmap_addr_set(rx_buf, mapping, mapping);
1052 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1053 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1058 /* note that we are not allocating a new skb,
1059 * we are just moving one from cons to prod
1060 * we are not creating a new mapping,
1061 * so there is no need to check for dma_mapping_error().
1063 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1064 struct sk_buff *skb, u16 cons, u16 prod)
1066 struct bnx2x *bp = fp->bp;
1067 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1068 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1069 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1070 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1072 pci_dma_sync_single_for_device(bp->pdev,
1073 pci_unmap_addr(cons_rx_buf, mapping),
1074 bp->rx_offset + RX_COPY_THRESH,
1075 PCI_DMA_FROMDEVICE);
1077 prod_rx_buf->skb = cons_rx_buf->skb;
1078 pci_unmap_addr_set(prod_rx_buf, mapping,
1079 pci_unmap_addr(cons_rx_buf, mapping));
1080 *prod_bd = *cons_bd;
1083 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1086 u16 last_max = fp->last_max_sge;
1088 if (SUB_S16(idx, last_max) > 0)
1089 fp->last_max_sge = idx;
1092 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1096 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1097 int idx = RX_SGE_CNT * i - 1;
1099 for (j = 0; j < 2; j++) {
1100 SGE_MASK_CLEAR_BIT(fp, idx);
1106 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1107 struct eth_fast_path_rx_cqe *fp_cqe)
1109 struct bnx2x *bp = fp->bp;
1110 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1111 le16_to_cpu(fp_cqe->len_on_bd)) >>
1113 u16 last_max, last_elem, first_elem;
1120 /* First mark all used pages */
1121 for (i = 0; i < sge_len; i++)
1122 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1124 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1125 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1127 /* Here we assume that the last SGE index is the biggest */
1128 prefetch((void *)(fp->sge_mask));
1129 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1131 last_max = RX_SGE(fp->last_max_sge);
1132 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1133 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1135 /* If ring is not full */
1136 if (last_elem + 1 != first_elem)
1139 /* Now update the prod */
1140 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1141 if (likely(fp->sge_mask[i]))
1144 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1145 delta += RX_SGE_MASK_ELEM_SZ;
1149 fp->rx_sge_prod += delta;
1150 /* clear page-end entries */
1151 bnx2x_clear_sge_mask_next_elems(fp);
1154 DP(NETIF_MSG_RX_STATUS,
1155 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1156 fp->last_max_sge, fp->rx_sge_prod);
1159 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1161 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1162 memset(fp->sge_mask, 0xff,
1163 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1165 /* Clear the two last indices in the page to 1:
1166 these are the indices that correspond to the "next" element,
1167 hence will never be indicated and should be removed from
1168 the calculations. */
1169 bnx2x_clear_sge_mask_next_elems(fp);
1172 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1173 struct sk_buff *skb, u16 cons, u16 prod)
1175 struct bnx2x *bp = fp->bp;
1176 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1177 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1178 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1181 /* move empty skb from pool to prod and map it */
1182 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1183 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1184 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1185 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1187 /* move partial skb from cons to pool (don't unmap yet) */
1188 fp->tpa_pool[queue] = *cons_rx_buf;
1190 /* mark bin state as start - print error if current state != stop */
1191 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1192 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1194 fp->tpa_state[queue] = BNX2X_TPA_START;
1196 /* point prod_bd to new skb */
1197 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1198 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1200 #ifdef BNX2X_STOP_ON_ERROR
1201 fp->tpa_queue_used |= (1 << queue);
1202 #ifdef __powerpc64__
1203 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1205 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1207 fp->tpa_queue_used);
1211 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1212 struct sk_buff *skb,
1213 struct eth_fast_path_rx_cqe *fp_cqe,
1216 struct sw_rx_page *rx_pg, old_rx_pg;
1217 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1218 u32 i, frag_len, frag_size, pages;
1222 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1223 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1225 /* This is needed in order to enable forwarding support */
1227 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1228 max(frag_size, (u32)len_on_bd));
1230 #ifdef BNX2X_STOP_ON_ERROR
1232 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1233 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1235 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1236 fp_cqe->pkt_len, len_on_bd);
1242 /* Run through the SGL and compose the fragmented skb */
1243 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1244 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1246 /* FW gives the indices of the SGE as if the ring is an array
1247 (meaning that "next" element will consume 2 indices) */
1248 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1249 rx_pg = &fp->rx_page_ring[sge_idx];
1252 /* If we fail to allocate a substitute page, we simply stop
1253 where we are and drop the whole packet */
1254 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1255 if (unlikely(err)) {
1256 bp->eth_stats.rx_skb_alloc_failed++;
1260 /* Unmap the page as we r going to pass it to the stack */
1261 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1262 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1264 /* Add one frag and update the appropriate fields in the skb */
1265 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1267 skb->data_len += frag_len;
1268 skb->truesize += frag_len;
1269 skb->len += frag_len;
1271 frag_size -= frag_len;
1277 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1278 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1281 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1282 struct sk_buff *skb = rx_buf->skb;
1284 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1286 /* Unmap skb in the pool anyway, as we are going to change
1287 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1289 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1290 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1292 if (likely(new_skb)) {
1293 /* fix ip xsum and give it to the stack */
1294 /* (no need to map the new skb) */
1297 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1298 PARSING_FLAGS_VLAN);
1299 int is_not_hwaccel_vlan_cqe =
1300 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1304 prefetch(((char *)(skb)) + 128);
1306 #ifdef BNX2X_STOP_ON_ERROR
1307 if (pad + len > bp->rx_buf_size) {
1308 BNX2X_ERR("skb_put is about to fail... "
1309 "pad %d len %d rx_buf_size %d\n",
1310 pad, len, bp->rx_buf_size);
1316 skb_reserve(skb, pad);
1319 skb->protocol = eth_type_trans(skb, bp->dev);
1320 skb->ip_summed = CHECKSUM_UNNECESSARY;
1325 iph = (struct iphdr *)skb->data;
1327 /* If there is no Rx VLAN offloading -
1328 take VLAN tag into an account */
1329 if (unlikely(is_not_hwaccel_vlan_cqe))
1330 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1333 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1336 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1337 &cqe->fast_path_cqe, cqe_idx)) {
1339 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1340 (!is_not_hwaccel_vlan_cqe))
1341 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1342 le16_to_cpu(cqe->fast_path_cqe.
1346 netif_receive_skb(skb);
1348 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1349 " - dropping packet!\n");
1354 /* put new skb in bin */
1355 fp->tpa_pool[queue].skb = new_skb;
1358 /* else drop the packet and keep the buffer in the bin */
1359 DP(NETIF_MSG_RX_STATUS,
1360 "Failed to allocate new skb - dropping packet!\n");
1361 bp->eth_stats.rx_skb_alloc_failed++;
1364 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1367 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1368 struct bnx2x_fastpath *fp,
1369 u16 bd_prod, u16 rx_comp_prod,
1372 struct tstorm_eth_rx_producers rx_prods = {0};
1375 /* Update producers */
1376 rx_prods.bd_prod = bd_prod;
1377 rx_prods.cqe_prod = rx_comp_prod;
1378 rx_prods.sge_prod = rx_sge_prod;
1381 * Make sure that the BD and SGE data is updated before updating the
1382 * producers since FW might read the BD/SGE right after the producer
1384 * This is only applicable for weak-ordered memory model archs such
1385 * as IA-64. The following barrier is also mandatory since FW will
1386 * assumes BDs must have buffers.
1390 for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1391 REG_WR(bp, BAR_TSTRORM_INTMEM +
1392 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1393 ((u32 *)&rx_prods)[i]);
1395 mmiowb(); /* keep prod updates ordered */
1397 DP(NETIF_MSG_RX_STATUS,
1398 "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
1399 bd_prod, rx_comp_prod, rx_sge_prod);
1402 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1404 struct bnx2x *bp = fp->bp;
1405 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1406 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1409 #ifdef BNX2X_STOP_ON_ERROR
1410 if (unlikely(bp->panic))
1414 /* CQ "next element" is of the size of the regular element,
1415 that's why it's ok here */
1416 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1417 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1420 bd_cons = fp->rx_bd_cons;
1421 bd_prod = fp->rx_bd_prod;
1422 bd_prod_fw = bd_prod;
1423 sw_comp_cons = fp->rx_comp_cons;
1424 sw_comp_prod = fp->rx_comp_prod;
1426 /* Memory barrier necessary as speculative reads of the rx
1427 * buffer can be ahead of the index in the status block
1431 DP(NETIF_MSG_RX_STATUS,
1432 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1433 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1435 while (sw_comp_cons != hw_comp_cons) {
1436 struct sw_rx_bd *rx_buf = NULL;
1437 struct sk_buff *skb;
1438 union eth_rx_cqe *cqe;
1442 comp_ring_cons = RCQ_BD(sw_comp_cons);
1443 bd_prod = RX_BD(bd_prod);
1444 bd_cons = RX_BD(bd_cons);
1446 cqe = &fp->rx_comp_ring[comp_ring_cons];
1447 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1449 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1450 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1451 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1452 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1453 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1454 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1456 /* is this a slowpath msg? */
1457 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1458 bnx2x_sp_event(fp, cqe);
1461 /* this is an rx packet */
1463 rx_buf = &fp->rx_buf_ring[bd_cons];
1465 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1466 pad = cqe->fast_path_cqe.placement_offset;
1468 /* If CQE is marked both TPA_START and TPA_END
1469 it is a non-TPA CQE */
1470 if ((!fp->disable_tpa) &&
1471 (TPA_TYPE(cqe_fp_flags) !=
1472 (TPA_TYPE_START | TPA_TYPE_END))) {
1473 u16 queue = cqe->fast_path_cqe.queue_index;
1475 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1476 DP(NETIF_MSG_RX_STATUS,
1477 "calling tpa_start on queue %d\n",
1480 bnx2x_tpa_start(fp, queue, skb,
1485 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1486 DP(NETIF_MSG_RX_STATUS,
1487 "calling tpa_stop on queue %d\n",
1490 if (!BNX2X_RX_SUM_FIX(cqe))
1491 BNX2X_ERR("STOP on none TCP "
1494 /* This is a size of the linear data
1496 len = le16_to_cpu(cqe->fast_path_cqe.
1498 bnx2x_tpa_stop(bp, fp, queue, pad,
1499 len, cqe, comp_ring_cons);
1500 #ifdef BNX2X_STOP_ON_ERROR
1505 bnx2x_update_sge_prod(fp,
1506 &cqe->fast_path_cqe);
1511 pci_dma_sync_single_for_device(bp->pdev,
1512 pci_unmap_addr(rx_buf, mapping),
1513 pad + RX_COPY_THRESH,
1514 PCI_DMA_FROMDEVICE);
1516 prefetch(((char *)(skb)) + 128);
1518 /* is this an error packet? */
1519 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1520 DP(NETIF_MSG_RX_ERR,
1521 "ERROR flags %x rx packet %u\n",
1522 cqe_fp_flags, sw_comp_cons);
1523 bp->eth_stats.rx_err_discard_pkt++;
1527 /* Since we don't have a jumbo ring
1528 * copy small packets if mtu > 1500
1530 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1531 (len <= RX_COPY_THRESH)) {
1532 struct sk_buff *new_skb;
1534 new_skb = netdev_alloc_skb(bp->dev,
1536 if (new_skb == NULL) {
1537 DP(NETIF_MSG_RX_ERR,
1538 "ERROR packet dropped "
1539 "because of alloc failure\n");
1540 bp->eth_stats.rx_skb_alloc_failed++;
1545 skb_copy_from_linear_data_offset(skb, pad,
1546 new_skb->data + pad, len);
1547 skb_reserve(new_skb, pad);
1548 skb_put(new_skb, len);
1550 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1554 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1555 pci_unmap_single(bp->pdev,
1556 pci_unmap_addr(rx_buf, mapping),
1558 PCI_DMA_FROMDEVICE);
1559 skb_reserve(skb, pad);
1563 DP(NETIF_MSG_RX_ERR,
1564 "ERROR packet dropped because "
1565 "of alloc failure\n");
1566 bp->eth_stats.rx_skb_alloc_failed++;
1568 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1572 skb->protocol = eth_type_trans(skb, bp->dev);
1574 skb->ip_summed = CHECKSUM_NONE;
1576 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1577 skb->ip_summed = CHECKSUM_UNNECESSARY;
1579 bp->eth_stats.hw_csum_err++;
1584 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1585 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1586 PARSING_FLAGS_VLAN))
1587 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1588 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1591 netif_receive_skb(skb);
1597 bd_cons = NEXT_RX_IDX(bd_cons);
1598 bd_prod = NEXT_RX_IDX(bd_prod);
1599 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1602 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1603 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1605 if (rx_pkt == budget)
1609 fp->rx_bd_cons = bd_cons;
1610 fp->rx_bd_prod = bd_prod_fw;
1611 fp->rx_comp_cons = sw_comp_cons;
1612 fp->rx_comp_prod = sw_comp_prod;
1614 /* Update producers */
1615 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1618 fp->rx_pkt += rx_pkt;
1624 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1626 struct bnx2x_fastpath *fp = fp_cookie;
1627 struct bnx2x *bp = fp->bp;
1628 int index = FP_IDX(fp);
1630 /* Return here if interrupt is disabled */
1631 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1632 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1636 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1637 index, FP_SB_ID(fp));
1638 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1640 #ifdef BNX2X_STOP_ON_ERROR
1641 if (unlikely(bp->panic))
1645 prefetch(fp->rx_cons_sb);
1646 prefetch(fp->tx_cons_sb);
1647 prefetch(&fp->status_blk->c_status_block.status_block_index);
1648 prefetch(&fp->status_blk->u_status_block.status_block_index);
1650 netif_rx_schedule(&bnx2x_fp(bp, index, napi));
1655 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1657 struct net_device *dev = dev_instance;
1658 struct bnx2x *bp = netdev_priv(dev);
1659 u16 status = bnx2x_ack_int(bp);
1662 /* Return here if interrupt is shared and it's not for us */
1663 if (unlikely(status == 0)) {
1664 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1667 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
1669 /* Return here if interrupt is disabled */
1670 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1671 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1675 #ifdef BNX2X_STOP_ON_ERROR
1676 if (unlikely(bp->panic))
1680 mask = 0x2 << bp->fp[0].sb_id;
1681 if (status & mask) {
1682 struct bnx2x_fastpath *fp = &bp->fp[0];
1684 prefetch(fp->rx_cons_sb);
1685 prefetch(fp->tx_cons_sb);
1686 prefetch(&fp->status_blk->c_status_block.status_block_index);
1687 prefetch(&fp->status_blk->u_status_block.status_block_index);
1689 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
1695 if (unlikely(status & 0x1)) {
1696 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1704 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1710 /* end of fast path */
1712 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1717 * General service functions
1720 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1723 u32 resource_bit = (1 << resource);
1724 int func = BP_FUNC(bp);
1725 u32 hw_lock_control_reg;
1728 /* Validating that the resource is within range */
1729 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1731 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1732 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1737 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1739 hw_lock_control_reg =
1740 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1743 /* Validating that the resource is not already taken */
1744 lock_status = REG_RD(bp, hw_lock_control_reg);
1745 if (lock_status & resource_bit) {
1746 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1747 lock_status, resource_bit);
1751 /* Try for 5 second every 5ms */
1752 for (cnt = 0; cnt < 1000; cnt++) {
1753 /* Try to acquire the lock */
1754 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1755 lock_status = REG_RD(bp, hw_lock_control_reg);
1756 if (lock_status & resource_bit)
1761 DP(NETIF_MSG_HW, "Timeout\n");
1765 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1768 u32 resource_bit = (1 << resource);
1769 int func = BP_FUNC(bp);
1770 u32 hw_lock_control_reg;
1772 /* Validating that the resource is within range */
1773 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1775 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1776 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1781 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1783 hw_lock_control_reg =
1784 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1787 /* Validating that the resource is currently taken */
1788 lock_status = REG_RD(bp, hw_lock_control_reg);
1789 if (!(lock_status & resource_bit)) {
1790 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1791 lock_status, resource_bit);
1795 REG_WR(bp, hw_lock_control_reg, resource_bit);
1799 /* HW Lock for shared dual port PHYs */
1800 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1802 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1804 mutex_lock(&bp->port.phy_mutex);
1806 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1807 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1808 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1811 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1813 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1815 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1816 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1817 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1819 mutex_unlock(&bp->port.phy_mutex);
1822 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1824 /* The GPIO should be swapped if swap register is set and active */
1825 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1826 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1827 int gpio_shift = gpio_num +
1828 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1829 u32 gpio_mask = (1 << gpio_shift);
1832 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1833 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1837 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1838 /* read GPIO and mask except the float bits */
1839 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1842 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1843 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1844 gpio_num, gpio_shift);
1845 /* clear FLOAT and set CLR */
1846 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1847 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1850 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1851 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1852 gpio_num, gpio_shift);
1853 /* clear FLOAT and set SET */
1854 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1855 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1858 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1859 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1860 gpio_num, gpio_shift);
1862 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1869 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1870 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1875 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1877 u32 spio_mask = (1 << spio_num);
1880 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1881 (spio_num > MISC_REGISTERS_SPIO_7)) {
1882 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1886 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1887 /* read SPIO and mask except the float bits */
1888 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1891 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1892 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1893 /* clear FLOAT and set CLR */
1894 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1895 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1898 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1899 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1900 /* clear FLOAT and set SET */
1901 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1902 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1905 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1906 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1908 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1915 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1916 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1921 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1923 switch (bp->link_vars.ieee_fc &
1924 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1925 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1926 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1929 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1930 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1933 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1934 bp->port.advertising |= ADVERTISED_Asym_Pause;
1937 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1943 static void bnx2x_link_report(struct bnx2x *bp)
1945 if (bp->link_vars.link_up) {
1946 if (bp->state == BNX2X_STATE_OPEN)
1947 netif_carrier_on(bp->dev);
1948 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1950 printk("%d Mbps ", bp->link_vars.line_speed);
1952 if (bp->link_vars.duplex == DUPLEX_FULL)
1953 printk("full duplex");
1955 printk("half duplex");
1957 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1958 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1959 printk(", receive ");
1960 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1961 printk("& transmit ");
1963 printk(", transmit ");
1965 printk("flow control ON");
1969 } else { /* link_down */
1970 netif_carrier_off(bp->dev);
1971 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1975 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1977 if (!BP_NOMCP(bp)) {
1980 /* Initialize link parameters structure variables */
1981 /* It is recommended to turn off RX FC for jumbo frames
1982 for better performance */
1984 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1985 else if (bp->dev->mtu > 5000)
1986 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1988 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1990 bnx2x_acquire_phy_lock(bp);
1991 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1992 bnx2x_release_phy_lock(bp);
1994 bnx2x_calc_fc_adv(bp);
1996 if (bp->link_vars.link_up)
1997 bnx2x_link_report(bp);
2002 BNX2X_ERR("Bootcode is missing -not initializing link\n");
2006 static void bnx2x_link_set(struct bnx2x *bp)
2008 if (!BP_NOMCP(bp)) {
2009 bnx2x_acquire_phy_lock(bp);
2010 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2011 bnx2x_release_phy_lock(bp);
2013 bnx2x_calc_fc_adv(bp);
2015 BNX2X_ERR("Bootcode is missing -not setting link\n");
2018 static void bnx2x__link_reset(struct bnx2x *bp)
2020 if (!BP_NOMCP(bp)) {
2021 bnx2x_acquire_phy_lock(bp);
2022 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2023 bnx2x_release_phy_lock(bp);
2025 BNX2X_ERR("Bootcode is missing -not resetting link\n");
2028 static u8 bnx2x_link_test(struct bnx2x *bp)
2032 bnx2x_acquire_phy_lock(bp);
2033 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2034 bnx2x_release_phy_lock(bp);
2039 /* Calculates the sum of vn_min_rates.
2040 It's needed for further normalizing of the min_rates.
2045 0 - if all the min_rates are 0.
2046 In the later case fairness algorithm should be deactivated.
2047 If not all min_rates are zero then those that are zeroes will
2050 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2052 int i, port = BP_PORT(bp);
2056 for (i = 0; i < E1HVN_MAX; i++) {
2058 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2059 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2060 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2061 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2062 /* If min rate is zero - set it to 1 */
2064 vn_min_rate = DEF_MIN_RATE;
2068 wsum += vn_min_rate;
2072 /* ... only if all min rates are zeros - disable FAIRNESS */
2079 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2082 struct cmng_struct_per_port *m_cmng_port)
2084 u32 r_param = port_rate / 8;
2085 int port = BP_PORT(bp);
2088 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2090 /* Enable minmax only if we are in e1hmf mode */
2092 u32 fair_periodic_timeout_usec;
2095 /* Enable rate shaping and fairness */
2096 m_cmng_port->flags.cmng_vn_enable = 1;
2097 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2098 m_cmng_port->flags.rate_shaping_enable = 1;
2101 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2102 " fairness will be disabled\n");
2104 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2105 m_cmng_port->rs_vars.rs_periodic_timeout =
2106 RS_PERIODIC_TIMEOUT_USEC / 4;
2108 /* this is the threshold below which no timer arming will occur
2109 1.25 coefficient is for the threshold to be a little bigger
2110 than the real time, to compensate for timer in-accuracy */
2111 m_cmng_port->rs_vars.rs_threshold =
2112 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2114 /* resolution of fairness timer */
2115 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2116 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2117 t_fair = T_FAIR_COEF / port_rate;
2119 /* this is the threshold below which we won't arm
2120 the timer anymore */
2121 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2123 /* we multiply by 1e3/8 to get bytes/msec.
2124 We don't want the credits to pass a credit
2125 of the T_FAIR*FAIR_MEM (algorithm resolution) */
2126 m_cmng_port->fair_vars.upper_bound =
2127 r_param * t_fair * FAIR_MEM;
2128 /* since each tick is 4 usec */
2129 m_cmng_port->fair_vars.fairness_timeout =
2130 fair_periodic_timeout_usec / 4;
2133 /* Disable rate shaping and fairness */
2134 m_cmng_port->flags.cmng_vn_enable = 0;
2135 m_cmng_port->flags.fairness_enable = 0;
2136 m_cmng_port->flags.rate_shaping_enable = 0;
2139 "Single function mode minmax will be disabled\n");
2142 /* Store it to internal memory */
2143 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2144 REG_WR(bp, BAR_XSTRORM_INTMEM +
2145 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2146 ((u32 *)(m_cmng_port))[i]);
2149 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2150 u32 wsum, u16 port_rate,
2151 struct cmng_struct_per_port *m_cmng_port)
2153 struct rate_shaping_vars_per_vn m_rs_vn;
2154 struct fairness_vars_per_vn m_fair_vn;
2155 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2156 u16 vn_min_rate, vn_max_rate;
2159 /* If function is hidden - set min and max to zeroes */
2160 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2165 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2166 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2167 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2168 if current min rate is zero - set it to 1.
2169 This is a requirement of the algorithm. */
2170 if ((vn_min_rate == 0) && wsum)
2171 vn_min_rate = DEF_MIN_RATE;
2172 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2173 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2176 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2177 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2179 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2180 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2182 /* global vn counter - maximal Mbps for this vn */
2183 m_rs_vn.vn_counter.rate = vn_max_rate;
2185 /* quota - number of bytes transmitted in this period */
2186 m_rs_vn.vn_counter.quota =
2187 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2189 #ifdef BNX2X_PER_PROT_QOS
2190 /* per protocol counter */
2191 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2192 /* maximal Mbps for this protocol */
2193 m_rs_vn.protocol_counters[protocol].rate =
2194 protocol_max_rate[protocol];
2195 /* the quota in each timer period -
2196 number of bytes transmitted in this period */
2197 m_rs_vn.protocol_counters[protocol].quota =
2198 (u32)(rs_periodic_timeout_usec *
2200 protocol_counters[protocol].rate/8));
2205 /* credit for each period of the fairness algorithm:
2206 number of bytes in T_FAIR (the vn share the port rate).
2207 wsum should not be larger than 10000, thus
2208 T_FAIR_COEF / (8 * wsum) will always be grater than zero */
2209 m_fair_vn.vn_credit_delta =
2210 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2211 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2212 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2213 m_fair_vn.vn_credit_delta);
2216 #ifdef BNX2X_PER_PROT_QOS
2218 u32 protocolWeightSum = 0;
2220 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2221 protocolWeightSum +=
2222 drvInit.protocol_min_rate[protocol];
2223 /* per protocol counter -
2224 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2225 if (protocolWeightSum > 0) {
2227 protocol < NUM_OF_PROTOCOLS; protocol++)
2228 /* credit for each period of the
2229 fairness algorithm - number of bytes in
2230 T_FAIR (the protocol share the vn rate) */
2231 m_fair_vn.protocol_credit_delta[protocol] =
2232 (u32)((vn_min_rate / 8) * t_fair *
2233 protocol_min_rate / protocolWeightSum);
2238 /* Store it to internal memory */
2239 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2240 REG_WR(bp, BAR_XSTRORM_INTMEM +
2241 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2242 ((u32 *)(&m_rs_vn))[i]);
2244 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2245 REG_WR(bp, BAR_XSTRORM_INTMEM +
2246 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2247 ((u32 *)(&m_fair_vn))[i]);
2250 /* This function is called upon link interrupt */
2251 static void bnx2x_link_attn(struct bnx2x *bp)
2255 /* Make sure that we are synced with the current statistics */
2256 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2258 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2260 if (bp->link_vars.link_up) {
2262 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2263 struct host_port_stats *pstats;
2265 pstats = bnx2x_sp(bp, port_stats);
2266 /* reset old bmac stats */
2267 memset(&(pstats->mac_stx[0]), 0,
2268 sizeof(struct mac_stx));
2270 if ((bp->state == BNX2X_STATE_OPEN) ||
2271 (bp->state == BNX2X_STATE_DISABLED))
2272 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2275 /* indicate link status */
2276 bnx2x_link_report(bp);
2281 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2282 if (vn == BP_E1HVN(bp))
2285 func = ((vn << 1) | BP_PORT(bp));
2287 /* Set the attention towards other drivers
2289 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2290 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2294 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2295 struct cmng_struct_per_port m_cmng_port;
2297 int port = BP_PORT(bp);
2299 /* Init RATE SHAPING and FAIRNESS contexts */
2300 wsum = bnx2x_calc_vn_wsum(bp);
2301 bnx2x_init_port_minmax(bp, (int)wsum,
2302 bp->link_vars.line_speed,
2305 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2306 bnx2x_init_vn_minmax(bp, 2*vn + port,
2307 wsum, bp->link_vars.line_speed,
2312 static void bnx2x__link_status_update(struct bnx2x *bp)
2314 if (bp->state != BNX2X_STATE_OPEN)
2317 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2319 if (bp->link_vars.link_up)
2320 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2322 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2324 /* indicate link status */
2325 bnx2x_link_report(bp);
2328 static void bnx2x_pmf_update(struct bnx2x *bp)
2330 int port = BP_PORT(bp);
2334 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2336 /* enable nig attention */
2337 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2338 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2339 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2341 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2349 * General service functions
2352 /* the slow path queue is odd since completions arrive on the fastpath ring */
2353 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2354 u32 data_hi, u32 data_lo, int common)
2356 int func = BP_FUNC(bp);
2358 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2359 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2360 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2361 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2362 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2364 #ifdef BNX2X_STOP_ON_ERROR
2365 if (unlikely(bp->panic))
2369 spin_lock_bh(&bp->spq_lock);
2371 if (!bp->spq_left) {
2372 BNX2X_ERR("BUG! SPQ ring full!\n");
2373 spin_unlock_bh(&bp->spq_lock);
2378 /* CID needs port number to be encoded int it */
2379 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2380 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2382 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2384 bp->spq_prod_bd->hdr.type |=
2385 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2387 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2388 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2392 if (bp->spq_prod_bd == bp->spq_last_bd) {
2393 bp->spq_prod_bd = bp->spq;
2394 bp->spq_prod_idx = 0;
2395 DP(NETIF_MSG_TIMER, "end of spq\n");
2402 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2405 spin_unlock_bh(&bp->spq_lock);
2409 /* acquire split MCP access lock register */
2410 static int bnx2x_acquire_alr(struct bnx2x *bp)
2417 for (j = 0; j < i*10; j++) {
2419 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2420 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2421 if (val & (1L << 31))
2426 if (!(val & (1L << 31))) {
2427 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2434 /* release split MCP access lock register */
2435 static void bnx2x_release_alr(struct bnx2x *bp)
2439 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2442 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2444 struct host_def_status_block *def_sb = bp->def_status_blk;
2447 barrier(); /* status block is written to by the chip */
2448 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2449 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2452 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2453 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2456 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2457 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2460 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2461 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2464 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2465 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2472 * slow path service functions
2475 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2477 int port = BP_PORT(bp);
2478 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2479 COMMAND_REG_ATTN_BITS_SET);
2480 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2481 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2482 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2483 NIG_REG_MASK_INTERRUPT_PORT0;
2486 if (bp->attn_state & asserted)
2487 BNX2X_ERR("IGU ERROR\n");
2489 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2490 aeu_mask = REG_RD(bp, aeu_addr);
2492 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2493 aeu_mask, asserted);
2494 aeu_mask &= ~(asserted & 0xff);
2495 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2497 REG_WR(bp, aeu_addr, aeu_mask);
2498 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2500 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2501 bp->attn_state |= asserted;
2502 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2504 if (asserted & ATTN_HARD_WIRED_MASK) {
2505 if (asserted & ATTN_NIG_FOR_FUNC) {
2507 bnx2x_acquire_phy_lock(bp);
2509 /* save nig interrupt mask */
2510 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2511 REG_WR(bp, nig_int_mask_addr, 0);
2513 bnx2x_link_attn(bp);
2515 /* handle unicore attn? */
2517 if (asserted & ATTN_SW_TIMER_4_FUNC)
2518 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2520 if (asserted & GPIO_2_FUNC)
2521 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2523 if (asserted & GPIO_3_FUNC)
2524 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2526 if (asserted & GPIO_4_FUNC)
2527 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2530 if (asserted & ATTN_GENERAL_ATTN_1) {
2531 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2532 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2534 if (asserted & ATTN_GENERAL_ATTN_2) {
2535 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2536 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2538 if (asserted & ATTN_GENERAL_ATTN_3) {
2539 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2540 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2543 if (asserted & ATTN_GENERAL_ATTN_4) {
2544 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2545 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2547 if (asserted & ATTN_GENERAL_ATTN_5) {
2548 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2549 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2551 if (asserted & ATTN_GENERAL_ATTN_6) {
2552 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2553 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2557 } /* if hardwired */
2559 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2561 REG_WR(bp, hc_addr, asserted);
2563 /* now set back the mask */
2564 if (asserted & ATTN_NIG_FOR_FUNC) {
2565 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2566 bnx2x_release_phy_lock(bp);
2570 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2572 int port = BP_PORT(bp);
2576 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2577 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2579 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2581 val = REG_RD(bp, reg_offset);
2582 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2583 REG_WR(bp, reg_offset, val);
2585 BNX2X_ERR("SPIO5 hw attention\n");
2587 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2588 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2589 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2590 /* Fan failure attention */
2592 /* The PHY reset is controlled by GPIO 1 */
2593 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2594 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2595 /* Low power mode is controlled by GPIO 2 */
2596 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2597 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2598 /* mark the failure */
2599 bp->link_params.ext_phy_config &=
2600 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2601 bp->link_params.ext_phy_config |=
2602 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2604 dev_info.port_hw_config[port].
2605 external_phy_config,
2606 bp->link_params.ext_phy_config);
2607 /* log the failure */
2608 printk(KERN_ERR PFX "Fan Failure on Network"
2609 " Controller %s has caused the driver to"
2610 " shutdown the card to prevent permanent"
2611 " damage. Please contact Dell Support for"
2612 " assistance\n", bp->dev->name);
2620 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2622 val = REG_RD(bp, reg_offset);
2623 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2624 REG_WR(bp, reg_offset, val);
2626 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2627 (attn & HW_INTERRUT_ASSERT_SET_0));
2632 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2636 if (attn & BNX2X_DOORQ_ASSERT) {
2638 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2639 BNX2X_ERR("DB hw attention 0x%x\n", val);
2640 /* DORQ discard attention */
2642 BNX2X_ERR("FATAL error from DORQ\n");
2645 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2647 int port = BP_PORT(bp);
2650 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2651 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2653 val = REG_RD(bp, reg_offset);
2654 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2655 REG_WR(bp, reg_offset, val);
2657 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2658 (attn & HW_INTERRUT_ASSERT_SET_1));
2663 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2667 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2669 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2670 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2671 /* CFC error attention */
2673 BNX2X_ERR("FATAL error from CFC\n");
2676 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2678 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2679 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2680 /* RQ_USDMDP_FIFO_OVERFLOW */
2682 BNX2X_ERR("FATAL error from PXP\n");
2685 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2687 int port = BP_PORT(bp);
2690 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2691 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2693 val = REG_RD(bp, reg_offset);
2694 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2695 REG_WR(bp, reg_offset, val);
2697 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2698 (attn & HW_INTERRUT_ASSERT_SET_2));
2703 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2707 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2709 if (attn & BNX2X_PMF_LINK_ASSERT) {
2710 int func = BP_FUNC(bp);
2712 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2713 bnx2x__link_status_update(bp);
2714 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2716 bnx2x_pmf_update(bp);
2718 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2720 BNX2X_ERR("MC assert!\n");
2721 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2722 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2723 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2724 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2727 } else if (attn & BNX2X_MCP_ASSERT) {
2729 BNX2X_ERR("MCP assert!\n");
2730 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2734 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2737 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2738 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2739 if (attn & BNX2X_GRC_TIMEOUT) {
2740 val = CHIP_IS_E1H(bp) ?
2741 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2742 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2744 if (attn & BNX2X_GRC_RSV) {
2745 val = CHIP_IS_E1H(bp) ?
2746 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2747 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2749 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2753 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2755 struct attn_route attn;
2756 struct attn_route group_mask;
2757 int port = BP_PORT(bp);
2763 /* need to take HW lock because MCP or other port might also
2764 try to handle this event */
2765 bnx2x_acquire_alr(bp);
2767 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2768 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2769 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2770 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2771 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2772 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2774 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2775 if (deasserted & (1 << index)) {
2776 group_mask = bp->attn_group[index];
2778 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2779 index, group_mask.sig[0], group_mask.sig[1],
2780 group_mask.sig[2], group_mask.sig[3]);
2782 bnx2x_attn_int_deasserted3(bp,
2783 attn.sig[3] & group_mask.sig[3]);
2784 bnx2x_attn_int_deasserted1(bp,
2785 attn.sig[1] & group_mask.sig[1]);
2786 bnx2x_attn_int_deasserted2(bp,
2787 attn.sig[2] & group_mask.sig[2]);
2788 bnx2x_attn_int_deasserted0(bp,
2789 attn.sig[0] & group_mask.sig[0]);
2791 if ((attn.sig[0] & group_mask.sig[0] &
2792 HW_PRTY_ASSERT_SET_0) ||
2793 (attn.sig[1] & group_mask.sig[1] &
2794 HW_PRTY_ASSERT_SET_1) ||
2795 (attn.sig[2] & group_mask.sig[2] &
2796 HW_PRTY_ASSERT_SET_2))
2797 BNX2X_ERR("FATAL HW block parity attention\n");
2801 bnx2x_release_alr(bp);
2803 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2806 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2808 REG_WR(bp, reg_addr, val);
2810 if (~bp->attn_state & deasserted)
2811 BNX2X_ERR("IGU ERROR\n");
2813 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2814 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2816 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2817 aeu_mask = REG_RD(bp, reg_addr);
2819 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2820 aeu_mask, deasserted);
2821 aeu_mask |= (deasserted & 0xff);
2822 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2824 REG_WR(bp, reg_addr, aeu_mask);
2825 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2827 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2828 bp->attn_state &= ~deasserted;
2829 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2832 static void bnx2x_attn_int(struct bnx2x *bp)
2834 /* read local copy of bits */
2835 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2837 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2839 u32 attn_state = bp->attn_state;
2841 /* look for changed bits */
2842 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2843 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2846 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2847 attn_bits, attn_ack, asserted, deasserted);
2849 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2850 BNX2X_ERR("BAD attention state\n");
2852 /* handle bits that were raised */
2854 bnx2x_attn_int_asserted(bp, asserted);
2857 bnx2x_attn_int_deasserted(bp, deasserted);
2860 static void bnx2x_sp_task(struct work_struct *work)
2862 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2866 /* Return here if interrupt is disabled */
2867 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2868 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2872 status = bnx2x_update_dsb_idx(bp);
2873 /* if (status == 0) */
2874 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2876 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2882 /* CStorm events: query_stats, port delete ramrod */
2884 bp->stats_pending = 0;
2886 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2888 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2890 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2892 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2894 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2899 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2901 struct net_device *dev = dev_instance;
2902 struct bnx2x *bp = netdev_priv(dev);
2904 /* Return here if interrupt is disabled */
2905 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2906 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2910 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2912 #ifdef BNX2X_STOP_ON_ERROR
2913 if (unlikely(bp->panic))
2917 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2922 /* end of slow path */
2926 /****************************************************************************
2928 ****************************************************************************/
2930 /* sum[hi:lo] += add[hi:lo] */
2931 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2934 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2937 /* difference = minuend - subtrahend */
2938 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2940 if (m_lo < s_lo) { \
2942 d_hi = m_hi - s_hi; \
2944 /* we can 'loan' 1 */ \
2946 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2948 /* m_hi <= s_hi */ \
2953 /* m_lo >= s_lo */ \
2954 if (m_hi < s_hi) { \
2958 /* m_hi >= s_hi */ \
2959 d_hi = m_hi - s_hi; \
2960 d_lo = m_lo - s_lo; \
2965 #define UPDATE_STAT64(s, t) \
2967 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2968 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2969 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2970 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2971 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2972 pstats->mac_stx[1].t##_lo, diff.lo); \
2975 #define UPDATE_STAT64_NIG(s, t) \
2977 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2978 diff.lo, new->s##_lo, old->s##_lo); \
2979 ADD_64(estats->t##_hi, diff.hi, \
2980 estats->t##_lo, diff.lo); \
2983 /* sum[hi:lo] += add */
2984 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2987 s_hi += (s_lo < a) ? 1 : 0; \
2990 #define UPDATE_EXTEND_STAT(s) \
2992 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2993 pstats->mac_stx[1].s##_lo, \
2997 #define UPDATE_EXTEND_TSTAT(s, t) \
2999 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3000 old_tclient->s = le32_to_cpu(tclient->s); \
3001 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3004 #define UPDATE_EXTEND_XSTAT(s, t) \
3006 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3007 old_xclient->s = le32_to_cpu(xclient->s); \
3008 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3012 * General service functions
3015 static inline long bnx2x_hilo(u32 *hiref)
3017 u32 lo = *(hiref + 1);
3018 #if (BITS_PER_LONG == 64)
3021 return HILO_U64(hi, lo);
3028 * Init service functions
3031 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3033 if (!bp->stats_pending) {
3034 struct eth_query_ramrod_data ramrod_data = {0};
3037 ramrod_data.drv_counter = bp->stats_counter++;
3038 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3039 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3041 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3042 ((u32 *)&ramrod_data)[1],
3043 ((u32 *)&ramrod_data)[0], 0);
3045 /* stats ramrod has it's own slot on the spq */
3047 bp->stats_pending = 1;
3052 static void bnx2x_stats_init(struct bnx2x *bp)
3054 int port = BP_PORT(bp);
3056 bp->executer_idx = 0;
3057 bp->stats_counter = 0;
3061 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3063 bp->port.port_stx = 0;
3064 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3066 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3067 bp->port.old_nig_stats.brb_discard =
3068 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3069 bp->port.old_nig_stats.brb_truncate =
3070 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3071 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3072 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3073 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3074 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3076 /* function stats */
3077 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3078 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3079 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3080 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3082 bp->stats_state = STATS_STATE_DISABLED;
3083 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3084 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3087 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3089 struct dmae_command *dmae = &bp->stats_dmae;
3090 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3092 *stats_comp = DMAE_COMP_VAL;
3095 if (bp->executer_idx) {
3096 int loader_idx = PMF_DMAE_C(bp);
3098 memset(dmae, 0, sizeof(struct dmae_command));
3100 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3101 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3102 DMAE_CMD_DST_RESET |
3104 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3106 DMAE_CMD_ENDIANITY_DW_SWAP |
3108 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3110 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3111 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3112 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3113 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3114 sizeof(struct dmae_command) *
3115 (loader_idx + 1)) >> 2;
3116 dmae->dst_addr_hi = 0;
3117 dmae->len = sizeof(struct dmae_command) >> 2;
3120 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3121 dmae->comp_addr_hi = 0;
3125 bnx2x_post_dmae(bp, dmae, loader_idx);
3127 } else if (bp->func_stx) {
3129 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3133 static int bnx2x_stats_comp(struct bnx2x *bp)
3135 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3139 while (*stats_comp != DMAE_COMP_VAL) {
3141 BNX2X_ERR("timeout waiting for stats finished\n");
3151 * Statistics service functions
3154 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3156 struct dmae_command *dmae;
3158 int loader_idx = PMF_DMAE_C(bp);
3159 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3162 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3163 BNX2X_ERR("BUG!\n");
3167 bp->executer_idx = 0;
3169 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3171 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3173 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3175 DMAE_CMD_ENDIANITY_DW_SWAP |
3177 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3178 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3180 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3181 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3182 dmae->src_addr_lo = bp->port.port_stx >> 2;
3183 dmae->src_addr_hi = 0;
3184 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3185 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3186 dmae->len = DMAE_LEN32_RD_MAX;
3187 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3188 dmae->comp_addr_hi = 0;
3191 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3192 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3193 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3194 dmae->src_addr_hi = 0;
3195 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3196 DMAE_LEN32_RD_MAX * 4);
3197 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3198 DMAE_LEN32_RD_MAX * 4);
3199 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3200 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3201 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3202 dmae->comp_val = DMAE_COMP_VAL;
3205 bnx2x_hw_stats_post(bp);
3206 bnx2x_stats_comp(bp);
3209 static void bnx2x_port_stats_init(struct bnx2x *bp)
3211 struct dmae_command *dmae;
3212 int port = BP_PORT(bp);
3213 int vn = BP_E1HVN(bp);
3215 int loader_idx = PMF_DMAE_C(bp);
3217 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3220 if (!bp->link_vars.link_up || !bp->port.pmf) {
3221 BNX2X_ERR("BUG!\n");
3225 bp->executer_idx = 0;
3228 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3229 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3230 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3232 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3234 DMAE_CMD_ENDIANITY_DW_SWAP |
3236 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3237 (vn << DMAE_CMD_E1HVN_SHIFT));
3239 if (bp->port.port_stx) {
3241 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3242 dmae->opcode = opcode;
3243 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3244 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3245 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3246 dmae->dst_addr_hi = 0;
3247 dmae->len = sizeof(struct host_port_stats) >> 2;
3248 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3249 dmae->comp_addr_hi = 0;
3255 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3256 dmae->opcode = opcode;
3257 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3258 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3259 dmae->dst_addr_lo = bp->func_stx >> 2;
3260 dmae->dst_addr_hi = 0;
3261 dmae->len = sizeof(struct host_func_stats) >> 2;
3262 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3263 dmae->comp_addr_hi = 0;
3268 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3269 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3270 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3272 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3274 DMAE_CMD_ENDIANITY_DW_SWAP |
3276 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3277 (vn << DMAE_CMD_E1HVN_SHIFT));
3279 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3281 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3282 NIG_REG_INGRESS_BMAC0_MEM);
3284 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3285 BIGMAC_REGISTER_TX_STAT_GTBYT */
3286 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3287 dmae->opcode = opcode;
3288 dmae->src_addr_lo = (mac_addr +
3289 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3290 dmae->src_addr_hi = 0;
3291 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3292 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3293 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3294 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3295 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3296 dmae->comp_addr_hi = 0;
3299 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3300 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3301 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3302 dmae->opcode = opcode;
3303 dmae->src_addr_lo = (mac_addr +
3304 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3305 dmae->src_addr_hi = 0;
3306 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3307 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3308 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3309 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3310 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3311 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3312 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3313 dmae->comp_addr_hi = 0;
3316 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3318 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3320 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3321 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3322 dmae->opcode = opcode;
3323 dmae->src_addr_lo = (mac_addr +
3324 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3325 dmae->src_addr_hi = 0;
3326 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3327 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3328 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3329 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3330 dmae->comp_addr_hi = 0;
3333 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3334 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3335 dmae->opcode = opcode;
3336 dmae->src_addr_lo = (mac_addr +
3337 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3338 dmae->src_addr_hi = 0;
3339 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3340 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3341 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3342 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3344 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3345 dmae->comp_addr_hi = 0;
3348 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3349 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3350 dmae->opcode = opcode;
3351 dmae->src_addr_lo = (mac_addr +
3352 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3353 dmae->src_addr_hi = 0;
3354 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3355 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3356 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3357 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3358 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3359 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3360 dmae->comp_addr_hi = 0;
3365 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3366 dmae->opcode = opcode;
3367 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3368 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3369 dmae->src_addr_hi = 0;
3370 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3371 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3372 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3373 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3374 dmae->comp_addr_hi = 0;
3377 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3378 dmae->opcode = opcode;
3379 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3380 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3381 dmae->src_addr_hi = 0;
3382 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3383 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3384 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3385 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3386 dmae->len = (2*sizeof(u32)) >> 2;
3387 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3388 dmae->comp_addr_hi = 0;
3391 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3392 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3393 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3394 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3396 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3398 DMAE_CMD_ENDIANITY_DW_SWAP |
3400 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3401 (vn << DMAE_CMD_E1HVN_SHIFT));
3402 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3403 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3404 dmae->src_addr_hi = 0;
3405 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3406 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3407 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3408 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3409 dmae->len = (2*sizeof(u32)) >> 2;
3410 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3411 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3412 dmae->comp_val = DMAE_COMP_VAL;
3417 static void bnx2x_func_stats_init(struct bnx2x *bp)
3419 struct dmae_command *dmae = &bp->stats_dmae;
3420 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3423 if (!bp->func_stx) {
3424 BNX2X_ERR("BUG!\n");
3428 bp->executer_idx = 0;
3429 memset(dmae, 0, sizeof(struct dmae_command));
3431 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3432 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3433 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3435 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3437 DMAE_CMD_ENDIANITY_DW_SWAP |
3439 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3440 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3441 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3442 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3443 dmae->dst_addr_lo = bp->func_stx >> 2;
3444 dmae->dst_addr_hi = 0;
3445 dmae->len = sizeof(struct host_func_stats) >> 2;
3446 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3447 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3448 dmae->comp_val = DMAE_COMP_VAL;
3453 static void bnx2x_stats_start(struct bnx2x *bp)
3456 bnx2x_port_stats_init(bp);
3458 else if (bp->func_stx)
3459 bnx2x_func_stats_init(bp);
3461 bnx2x_hw_stats_post(bp);
3462 bnx2x_storm_stats_post(bp);
3465 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3467 bnx2x_stats_comp(bp);
3468 bnx2x_stats_pmf_update(bp);
3469 bnx2x_stats_start(bp);
3472 static void bnx2x_stats_restart(struct bnx2x *bp)
3474 bnx2x_stats_comp(bp);
3475 bnx2x_stats_start(bp);
3478 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3480 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3481 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3482 struct regpair diff;
3484 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3485 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3486 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3487 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3488 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3489 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3490 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3491 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3492 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3493 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3494 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3495 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3496 UPDATE_STAT64(tx_stat_gt127,
3497 tx_stat_etherstatspkts65octetsto127octets);
3498 UPDATE_STAT64(tx_stat_gt255,
3499 tx_stat_etherstatspkts128octetsto255octets);
3500 UPDATE_STAT64(tx_stat_gt511,
3501 tx_stat_etherstatspkts256octetsto511octets);
3502 UPDATE_STAT64(tx_stat_gt1023,
3503 tx_stat_etherstatspkts512octetsto1023octets);
3504 UPDATE_STAT64(tx_stat_gt1518,
3505 tx_stat_etherstatspkts1024octetsto1522octets);
3506 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3507 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3508 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3509 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3510 UPDATE_STAT64(tx_stat_gterr,
3511 tx_stat_dot3statsinternalmactransmiterrors);
3512 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3515 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3517 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3518 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3520 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3521 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3522 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3523 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3524 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3525 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3526 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3527 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3528 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3529 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3530 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3531 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3532 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3533 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3534 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3535 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3536 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3537 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3538 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3539 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3540 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3541 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3542 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3543 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3544 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3545 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3546 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3547 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3548 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3549 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3550 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3553 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3555 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3556 struct nig_stats *old = &(bp->port.old_nig_stats);
3557 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3558 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3559 struct regpair diff;
3561 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3562 bnx2x_bmac_stats_update(bp);
3564 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3565 bnx2x_emac_stats_update(bp);
3567 else { /* unreached */
3568 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3572 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3573 new->brb_discard - old->brb_discard);
3574 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3575 new->brb_truncate - old->brb_truncate);
3577 UPDATE_STAT64_NIG(egress_mac_pkt0,
3578 etherstatspkts1024octetsto1522octets);
3579 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3581 memcpy(old, new, sizeof(struct nig_stats));
3583 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3584 sizeof(struct mac_stx));
3585 estats->brb_drop_hi = pstats->brb_drop_hi;
3586 estats->brb_drop_lo = pstats->brb_drop_lo;
3588 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3593 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3595 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3596 int cl_id = BP_CL_ID(bp);
3597 struct tstorm_per_port_stats *tport =
3598 &stats->tstorm_common.port_statistics;
3599 struct tstorm_per_client_stats *tclient =
3600 &stats->tstorm_common.client_statistics[cl_id];
3601 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3602 struct xstorm_per_client_stats *xclient =
3603 &stats->xstorm_common.client_statistics[cl_id];
3604 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3605 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3606 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3609 /* are storm stats valid? */
3610 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3611 bp->stats_counter) {
3612 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3613 " tstorm counter (%d) != stats_counter (%d)\n",
3614 tclient->stats_counter, bp->stats_counter);
3617 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3618 bp->stats_counter) {
3619 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3620 " xstorm counter (%d) != stats_counter (%d)\n",
3621 xclient->stats_counter, bp->stats_counter);
3625 fstats->total_bytes_received_hi =
3626 fstats->valid_bytes_received_hi =
3627 le32_to_cpu(tclient->total_rcv_bytes.hi);
3628 fstats->total_bytes_received_lo =
3629 fstats->valid_bytes_received_lo =
3630 le32_to_cpu(tclient->total_rcv_bytes.lo);
3632 estats->error_bytes_received_hi =
3633 le32_to_cpu(tclient->rcv_error_bytes.hi);
3634 estats->error_bytes_received_lo =
3635 le32_to_cpu(tclient->rcv_error_bytes.lo);
3636 ADD_64(estats->error_bytes_received_hi,
3637 estats->rx_stat_ifhcinbadoctets_hi,
3638 estats->error_bytes_received_lo,
3639 estats->rx_stat_ifhcinbadoctets_lo);
3641 ADD_64(fstats->total_bytes_received_hi,
3642 estats->error_bytes_received_hi,
3643 fstats->total_bytes_received_lo,
3644 estats->error_bytes_received_lo);
3646 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3647 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3648 total_multicast_packets_received);
3649 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3650 total_broadcast_packets_received);
3652 fstats->total_bytes_transmitted_hi =
3653 le32_to_cpu(xclient->total_sent_bytes.hi);
3654 fstats->total_bytes_transmitted_lo =
3655 le32_to_cpu(xclient->total_sent_bytes.lo);
3657 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3658 total_unicast_packets_transmitted);
3659 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3660 total_multicast_packets_transmitted);
3661 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3662 total_broadcast_packets_transmitted);
3664 memcpy(estats, &(fstats->total_bytes_received_hi),
3665 sizeof(struct host_func_stats) - 2*sizeof(u32));
3667 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3668 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3669 estats->brb_truncate_discard =
3670 le32_to_cpu(tport->brb_truncate_discard);
3671 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3673 old_tclient->rcv_unicast_bytes.hi =
3674 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3675 old_tclient->rcv_unicast_bytes.lo =
3676 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3677 old_tclient->rcv_broadcast_bytes.hi =
3678 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3679 old_tclient->rcv_broadcast_bytes.lo =
3680 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3681 old_tclient->rcv_multicast_bytes.hi =
3682 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3683 old_tclient->rcv_multicast_bytes.lo =
3684 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3685 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3687 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3688 old_tclient->packets_too_big_discard =
3689 le32_to_cpu(tclient->packets_too_big_discard);
3690 estats->no_buff_discard =
3691 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3692 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3694 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3695 old_xclient->unicast_bytes_sent.hi =
3696 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3697 old_xclient->unicast_bytes_sent.lo =
3698 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3699 old_xclient->multicast_bytes_sent.hi =
3700 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3701 old_xclient->multicast_bytes_sent.lo =
3702 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3703 old_xclient->broadcast_bytes_sent.hi =
3704 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3705 old_xclient->broadcast_bytes_sent.lo =
3706 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3708 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3713 static void bnx2x_net_stats_update(struct bnx2x *bp)
3715 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3716 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3717 struct net_device_stats *nstats = &bp->dev->stats;
3719 nstats->rx_packets =
3720 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3721 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3722 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3724 nstats->tx_packets =
3725 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3726 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3727 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3729 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3731 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3733 nstats->rx_dropped = old_tclient->checksum_discard +
3734 estats->mac_discard;
3735 nstats->tx_dropped = 0;
3738 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3740 nstats->collisions =
3741 estats->tx_stat_dot3statssinglecollisionframes_lo +
3742 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3743 estats->tx_stat_dot3statslatecollisions_lo +
3744 estats->tx_stat_dot3statsexcessivecollisions_lo;
3746 estats->jabber_packets_received =
3747 old_tclient->packets_too_big_discard +
3748 estats->rx_stat_dot3statsframestoolong_lo;
3750 nstats->rx_length_errors =
3751 estats->rx_stat_etherstatsundersizepkts_lo +
3752 estats->jabber_packets_received;
3753 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3754 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3755 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3756 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3757 nstats->rx_missed_errors = estats->xxoverflow_discard;
3759 nstats->rx_errors = nstats->rx_length_errors +
3760 nstats->rx_over_errors +
3761 nstats->rx_crc_errors +
3762 nstats->rx_frame_errors +
3763 nstats->rx_fifo_errors +
3764 nstats->rx_missed_errors;
3766 nstats->tx_aborted_errors =
3767 estats->tx_stat_dot3statslatecollisions_lo +
3768 estats->tx_stat_dot3statsexcessivecollisions_lo;
3769 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3770 nstats->tx_fifo_errors = 0;
3771 nstats->tx_heartbeat_errors = 0;
3772 nstats->tx_window_errors = 0;
3774 nstats->tx_errors = nstats->tx_aborted_errors +
3775 nstats->tx_carrier_errors;
3778 static void bnx2x_stats_update(struct bnx2x *bp)
3780 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3783 if (*stats_comp != DMAE_COMP_VAL)
3787 update = (bnx2x_hw_stats_update(bp) == 0);
3789 update |= (bnx2x_storm_stats_update(bp) == 0);
3792 bnx2x_net_stats_update(bp);
3795 if (bp->stats_pending) {
3796 bp->stats_pending++;
3797 if (bp->stats_pending == 3) {
3798 BNX2X_ERR("stats not updated for 3 times\n");
3805 if (bp->msglevel & NETIF_MSG_TIMER) {
3806 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3807 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3808 struct net_device_stats *nstats = &bp->dev->stats;
3811 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3812 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3814 bnx2x_tx_avail(bp->fp),
3815 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3816 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3818 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3819 bp->fp->rx_comp_cons),
3820 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3821 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3822 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3823 estats->driver_xoff, estats->brb_drop_lo);
3824 printk(KERN_DEBUG "tstats: checksum_discard %u "
3825 "packets_too_big_discard %u no_buff_discard %u "
3826 "mac_discard %u mac_filter_discard %u "
3827 "xxovrflow_discard %u brb_truncate_discard %u "
3828 "ttl0_discard %u\n",
3829 old_tclient->checksum_discard,
3830 old_tclient->packets_too_big_discard,
3831 old_tclient->no_buff_discard, estats->mac_discard,
3832 estats->mac_filter_discard, estats->xxoverflow_discard,
3833 estats->brb_truncate_discard,
3834 old_tclient->ttl0_discard);
3836 for_each_queue(bp, i) {
3837 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3838 bnx2x_fp(bp, i, tx_pkt),
3839 bnx2x_fp(bp, i, rx_pkt),
3840 bnx2x_fp(bp, i, rx_calls));
3844 bnx2x_hw_stats_post(bp);
3845 bnx2x_storm_stats_post(bp);
3848 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3850 struct dmae_command *dmae;
3852 int loader_idx = PMF_DMAE_C(bp);
3853 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3855 bp->executer_idx = 0;
3857 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3859 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3861 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3863 DMAE_CMD_ENDIANITY_DW_SWAP |
3865 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3866 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3868 if (bp->port.port_stx) {
3870 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3872 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3874 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3875 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3876 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3877 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3878 dmae->dst_addr_hi = 0;
3879 dmae->len = sizeof(struct host_port_stats) >> 2;
3881 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3882 dmae->comp_addr_hi = 0;
3885 dmae->comp_addr_lo =
3886 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3887 dmae->comp_addr_hi =
3888 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3889 dmae->comp_val = DMAE_COMP_VAL;
3897 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3898 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3899 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3900 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3901 dmae->dst_addr_lo = bp->func_stx >> 2;
3902 dmae->dst_addr_hi = 0;
3903 dmae->len = sizeof(struct host_func_stats) >> 2;
3904 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3905 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3906 dmae->comp_val = DMAE_COMP_VAL;
3912 static void bnx2x_stats_stop(struct bnx2x *bp)
3916 bnx2x_stats_comp(bp);
3919 update = (bnx2x_hw_stats_update(bp) == 0);
3921 update |= (bnx2x_storm_stats_update(bp) == 0);
3924 bnx2x_net_stats_update(bp);
3927 bnx2x_port_stats_stop(bp);
3929 bnx2x_hw_stats_post(bp);
3930 bnx2x_stats_comp(bp);
3934 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3938 static const struct {
3939 void (*action)(struct bnx2x *bp);
3940 enum bnx2x_stats_state next_state;
3941 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3944 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3945 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3946 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3947 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3950 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3951 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3952 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3953 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3957 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3959 enum bnx2x_stats_state state = bp->stats_state;
3961 bnx2x_stats_stm[state][event].action(bp);
3962 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3964 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3965 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3966 state, event, bp->stats_state);
3969 static void bnx2x_timer(unsigned long data)
3971 struct bnx2x *bp = (struct bnx2x *) data;
3973 if (!netif_running(bp->dev))
3976 if (atomic_read(&bp->intr_sem) != 0)
3980 struct bnx2x_fastpath *fp = &bp->fp[0];
3983 bnx2x_tx_int(fp, 1000);
3984 rc = bnx2x_rx_int(fp, 1000);
3987 if (!BP_NOMCP(bp)) {
3988 int func = BP_FUNC(bp);
3992 ++bp->fw_drv_pulse_wr_seq;
3993 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3994 /* TBD - add SYSTEM_TIME */
3995 drv_pulse = bp->fw_drv_pulse_wr_seq;
3996 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3998 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3999 MCP_PULSE_SEQ_MASK);
4000 /* The delta between driver pulse and mcp response
4001 * should be 1 (before mcp response) or 0 (after mcp response)
4003 if ((drv_pulse != mcp_pulse) &&
4004 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4005 /* someone lost a heartbeat... */
4006 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4007 drv_pulse, mcp_pulse);
4011 if ((bp->state == BNX2X_STATE_OPEN) ||
4012 (bp->state == BNX2X_STATE_DISABLED))
4013 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4016 mod_timer(&bp->timer, jiffies + bp->current_interval);
4019 /* end of Statistics */
4024 * nic init service functions
4027 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4029 int port = BP_PORT(bp);
4031 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4032 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4033 sizeof(struct ustorm_status_block)/4);
4034 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4035 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4036 sizeof(struct cstorm_status_block)/4);
4039 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4040 dma_addr_t mapping, int sb_id)
4042 int port = BP_PORT(bp);
4043 int func = BP_FUNC(bp);
4048 section = ((u64)mapping) + offsetof(struct host_status_block,
4050 sb->u_status_block.status_block_id = sb_id;
4052 REG_WR(bp, BAR_USTRORM_INTMEM +
4053 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4054 REG_WR(bp, BAR_USTRORM_INTMEM +
4055 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4057 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4058 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4060 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4061 REG_WR16(bp, BAR_USTRORM_INTMEM +
4062 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4065 section = ((u64)mapping) + offsetof(struct host_status_block,
4067 sb->c_status_block.status_block_id = sb_id;
4069 REG_WR(bp, BAR_CSTRORM_INTMEM +
4070 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4071 REG_WR(bp, BAR_CSTRORM_INTMEM +
4072 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4074 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4075 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4077 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4078 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4079 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4081 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4084 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4086 int func = BP_FUNC(bp);
4088 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4089 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4090 sizeof(struct ustorm_def_status_block)/4);
4091 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4092 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4093 sizeof(struct cstorm_def_status_block)/4);
4094 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4095 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4096 sizeof(struct xstorm_def_status_block)/4);
4097 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4098 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4099 sizeof(struct tstorm_def_status_block)/4);
4102 static void bnx2x_init_def_sb(struct bnx2x *bp,
4103 struct host_def_status_block *def_sb,
4104 dma_addr_t mapping, int sb_id)
4106 int port = BP_PORT(bp);
4107 int func = BP_FUNC(bp);
4108 int index, val, reg_offset;
4112 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4113 atten_status_block);
4114 def_sb->atten_status_block.status_block_id = sb_id;
4118 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4119 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4121 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4122 bp->attn_group[index].sig[0] = REG_RD(bp,
4123 reg_offset + 0x10*index);
4124 bp->attn_group[index].sig[1] = REG_RD(bp,
4125 reg_offset + 0x4 + 0x10*index);
4126 bp->attn_group[index].sig[2] = REG_RD(bp,
4127 reg_offset + 0x8 + 0x10*index);
4128 bp->attn_group[index].sig[3] = REG_RD(bp,
4129 reg_offset + 0xc + 0x10*index);
4132 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4133 HC_REG_ATTN_MSG0_ADDR_L);
4135 REG_WR(bp, reg_offset, U64_LO(section));
4136 REG_WR(bp, reg_offset + 4, U64_HI(section));
4138 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4140 val = REG_RD(bp, reg_offset);
4142 REG_WR(bp, reg_offset, val);
4145 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4146 u_def_status_block);
4147 def_sb->u_def_status_block.status_block_id = sb_id;
4149 REG_WR(bp, BAR_USTRORM_INTMEM +
4150 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4151 REG_WR(bp, BAR_USTRORM_INTMEM +
4152 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4154 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4155 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4157 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4158 REG_WR16(bp, BAR_USTRORM_INTMEM +
4159 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4162 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4163 c_def_status_block);
4164 def_sb->c_def_status_block.status_block_id = sb_id;
4166 REG_WR(bp, BAR_CSTRORM_INTMEM +
4167 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4168 REG_WR(bp, BAR_CSTRORM_INTMEM +
4169 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4171 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4172 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4174 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4175 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4176 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4179 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4180 t_def_status_block);
4181 def_sb->t_def_status_block.status_block_id = sb_id;
4183 REG_WR(bp, BAR_TSTRORM_INTMEM +
4184 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4185 REG_WR(bp, BAR_TSTRORM_INTMEM +
4186 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4188 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4189 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4191 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4192 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4193 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4196 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4197 x_def_status_block);
4198 def_sb->x_def_status_block.status_block_id = sb_id;
4200 REG_WR(bp, BAR_XSTRORM_INTMEM +
4201 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4202 REG_WR(bp, BAR_XSTRORM_INTMEM +
4203 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4205 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4206 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4208 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4209 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4210 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4212 bp->stats_pending = 0;
4213 bp->set_mac_pending = 0;
4215 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4218 static void bnx2x_update_coalesce(struct bnx2x *bp)
4220 int port = BP_PORT(bp);
4223 for_each_queue(bp, i) {
4224 int sb_id = bp->fp[i].sb_id;
4226 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4227 REG_WR8(bp, BAR_USTRORM_INTMEM +
4228 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4229 U_SB_ETH_RX_CQ_INDEX),
4231 REG_WR16(bp, BAR_USTRORM_INTMEM +
4232 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4233 U_SB_ETH_RX_CQ_INDEX),
4234 bp->rx_ticks ? 0 : 1);
4235 REG_WR16(bp, BAR_USTRORM_INTMEM +
4236 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4237 U_SB_ETH_RX_BD_INDEX),
4238 bp->rx_ticks ? 0 : 1);
4240 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4241 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4242 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4243 C_SB_ETH_TX_CQ_INDEX),
4245 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4246 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4247 C_SB_ETH_TX_CQ_INDEX),
4248 bp->tx_ticks ? 0 : 1);
4252 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4253 struct bnx2x_fastpath *fp, int last)
4257 for (i = 0; i < last; i++) {
4258 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4259 struct sk_buff *skb = rx_buf->skb;
4262 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4266 if (fp->tpa_state[i] == BNX2X_TPA_START)
4267 pci_unmap_single(bp->pdev,
4268 pci_unmap_addr(rx_buf, mapping),
4270 PCI_DMA_FROMDEVICE);
4277 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4279 int func = BP_FUNC(bp);
4280 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4281 ETH_MAX_AGGREGATION_QUEUES_E1H;
4282 u16 ring_prod, cqe_ring_prod;
4285 bp->rx_buf_size = bp->dev->mtu;
4286 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4287 BCM_RX_ETH_PAYLOAD_ALIGN;
4289 if (bp->flags & TPA_ENABLE_FLAG) {
4291 "rx_buf_size %d effective_mtu %d\n",
4292 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4294 for_each_queue(bp, j) {
4295 struct bnx2x_fastpath *fp = &bp->fp[j];
4297 for (i = 0; i < max_agg_queues; i++) {
4298 fp->tpa_pool[i].skb =
4299 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4300 if (!fp->tpa_pool[i].skb) {
4301 BNX2X_ERR("Failed to allocate TPA "
4302 "skb pool for queue[%d] - "
4303 "disabling TPA on this "
4305 bnx2x_free_tpa_pool(bp, fp, i);
4306 fp->disable_tpa = 1;
4309 pci_unmap_addr_set((struct sw_rx_bd *)
4310 &bp->fp->tpa_pool[i],
4312 fp->tpa_state[i] = BNX2X_TPA_STOP;
4317 for_each_queue(bp, j) {
4318 struct bnx2x_fastpath *fp = &bp->fp[j];
4321 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4322 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4324 /* "next page" elements initialization */
4326 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4327 struct eth_rx_sge *sge;
4329 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4331 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4332 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4334 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4335 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4338 bnx2x_init_sge_ring_bit_mask(fp);
4341 for (i = 1; i <= NUM_RX_RINGS; i++) {
4342 struct eth_rx_bd *rx_bd;
4344 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4346 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4347 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4349 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4350 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4354 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4355 struct eth_rx_cqe_next_page *nextpg;
4357 nextpg = (struct eth_rx_cqe_next_page *)
4358 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4360 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4361 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4363 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4364 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4367 /* Allocate SGEs and initialize the ring elements */
4368 for (i = 0, ring_prod = 0;
4369 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4371 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4372 BNX2X_ERR("was only able to allocate "
4374 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4375 /* Cleanup already allocated elements */
4376 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4377 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4378 fp->disable_tpa = 1;
4382 ring_prod = NEXT_SGE_IDX(ring_prod);
4384 fp->rx_sge_prod = ring_prod;
4386 /* Allocate BDs and initialize BD ring */
4387 fp->rx_comp_cons = 0;
4388 cqe_ring_prod = ring_prod = 0;
4389 for (i = 0; i < bp->rx_ring_size; i++) {
4390 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4391 BNX2X_ERR("was only able to allocate "
4393 bp->eth_stats.rx_skb_alloc_failed++;
4396 ring_prod = NEXT_RX_IDX(ring_prod);
4397 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4398 WARN_ON(ring_prod <= i);
4401 fp->rx_bd_prod = ring_prod;
4402 /* must not have more available CQEs than BDs */
4403 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4405 fp->rx_pkt = fp->rx_calls = 0;
4408 * this will generate an interrupt (to the TSTORM)
4409 * must only be done after chip is initialized
4411 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4416 REG_WR(bp, BAR_USTRORM_INTMEM +
4417 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4418 U64_LO(fp->rx_comp_mapping));
4419 REG_WR(bp, BAR_USTRORM_INTMEM +
4420 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4421 U64_HI(fp->rx_comp_mapping));
4425 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4429 for_each_queue(bp, j) {
4430 struct bnx2x_fastpath *fp = &bp->fp[j];
4432 for (i = 1; i <= NUM_TX_RINGS; i++) {
4433 struct eth_tx_bd *tx_bd =
4434 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4437 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4438 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4440 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4441 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4444 fp->tx_pkt_prod = 0;
4445 fp->tx_pkt_cons = 0;
4448 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4453 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4455 int func = BP_FUNC(bp);
4457 spin_lock_init(&bp->spq_lock);
4459 bp->spq_left = MAX_SPQ_PENDING;
4460 bp->spq_prod_idx = 0;
4461 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4462 bp->spq_prod_bd = bp->spq;
4463 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4465 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4466 U64_LO(bp->spq_mapping));
4468 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4469 U64_HI(bp->spq_mapping));
4471 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4475 static void bnx2x_init_context(struct bnx2x *bp)
4479 for_each_queue(bp, i) {
4480 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4481 struct bnx2x_fastpath *fp = &bp->fp[i];
4482 u8 sb_id = FP_SB_ID(fp);
4484 context->xstorm_st_context.tx_bd_page_base_hi =
4485 U64_HI(fp->tx_desc_mapping);
4486 context->xstorm_st_context.tx_bd_page_base_lo =
4487 U64_LO(fp->tx_desc_mapping);
4488 context->xstorm_st_context.db_data_addr_hi =
4489 U64_HI(fp->tx_prods_mapping);
4490 context->xstorm_st_context.db_data_addr_lo =
4491 U64_LO(fp->tx_prods_mapping);
4492 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4493 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4495 context->ustorm_st_context.common.sb_index_numbers =
4496 BNX2X_RX_SB_INDEX_NUM;
4497 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4498 context->ustorm_st_context.common.status_block_id = sb_id;
4499 context->ustorm_st_context.common.flags =
4500 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4501 context->ustorm_st_context.common.mc_alignment_size =
4502 BCM_RX_ETH_PAYLOAD_ALIGN;
4503 context->ustorm_st_context.common.bd_buff_size =
4505 context->ustorm_st_context.common.bd_page_base_hi =
4506 U64_HI(fp->rx_desc_mapping);
4507 context->ustorm_st_context.common.bd_page_base_lo =
4508 U64_LO(fp->rx_desc_mapping);
4509 if (!fp->disable_tpa) {
4510 context->ustorm_st_context.common.flags |=
4511 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4512 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4513 context->ustorm_st_context.common.sge_buff_size =
4514 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4515 context->ustorm_st_context.common.sge_page_base_hi =
4516 U64_HI(fp->rx_sge_mapping);
4517 context->ustorm_st_context.common.sge_page_base_lo =
4518 U64_LO(fp->rx_sge_mapping);
4521 context->cstorm_st_context.sb_index_number =
4522 C_SB_ETH_TX_CQ_INDEX;
4523 context->cstorm_st_context.status_block_id = sb_id;
4525 context->xstorm_ag_context.cdu_reserved =
4526 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4527 CDU_REGION_NUMBER_XCM_AG,
4528 ETH_CONNECTION_TYPE);
4529 context->ustorm_ag_context.cdu_usage =
4530 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4531 CDU_REGION_NUMBER_UCM_AG,
4532 ETH_CONNECTION_TYPE);
4536 static void bnx2x_init_ind_table(struct bnx2x *bp)
4538 int func = BP_FUNC(bp);
4544 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4545 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4546 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4547 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4548 BP_CL_ID(bp) + (i % bp->num_queues));
4551 static void bnx2x_set_client_config(struct bnx2x *bp)
4553 struct tstorm_eth_client_config tstorm_client = {0};
4554 int port = BP_PORT(bp);
4557 tstorm_client.mtu = bp->dev->mtu;
4558 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4559 tstorm_client.config_flags =
4560 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4562 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4563 tstorm_client.config_flags |=
4564 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4565 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4569 if (bp->flags & TPA_ENABLE_FLAG) {
4570 tstorm_client.max_sges_for_packet =
4571 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4572 tstorm_client.max_sges_for_packet =
4573 ((tstorm_client.max_sges_for_packet +
4574 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4575 PAGES_PER_SGE_SHIFT;
4577 tstorm_client.config_flags |=
4578 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4581 for_each_queue(bp, i) {
4582 REG_WR(bp, BAR_TSTRORM_INTMEM +
4583 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4584 ((u32 *)&tstorm_client)[0]);
4585 REG_WR(bp, BAR_TSTRORM_INTMEM +
4586 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4587 ((u32 *)&tstorm_client)[1]);
4590 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4591 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4594 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4596 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4597 int mode = bp->rx_mode;
4598 int mask = (1 << BP_L_ID(bp));
4599 int func = BP_FUNC(bp);
4602 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4605 case BNX2X_RX_MODE_NONE: /* no Rx */
4606 tstorm_mac_filter.ucast_drop_all = mask;
4607 tstorm_mac_filter.mcast_drop_all = mask;
4608 tstorm_mac_filter.bcast_drop_all = mask;
4610 case BNX2X_RX_MODE_NORMAL:
4611 tstorm_mac_filter.bcast_accept_all = mask;
4613 case BNX2X_RX_MODE_ALLMULTI:
4614 tstorm_mac_filter.mcast_accept_all = mask;
4615 tstorm_mac_filter.bcast_accept_all = mask;
4617 case BNX2X_RX_MODE_PROMISC:
4618 tstorm_mac_filter.ucast_accept_all = mask;
4619 tstorm_mac_filter.mcast_accept_all = mask;
4620 tstorm_mac_filter.bcast_accept_all = mask;
4623 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4627 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4628 REG_WR(bp, BAR_TSTRORM_INTMEM +
4629 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4630 ((u32 *)&tstorm_mac_filter)[i]);
4632 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4633 ((u32 *)&tstorm_mac_filter)[i]); */
4636 if (mode != BNX2X_RX_MODE_NONE)
4637 bnx2x_set_client_config(bp);
4640 static void bnx2x_init_internal_common(struct bnx2x *bp)
4644 if (bp->flags & TPA_ENABLE_FLAG) {
4645 struct tstorm_eth_tpa_exist tpa = {0};
4649 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4651 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4655 /* Zero this manually as its initialization is
4656 currently missing in the initTool */
4657 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4658 REG_WR(bp, BAR_USTRORM_INTMEM +
4659 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4662 static void bnx2x_init_internal_port(struct bnx2x *bp)
4664 int port = BP_PORT(bp);
4666 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4667 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4668 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4669 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4672 static void bnx2x_init_internal_func(struct bnx2x *bp)
4674 struct tstorm_eth_function_common_config tstorm_config = {0};
4675 struct stats_indication_flags stats_flags = {0};
4676 int port = BP_PORT(bp);
4677 int func = BP_FUNC(bp);
4682 tstorm_config.config_flags = MULTI_FLAGS;
4683 tstorm_config.rss_result_mask = MULTI_MASK;
4686 tstorm_config.leading_client_id = BP_L_ID(bp);
4688 REG_WR(bp, BAR_TSTRORM_INTMEM +
4689 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4690 (*(u32 *)&tstorm_config));
4692 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4693 bnx2x_set_storm_rx_mode(bp);
4695 /* reset xstorm per client statistics */
4696 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4697 REG_WR(bp, BAR_XSTRORM_INTMEM +
4698 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4701 /* reset tstorm per client statistics */
4702 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4703 REG_WR(bp, BAR_TSTRORM_INTMEM +
4704 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4708 /* Init statistics related context */
4709 stats_flags.collect_eth = 1;
4711 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4712 ((u32 *)&stats_flags)[0]);
4713 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4714 ((u32 *)&stats_flags)[1]);
4716 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4717 ((u32 *)&stats_flags)[0]);
4718 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4719 ((u32 *)&stats_flags)[1]);
4721 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4722 ((u32 *)&stats_flags)[0]);
4723 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4724 ((u32 *)&stats_flags)[1]);
4726 REG_WR(bp, BAR_XSTRORM_INTMEM +
4727 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4728 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4729 REG_WR(bp, BAR_XSTRORM_INTMEM +
4730 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4731 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4733 REG_WR(bp, BAR_TSTRORM_INTMEM +
4734 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4735 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4736 REG_WR(bp, BAR_TSTRORM_INTMEM +
4737 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4738 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4740 if (CHIP_IS_E1H(bp)) {
4741 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4743 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4745 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4747 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4750 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4754 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4756 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4757 SGE_PAGE_SIZE * PAGES_PER_SGE),
4759 for_each_queue(bp, i) {
4760 struct bnx2x_fastpath *fp = &bp->fp[i];
4762 REG_WR(bp, BAR_USTRORM_INTMEM +
4763 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4764 U64_LO(fp->rx_comp_mapping));
4765 REG_WR(bp, BAR_USTRORM_INTMEM +
4766 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4767 U64_HI(fp->rx_comp_mapping));
4769 REG_WR16(bp, BAR_USTRORM_INTMEM +
4770 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4775 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4777 switch (load_code) {
4778 case FW_MSG_CODE_DRV_LOAD_COMMON:
4779 bnx2x_init_internal_common(bp);
4782 case FW_MSG_CODE_DRV_LOAD_PORT:
4783 bnx2x_init_internal_port(bp);
4786 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4787 bnx2x_init_internal_func(bp);
4791 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4796 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4800 for_each_queue(bp, i) {
4801 struct bnx2x_fastpath *fp = &bp->fp[i];
4804 fp->state = BNX2X_FP_STATE_CLOSED;
4806 fp->cl_id = BP_L_ID(bp) + i;
4807 fp->sb_id = fp->cl_id;
4809 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4810 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4811 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4813 bnx2x_update_fpsb_idx(fp);
4816 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4818 bnx2x_update_dsb_idx(bp);
4819 bnx2x_update_coalesce(bp);
4820 bnx2x_init_rx_rings(bp);
4821 bnx2x_init_tx_ring(bp);
4822 bnx2x_init_sp_ring(bp);
4823 bnx2x_init_context(bp);
4824 bnx2x_init_internal(bp, load_code);
4825 bnx2x_init_ind_table(bp);
4826 bnx2x_stats_init(bp);
4828 /* At this point, we are ready for interrupts */
4829 atomic_set(&bp->intr_sem, 0);
4831 /* flush all before enabling interrupts */
4835 bnx2x_int_enable(bp);
4838 /* end of nic init */
4841 * gzip service functions
4844 static int bnx2x_gunzip_init(struct bnx2x *bp)
4846 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4847 &bp->gunzip_mapping);
4848 if (bp->gunzip_buf == NULL)
4851 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4852 if (bp->strm == NULL)
4855 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4857 if (bp->strm->workspace == NULL)
4867 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4868 bp->gunzip_mapping);
4869 bp->gunzip_buf = NULL;
4872 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4873 " un-compression\n", bp->dev->name);
4877 static void bnx2x_gunzip_end(struct bnx2x *bp)
4879 kfree(bp->strm->workspace);
4884 if (bp->gunzip_buf) {
4885 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4886 bp->gunzip_mapping);
4887 bp->gunzip_buf = NULL;
4891 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4895 /* check gzip header */
4896 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4903 if (zbuf[3] & FNAME)
4904 while ((zbuf[n++] != 0) && (n < len));
4906 bp->strm->next_in = zbuf + n;
4907 bp->strm->avail_in = len - n;
4908 bp->strm->next_out = bp->gunzip_buf;
4909 bp->strm->avail_out = FW_BUF_SIZE;
4911 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4915 rc = zlib_inflate(bp->strm, Z_FINISH);
4916 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4917 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4918 bp->dev->name, bp->strm->msg);
4920 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4921 if (bp->gunzip_outlen & 0x3)
4922 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4923 " gunzip_outlen (%d) not aligned\n",
4924 bp->dev->name, bp->gunzip_outlen);
4925 bp->gunzip_outlen >>= 2;
4927 zlib_inflateEnd(bp->strm);
4929 if (rc == Z_STREAM_END)
4935 /* nic load/unload */
4938 * General service functions
4941 /* send a NIG loopback debug packet */
4942 static void bnx2x_lb_pckt(struct bnx2x *bp)
4946 /* Ethernet source and destination addresses */
4947 wb_write[0] = 0x55555555;
4948 wb_write[1] = 0x55555555;
4949 wb_write[2] = 0x20; /* SOP */
4950 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4952 /* NON-IP protocol */
4953 wb_write[0] = 0x09000000;
4954 wb_write[1] = 0x55555555;
4955 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4956 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4959 /* some of the internal memories
4960 * are not directly readable from the driver
4961 * to test them we send debug packets
4963 static int bnx2x_int_mem_test(struct bnx2x *bp)
4969 if (CHIP_REV_IS_FPGA(bp))
4971 else if (CHIP_REV_IS_EMUL(bp))
4976 DP(NETIF_MSG_HW, "start part1\n");
4978 /* Disable inputs of parser neighbor blocks */
4979 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4980 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4981 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4982 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4984 /* Write 0 to parser credits for CFC search request */
4985 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4987 /* send Ethernet packet */
4990 /* TODO do i reset NIG statistic? */
4991 /* Wait until NIG register shows 1 packet of size 0x10 */
4992 count = 1000 * factor;
4995 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4996 val = *bnx2x_sp(bp, wb_data[0]);
5004 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5008 /* Wait until PRS register shows 1 packet */
5009 count = 1000 * factor;
5011 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5019 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5023 /* Reset and init BRB, PRS */
5024 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5026 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5028 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5029 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5031 DP(NETIF_MSG_HW, "part2\n");
5033 /* Disable inputs of parser neighbor blocks */
5034 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5035 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5036 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5037 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5039 /* Write 0 to parser credits for CFC search request */
5040 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5042 /* send 10 Ethernet packets */
5043 for (i = 0; i < 10; i++)
5046 /* Wait until NIG register shows 10 + 1
5047 packets of size 11*0x10 = 0xb0 */
5048 count = 1000 * factor;
5051 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5052 val = *bnx2x_sp(bp, wb_data[0]);
5060 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5064 /* Wait until PRS register shows 2 packets */
5065 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5067 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5069 /* Write 1 to parser credits for CFC search request */
5070 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5072 /* Wait until PRS register shows 3 packets */
5073 msleep(10 * factor);
5074 /* Wait until NIG register shows 1 packet of size 0x10 */
5075 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5077 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5079 /* clear NIG EOP FIFO */
5080 for (i = 0; i < 11; i++)
5081 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5082 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5084 BNX2X_ERR("clear of NIG failed\n");
5088 /* Reset and init BRB, PRS, NIG */
5089 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5091 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5093 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5094 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5097 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5100 /* Enable inputs of parser neighbor blocks */
5101 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5102 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5103 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5104 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5106 DP(NETIF_MSG_HW, "done\n");
5111 static void enable_blocks_attention(struct bnx2x *bp)
5113 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5114 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5115 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5116 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5117 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5118 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5119 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5120 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5121 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5122 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5123 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5124 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5125 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5126 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5127 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5128 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5129 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5130 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5131 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5132 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5133 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5134 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5135 if (CHIP_REV_IS_FPGA(bp))
5136 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5138 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5139 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5140 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5141 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5142 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5143 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5144 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5145 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5146 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5147 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5151 static void bnx2x_reset_common(struct bnx2x *bp)
5154 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5156 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5159 static int bnx2x_init_common(struct bnx2x *bp)
5163 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5165 bnx2x_reset_common(bp);
5166 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5167 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5169 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5170 if (CHIP_IS_E1H(bp))
5171 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5173 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5175 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5177 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5178 if (CHIP_IS_E1(bp)) {
5179 /* enable HW interrupt from PXP on USDM overflow
5180 bit 16 on INT_MASK_0 */
5181 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5184 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5188 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5189 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5190 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5191 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5192 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5194 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5195 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5196 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5197 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5198 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5201 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5203 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5204 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5205 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5208 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5209 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5211 /* let the HW do it's magic ... */
5213 /* finish PXP init */
5214 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5216 BNX2X_ERR("PXP2 CFG failed\n");
5219 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5221 BNX2X_ERR("PXP2 RD_INIT failed\n");
5225 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5226 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5228 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5230 /* clean the DMAE memory */
5232 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5234 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5235 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5236 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5237 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5239 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5240 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5241 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5242 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5244 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5245 /* soft reset pulse */
5246 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5247 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5250 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5253 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5254 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5255 if (!CHIP_REV_IS_SLOW(bp)) {
5256 /* enable hw interrupt from doorbell Q */
5257 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5260 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5261 if (CHIP_REV_IS_SLOW(bp)) {
5262 /* fix for emulation and FPGA for no pause */
5263 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5264 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5265 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5266 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5269 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5270 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5272 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5273 if (CHIP_IS_E1H(bp))
5274 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5276 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5277 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5278 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5279 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5281 if (CHIP_IS_E1H(bp)) {
5282 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5283 STORM_INTMEM_SIZE_E1H/2);
5285 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5286 0, STORM_INTMEM_SIZE_E1H/2);
5287 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5288 STORM_INTMEM_SIZE_E1H/2);
5290 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5291 0, STORM_INTMEM_SIZE_E1H/2);
5292 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5293 STORM_INTMEM_SIZE_E1H/2);
5295 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5296 0, STORM_INTMEM_SIZE_E1H/2);
5297 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5298 STORM_INTMEM_SIZE_E1H/2);
5300 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5301 0, STORM_INTMEM_SIZE_E1H/2);
5303 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5304 STORM_INTMEM_SIZE_E1);
5305 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5306 STORM_INTMEM_SIZE_E1);
5307 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5308 STORM_INTMEM_SIZE_E1);
5309 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5310 STORM_INTMEM_SIZE_E1);
5313 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5314 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5315 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5316 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5319 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5321 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5324 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5325 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5326 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5328 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5329 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5330 REG_WR(bp, i, 0xc0cac01a);
5331 /* TODO: replace with something meaningful */
5333 if (CHIP_IS_E1H(bp))
5334 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5335 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5337 if (sizeof(union cdu_context) != 1024)
5338 /* we currently assume that a context is 1024 bytes */
5339 printk(KERN_ALERT PFX "please adjust the size of"
5340 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5342 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5343 val = (4 << 24) + (0 << 12) + 1024;
5344 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5345 if (CHIP_IS_E1(bp)) {
5346 /* !!! fix pxp client crdit until excel update */
5347 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5348 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5351 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5352 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5354 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5355 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5357 /* PXPCS COMMON comes here */
5358 /* Reset PCIE errors for debug */
5359 REG_WR(bp, 0x2814, 0xffffffff);
5360 REG_WR(bp, 0x3820, 0xffffffff);
5362 /* EMAC0 COMMON comes here */
5363 /* EMAC1 COMMON comes here */
5364 /* DBU COMMON comes here */
5365 /* DBG COMMON comes here */
5367 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5368 if (CHIP_IS_E1H(bp)) {
5369 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5370 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5373 if (CHIP_REV_IS_SLOW(bp))
5376 /* finish CFC init */
5377 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5379 BNX2X_ERR("CFC LL_INIT failed\n");
5382 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5384 BNX2X_ERR("CFC AC_INIT failed\n");
5387 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5389 BNX2X_ERR("CFC CAM_INIT failed\n");
5392 REG_WR(bp, CFC_REG_DEBUG0, 0);
5394 /* read NIG statistic
5395 to see if this is our first up since powerup */
5396 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5397 val = *bnx2x_sp(bp, wb_data[0]);
5399 /* do internal memory self test */
5400 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5401 BNX2X_ERR("internal mem self test failed\n");
5405 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5406 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5407 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5408 /* Fan failure is indicated by SPIO 5 */
5409 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5410 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5412 /* set to active low mode */
5413 val = REG_RD(bp, MISC_REG_SPIO_INT);
5414 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5415 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5416 REG_WR(bp, MISC_REG_SPIO_INT, val);
5418 /* enable interrupt to signal the IGU */
5419 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5420 val |= (1 << MISC_REGISTERS_SPIO_5);
5421 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5428 /* clear PXP2 attentions */
5429 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5431 enable_blocks_attention(bp);
5433 if (!BP_NOMCP(bp)) {
5434 bnx2x_acquire_phy_lock(bp);
5435 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5436 bnx2x_release_phy_lock(bp);
5438 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5443 static int bnx2x_init_port(struct bnx2x *bp)
5445 int port = BP_PORT(bp);
5448 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5450 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5452 /* Port PXP comes here */
5453 /* Port PXP2 comes here */
5458 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5459 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5460 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5461 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5466 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5467 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5468 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5469 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5474 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5475 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5476 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5477 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5479 /* Port CMs come here */
5481 /* Port QM comes here */
5483 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5484 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5486 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5487 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5489 /* Port DQ comes here */
5490 /* Port BRB1 comes here */
5491 /* Port PRS comes here */
5492 /* Port TSDM comes here */
5493 /* Port CSDM comes here */
5494 /* Port USDM comes here */
5495 /* Port XSDM comes here */
5496 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5497 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5498 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5499 port ? USEM_PORT1_END : USEM_PORT0_END);
5500 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5501 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5502 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5503 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5504 /* Port UPB comes here */
5505 /* Port XPB comes here */
5507 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5508 port ? PBF_PORT1_END : PBF_PORT0_END);
5510 /* configure PBF to work without PAUSE mtu 9000 */
5511 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5513 /* update threshold */
5514 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5515 /* update init credit */
5516 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5519 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5521 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5524 /* tell the searcher where the T2 table is */
5525 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5527 wb_write[0] = U64_LO(bp->t2_mapping);
5528 wb_write[1] = U64_HI(bp->t2_mapping);
5529 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5530 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5531 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5532 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5534 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5535 /* Port SRCH comes here */
5537 /* Port CDU comes here */
5538 /* Port CFC comes here */
5540 if (CHIP_IS_E1(bp)) {
5541 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5542 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5544 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5545 port ? HC_PORT1_END : HC_PORT0_END);
5547 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5548 MISC_AEU_PORT0_START,
5549 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5550 /* init aeu_mask_attn_func_0/1:
5551 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5552 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5553 * bits 4-7 are used for "per vn group attention" */
5554 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5555 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5557 /* Port PXPCS comes here */
5558 /* Port EMAC0 comes here */
5559 /* Port EMAC1 comes here */
5560 /* Port DBU comes here */
5561 /* Port DBG comes here */
5562 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5563 port ? NIG_PORT1_END : NIG_PORT0_END);
5565 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5567 if (CHIP_IS_E1H(bp)) {
5569 struct cmng_struct_per_port m_cmng_port;
5572 /* 0x2 disable e1hov, 0x1 enable */
5573 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5574 (IS_E1HMF(bp) ? 0x1 : 0x2));
5576 /* Init RATE SHAPING and FAIRNESS contexts.
5577 Initialize as if there is 10G link. */
5578 wsum = bnx2x_calc_vn_wsum(bp);
5579 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5581 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5582 bnx2x_init_vn_minmax(bp, 2*vn + port,
5583 wsum, 10000, &m_cmng_port);
5586 /* Port MCP comes here */
5587 /* Port DMAE comes here */
5589 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5590 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5591 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5592 /* add SPIO 5 to group 0 */
5593 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5594 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5595 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5602 bnx2x__link_reset(bp);
5607 #define ILT_PER_FUNC (768/2)
5608 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5609 /* the phys address is shifted right 12 bits and has an added
5610 1=valid bit added to the 53rd bit
5611 then since this is a wide register(TM)
5612 we split it into two 32 bit writes
5614 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5615 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5616 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5617 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5619 #define CNIC_ILT_LINES 0
5621 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5625 if (CHIP_IS_E1H(bp))
5626 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5628 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5630 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5633 static int bnx2x_init_func(struct bnx2x *bp)
5635 int port = BP_PORT(bp);
5636 int func = BP_FUNC(bp);
5639 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5641 i = FUNC_ILT_BASE(func);
5643 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5644 if (CHIP_IS_E1H(bp)) {
5645 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5646 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5648 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5649 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5652 if (CHIP_IS_E1H(bp)) {
5653 for (i = 0; i < 9; i++)
5654 bnx2x_init_block(bp,
5655 cm_start[func][i], cm_end[func][i]);
5657 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5658 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5661 /* HC init per function */
5662 if (CHIP_IS_E1H(bp)) {
5663 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5665 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5666 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5668 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5670 if (CHIP_IS_E1H(bp))
5671 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5673 /* Reset PCIE errors for debug */
5674 REG_WR(bp, 0x2114, 0xffffffff);
5675 REG_WR(bp, 0x2120, 0xffffffff);
5680 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5684 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5685 BP_FUNC(bp), load_code);
5688 mutex_init(&bp->dmae_mutex);
5689 bnx2x_gunzip_init(bp);
5691 switch (load_code) {
5692 case FW_MSG_CODE_DRV_LOAD_COMMON:
5693 rc = bnx2x_init_common(bp);
5698 case FW_MSG_CODE_DRV_LOAD_PORT:
5700 rc = bnx2x_init_port(bp);
5705 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5707 rc = bnx2x_init_func(bp);
5713 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5717 if (!BP_NOMCP(bp)) {
5718 int func = BP_FUNC(bp);
5720 bp->fw_drv_pulse_wr_seq =
5721 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5722 DRV_PULSE_SEQ_MASK);
5723 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5724 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5725 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5729 /* this needs to be done before gunzip end */
5730 bnx2x_zero_def_sb(bp);
5731 for_each_queue(bp, i)
5732 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5735 bnx2x_gunzip_end(bp);
5740 /* send the MCP a request, block until there is a reply */
5741 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5743 int func = BP_FUNC(bp);
5744 u32 seq = ++bp->fw_seq;
5747 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5749 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5750 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5753 /* let the FW do it's magic ... */
5756 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5758 /* Give the FW up to 2 second (200*10ms) */
5759 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5761 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5762 cnt*delay, rc, seq);
5764 /* is this a reply to our command? */
5765 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5766 rc &= FW_MSG_CODE_MASK;
5770 BNX2X_ERR("FW failed to respond!\n");
5778 static void bnx2x_free_mem(struct bnx2x *bp)
5781 #define BNX2X_PCI_FREE(x, y, size) \
5784 pci_free_consistent(bp->pdev, size, x, y); \
5790 #define BNX2X_FREE(x) \
5801 for_each_queue(bp, i) {
5804 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5805 bnx2x_fp(bp, i, status_blk_mapping),
5806 sizeof(struct host_status_block) +
5807 sizeof(struct eth_tx_db_data));
5809 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5810 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5811 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5812 bnx2x_fp(bp, i, tx_desc_mapping),
5813 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5815 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5816 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5817 bnx2x_fp(bp, i, rx_desc_mapping),
5818 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5820 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5821 bnx2x_fp(bp, i, rx_comp_mapping),
5822 sizeof(struct eth_fast_path_rx_cqe) *
5826 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5827 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5828 bnx2x_fp(bp, i, rx_sge_mapping),
5829 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5831 /* end of fastpath */
5833 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5834 sizeof(struct host_def_status_block));
5836 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5837 sizeof(struct bnx2x_slowpath));
5840 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5841 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5842 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5843 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5845 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5847 #undef BNX2X_PCI_FREE
5851 static int bnx2x_alloc_mem(struct bnx2x *bp)
5854 #define BNX2X_PCI_ALLOC(x, y, size) \
5856 x = pci_alloc_consistent(bp->pdev, size, y); \
5858 goto alloc_mem_err; \
5859 memset(x, 0, size); \
5862 #define BNX2X_ALLOC(x, size) \
5864 x = vmalloc(size); \
5866 goto alloc_mem_err; \
5867 memset(x, 0, size); \
5873 for_each_queue(bp, i) {
5874 bnx2x_fp(bp, i, bp) = bp;
5877 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5878 &bnx2x_fp(bp, i, status_blk_mapping),
5879 sizeof(struct host_status_block) +
5880 sizeof(struct eth_tx_db_data));
5882 bnx2x_fp(bp, i, hw_tx_prods) =
5883 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5885 bnx2x_fp(bp, i, tx_prods_mapping) =
5886 bnx2x_fp(bp, i, status_blk_mapping) +
5887 sizeof(struct host_status_block);
5889 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5890 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5891 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5892 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5893 &bnx2x_fp(bp, i, tx_desc_mapping),
5894 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5896 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5897 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5898 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5899 &bnx2x_fp(bp, i, rx_desc_mapping),
5900 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5902 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5903 &bnx2x_fp(bp, i, rx_comp_mapping),
5904 sizeof(struct eth_fast_path_rx_cqe) *
5908 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5909 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5910 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5911 &bnx2x_fp(bp, i, rx_sge_mapping),
5912 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5914 /* end of fastpath */
5916 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5917 sizeof(struct host_def_status_block));
5919 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5920 sizeof(struct bnx2x_slowpath));
5923 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5926 for (i = 0; i < 64*1024; i += 64) {
5927 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5928 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5931 /* allocate searcher T2 table
5932 we allocate 1/4 of alloc num for T2
5933 (which is not entered into the ILT) */
5934 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5937 for (i = 0; i < 16*1024; i += 64)
5938 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5940 /* now fixup the last line in the block to point to the next block */
5941 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5943 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5944 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5946 /* QM queues (128*MAX_CONN) */
5947 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5950 /* Slow path ring */
5951 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5959 #undef BNX2X_PCI_ALLOC
5963 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5967 for_each_queue(bp, i) {
5968 struct bnx2x_fastpath *fp = &bp->fp[i];
5970 u16 bd_cons = fp->tx_bd_cons;
5971 u16 sw_prod = fp->tx_pkt_prod;
5972 u16 sw_cons = fp->tx_pkt_cons;
5974 while (sw_cons != sw_prod) {
5975 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5981 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5985 for_each_queue(bp, j) {
5986 struct bnx2x_fastpath *fp = &bp->fp[j];
5988 for (i = 0; i < NUM_RX_BD; i++) {
5989 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5990 struct sk_buff *skb = rx_buf->skb;
5995 pci_unmap_single(bp->pdev,
5996 pci_unmap_addr(rx_buf, mapping),
5998 PCI_DMA_FROMDEVICE);
6003 if (!fp->disable_tpa)
6004 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6005 ETH_MAX_AGGREGATION_QUEUES_E1 :
6006 ETH_MAX_AGGREGATION_QUEUES_E1H);
6010 static void bnx2x_free_skbs(struct bnx2x *bp)
6012 bnx2x_free_tx_skbs(bp);
6013 bnx2x_free_rx_skbs(bp);
6016 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6020 free_irq(bp->msix_table[0].vector, bp->dev);
6021 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6022 bp->msix_table[0].vector);
6024 for_each_queue(bp, i) {
6025 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6026 "state %x\n", i, bp->msix_table[i + offset].vector,
6027 bnx2x_fp(bp, i, state));
6029 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
6030 BNX2X_ERR("IRQ of fp #%d being freed while "
6031 "state != closed\n", i);
6033 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6037 static void bnx2x_free_irq(struct bnx2x *bp)
6039 if (bp->flags & USING_MSIX_FLAG) {
6040 bnx2x_free_msix_irqs(bp);
6041 pci_disable_msix(bp->pdev);
6042 bp->flags &= ~USING_MSIX_FLAG;
6045 free_irq(bp->pdev->irq, bp->dev);
6048 static int bnx2x_enable_msix(struct bnx2x *bp)
6052 bp->msix_table[0].entry = 0;
6054 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6056 for_each_queue(bp, i) {
6057 int igu_vec = offset + i + BP_L_ID(bp);
6059 bp->msix_table[i + offset].entry = igu_vec;
6060 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6061 "(fastpath #%u)\n", i + offset, igu_vec, i);
6064 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6065 bp->num_queues + offset);
6067 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6070 bp->flags |= USING_MSIX_FLAG;
6075 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6077 int i, rc, offset = 1;
6079 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6080 bp->dev->name, bp->dev);
6082 BNX2X_ERR("request sp irq failed\n");
6086 for_each_queue(bp, i) {
6087 rc = request_irq(bp->msix_table[i + offset].vector,
6088 bnx2x_msix_fp_int, 0,
6089 bp->dev->name, &bp->fp[i]);
6091 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6093 bnx2x_free_msix_irqs(bp);
6097 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6103 static int bnx2x_req_irq(struct bnx2x *bp)
6107 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6108 bp->dev->name, bp->dev);
6110 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6115 static void bnx2x_napi_enable(struct bnx2x *bp)
6119 for_each_queue(bp, i)
6120 napi_enable(&bnx2x_fp(bp, i, napi));
6123 static void bnx2x_napi_disable(struct bnx2x *bp)
6127 for_each_queue(bp, i)
6128 napi_disable(&bnx2x_fp(bp, i, napi));
6131 static void bnx2x_netif_start(struct bnx2x *bp)
6133 if (atomic_dec_and_test(&bp->intr_sem)) {
6134 if (netif_running(bp->dev)) {
6135 if (bp->state == BNX2X_STATE_OPEN)
6136 netif_wake_queue(bp->dev);
6137 bnx2x_napi_enable(bp);
6138 bnx2x_int_enable(bp);
6143 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6145 bnx2x_int_disable_sync(bp, disable_hw);
6146 if (netif_running(bp->dev)) {
6147 bnx2x_napi_disable(bp);
6148 netif_tx_disable(bp->dev);
6149 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6154 * Init service functions
6157 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6159 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6160 int port = BP_PORT(bp);
6163 * unicasts 0-31:port0 32-63:port1
6164 * multicast 64-127:port0 128-191:port1
6166 config->hdr.length_6b = 2;
6167 config->hdr.offset = port ? 32 : 0;
6168 config->hdr.client_id = BP_CL_ID(bp);
6169 config->hdr.reserved1 = 0;
6172 config->config_table[0].cam_entry.msb_mac_addr =
6173 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6174 config->config_table[0].cam_entry.middle_mac_addr =
6175 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6176 config->config_table[0].cam_entry.lsb_mac_addr =
6177 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6178 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6180 config->config_table[0].target_table_entry.flags = 0;
6182 CAM_INVALIDATE(config->config_table[0]);
6183 config->config_table[0].target_table_entry.client_id = 0;
6184 config->config_table[0].target_table_entry.vlan_id = 0;
6186 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6187 (set ? "setting" : "clearing"),
6188 config->config_table[0].cam_entry.msb_mac_addr,
6189 config->config_table[0].cam_entry.middle_mac_addr,
6190 config->config_table[0].cam_entry.lsb_mac_addr);
6193 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6194 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6195 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6196 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6198 config->config_table[1].target_table_entry.flags =
6199 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6201 CAM_INVALIDATE(config->config_table[1]);
6202 config->config_table[1].target_table_entry.client_id = 0;
6203 config->config_table[1].target_table_entry.vlan_id = 0;
6205 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6206 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6207 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6210 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6212 struct mac_configuration_cmd_e1h *config =
6213 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6215 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6216 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6220 /* CAM allocation for E1H
6221 * unicasts: by func number
6222 * multicast: 20+FUNC*20, 20 each
6224 config->hdr.length_6b = 1;
6225 config->hdr.offset = BP_FUNC(bp);
6226 config->hdr.client_id = BP_CL_ID(bp);
6227 config->hdr.reserved1 = 0;
6230 config->config_table[0].msb_mac_addr =
6231 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6232 config->config_table[0].middle_mac_addr =
6233 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6234 config->config_table[0].lsb_mac_addr =
6235 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6236 config->config_table[0].client_id = BP_L_ID(bp);
6237 config->config_table[0].vlan_id = 0;
6238 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6240 config->config_table[0].flags = BP_PORT(bp);
6242 config->config_table[0].flags =
6243 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6245 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6246 (set ? "setting" : "clearing"),
6247 config->config_table[0].msb_mac_addr,
6248 config->config_table[0].middle_mac_addr,
6249 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6251 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6252 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6253 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6256 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6257 int *state_p, int poll)
6259 /* can take a while if any port is running */
6262 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6263 poll ? "polling" : "waiting", state, idx);
6268 bnx2x_rx_int(bp->fp, 10);
6269 /* if index is different from 0
6270 * the reply for some commands will
6271 * be on the non default queue
6274 bnx2x_rx_int(&bp->fp[idx], 10);
6277 mb(); /* state is changed by bnx2x_sp_event() */
6278 if (*state_p == state)
6285 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6286 poll ? "polling" : "waiting", state, idx);
6287 #ifdef BNX2X_STOP_ON_ERROR
6294 static int bnx2x_setup_leading(struct bnx2x *bp)
6298 /* reset IGU state */
6299 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6302 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6304 /* Wait for completion */
6305 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6310 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6312 /* reset IGU state */
6313 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6316 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6317 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6319 /* Wait for completion */
6320 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6321 &(bp->fp[index].state), 0);
6324 static int bnx2x_poll(struct napi_struct *napi, int budget);
6325 static void bnx2x_set_rx_mode(struct net_device *dev);
6327 /* must be called with rtnl_lock */
6328 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6332 #ifdef BNX2X_STOP_ON_ERROR
6333 if (unlikely(bp->panic))
6337 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6339 /* Send LOAD_REQUEST command to MCP
6340 Returns the type of LOAD command:
6341 if it is the first port to be initialized
6342 common blocks should be initialized, otherwise - not
6344 if (!BP_NOMCP(bp)) {
6345 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6347 BNX2X_ERR("MCP response failure, aborting\n");
6350 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6351 return -EBUSY; /* other port in diagnostic mode */
6354 int port = BP_PORT(bp);
6356 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6357 load_count[0], load_count[1], load_count[2]);
6359 load_count[1 + port]++;
6360 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6361 load_count[0], load_count[1], load_count[2]);
6362 if (load_count[0] == 1)
6363 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6364 else if (load_count[1 + port] == 1)
6365 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6367 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6370 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6371 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6375 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6377 /* if we can't use MSI-X we only need one fp,
6378 * so try to enable MSI-X with the requested number of fp's
6379 * and fallback to inta with one fp
6385 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6386 /* user requested number */
6387 bp->num_queues = use_multi;
6390 bp->num_queues = min_t(u32, num_online_cpus(),
6395 if (bnx2x_enable_msix(bp)) {
6396 /* failed to enable MSI-X */
6399 BNX2X_ERR("Multi requested but failed"
6400 " to enable MSI-X\n");
6404 "set number of queues to %d\n", bp->num_queues);
6406 if (bnx2x_alloc_mem(bp))
6409 for_each_queue(bp, i)
6410 bnx2x_fp(bp, i, disable_tpa) =
6411 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6413 if (bp->flags & USING_MSIX_FLAG) {
6414 rc = bnx2x_req_msix_irqs(bp);
6416 pci_disable_msix(bp->pdev);
6421 rc = bnx2x_req_irq(bp);
6423 BNX2X_ERR("IRQ request failed, aborting\n");
6428 for_each_queue(bp, i)
6429 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6433 rc = bnx2x_init_hw(bp, load_code);
6435 BNX2X_ERR("HW init failed, aborting\n");
6436 goto load_int_disable;
6439 /* Setup NIC internals and enable interrupts */
6440 bnx2x_nic_init(bp, load_code);
6442 /* Send LOAD_DONE command to MCP */
6443 if (!BP_NOMCP(bp)) {
6444 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6446 BNX2X_ERR("MCP response failure, aborting\n");
6448 goto load_rings_free;
6452 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6454 rc = bnx2x_setup_leading(bp);
6456 BNX2X_ERR("Setup leading failed!\n");
6457 goto load_netif_stop;
6460 if (CHIP_IS_E1H(bp))
6461 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6462 BNX2X_ERR("!!! mf_cfg function disabled\n");
6463 bp->state = BNX2X_STATE_DISABLED;
6466 if (bp->state == BNX2X_STATE_OPEN)
6467 for_each_nondefault_queue(bp, i) {
6468 rc = bnx2x_setup_multi(bp, i);
6470 goto load_netif_stop;
6474 bnx2x_set_mac_addr_e1(bp, 1);
6476 bnx2x_set_mac_addr_e1h(bp, 1);
6479 bnx2x_initial_phy_init(bp);
6481 /* Start fast path */
6482 switch (load_mode) {
6484 /* Tx queue should be only reenabled */
6485 netif_wake_queue(bp->dev);
6486 bnx2x_set_rx_mode(bp->dev);
6490 netif_start_queue(bp->dev);
6491 bnx2x_set_rx_mode(bp->dev);
6492 if (bp->flags & USING_MSIX_FLAG)
6493 printk(KERN_INFO PFX "%s: using MSI-X\n",
6498 bnx2x_set_rx_mode(bp->dev);
6499 bp->state = BNX2X_STATE_DIAG;
6507 bnx2x__link_status_update(bp);
6509 /* start the timer */
6510 mod_timer(&bp->timer, jiffies + bp->current_interval);
6516 bnx2x_napi_disable(bp);
6518 /* Free SKBs, SGEs, TPA pool and driver internals */
6519 bnx2x_free_skbs(bp);
6520 for_each_queue(bp, i)
6521 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6523 bnx2x_int_disable_sync(bp, 1);
6530 /* TBD we really need to reset the chip
6531 if we want to recover from this */
6535 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6539 /* halt the connection */
6540 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6541 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6543 /* Wait for completion */
6544 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6545 &(bp->fp[index].state), 1);
6546 if (rc) /* timeout */
6549 /* delete cfc entry */
6550 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6552 /* Wait for completion */
6553 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6554 &(bp->fp[index].state), 1);
6558 static int bnx2x_stop_leading(struct bnx2x *bp)
6560 u16 dsb_sp_prod_idx;
6561 /* if the other port is handling traffic,
6562 this can take a lot of time */
6568 /* Send HALT ramrod */
6569 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6570 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6572 /* Wait for completion */
6573 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6574 &(bp->fp[0].state), 1);
6575 if (rc) /* timeout */
6578 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6580 /* Send PORT_DELETE ramrod */
6581 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6583 /* Wait for completion to arrive on default status block
6584 we are going to reset the chip anyway
6585 so there is not much to do if this times out
6587 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6589 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6590 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6591 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6592 #ifdef BNX2X_STOP_ON_ERROR
6602 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6603 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6608 static void bnx2x_reset_func(struct bnx2x *bp)
6610 int port = BP_PORT(bp);
6611 int func = BP_FUNC(bp);
6615 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6616 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6618 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6621 base = FUNC_ILT_BASE(func);
6622 for (i = base; i < base + ILT_PER_FUNC; i++)
6623 bnx2x_ilt_wr(bp, i, 0);
6626 static void bnx2x_reset_port(struct bnx2x *bp)
6628 int port = BP_PORT(bp);
6631 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6633 /* Do not rcv packets to BRB */
6634 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6635 /* Do not direct rcv packets that are not for MCP to the BRB */
6636 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6637 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6640 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6643 /* Check for BRB port occupancy */
6644 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6646 DP(NETIF_MSG_IFDOWN,
6647 "BRB1 is not empty %d blocks are occupied\n", val);
6649 /* TODO: Close Doorbell port? */
6652 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6654 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6655 BP_FUNC(bp), reset_code);
6657 switch (reset_code) {
6658 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6659 bnx2x_reset_port(bp);
6660 bnx2x_reset_func(bp);
6661 bnx2x_reset_common(bp);
6664 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6665 bnx2x_reset_port(bp);
6666 bnx2x_reset_func(bp);
6669 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6670 bnx2x_reset_func(bp);
6674 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6679 /* must be called with rtnl_lock */
6680 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6682 int port = BP_PORT(bp);
6686 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6688 bp->rx_mode = BNX2X_RX_MODE_NONE;
6689 bnx2x_set_storm_rx_mode(bp);
6691 bnx2x_netif_stop(bp, 1);
6692 if (!netif_running(bp->dev))
6693 bnx2x_napi_disable(bp);
6694 del_timer_sync(&bp->timer);
6695 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6696 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6697 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6702 /* Wait until tx fast path tasks complete */
6703 for_each_queue(bp, i) {
6704 struct bnx2x_fastpath *fp = &bp->fp[i];
6708 while (bnx2x_has_tx_work(fp)) {
6710 bnx2x_tx_int(fp, 1000);
6712 BNX2X_ERR("timeout waiting for queue[%d]\n",
6714 #ifdef BNX2X_STOP_ON_ERROR
6726 /* Give HW time to discard old tx messages */
6729 if (CHIP_IS_E1(bp)) {
6730 struct mac_configuration_cmd *config =
6731 bnx2x_sp(bp, mcast_config);
6733 bnx2x_set_mac_addr_e1(bp, 0);
6735 for (i = 0; i < config->hdr.length_6b; i++)
6736 CAM_INVALIDATE(config->config_table[i]);
6738 config->hdr.length_6b = i;
6739 if (CHIP_REV_IS_SLOW(bp))
6740 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6742 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6743 config->hdr.client_id = BP_CL_ID(bp);
6744 config->hdr.reserved1 = 0;
6746 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6747 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6748 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6751 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6753 bnx2x_set_mac_addr_e1h(bp, 0);
6755 for (i = 0; i < MC_HASH_SIZE; i++)
6756 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6759 if (unload_mode == UNLOAD_NORMAL)
6760 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6762 else if (bp->flags & NO_WOL_FLAG) {
6763 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6764 if (CHIP_IS_E1H(bp))
6765 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6767 } else if (bp->wol) {
6768 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6769 u8 *mac_addr = bp->dev->dev_addr;
6771 /* The mac address is written to entries 1-4 to
6772 preserve entry 0 which is used by the PMF */
6773 u8 entry = (BP_E1HVN(bp) + 1)*8;
6775 val = (mac_addr[0] << 8) | mac_addr[1];
6776 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6778 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6779 (mac_addr[4] << 8) | mac_addr[5];
6780 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6782 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6785 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6787 /* Close multi and leading connections
6788 Completions for ramrods are collected in a synchronous way */
6789 for_each_nondefault_queue(bp, i)
6790 if (bnx2x_stop_multi(bp, i))
6793 rc = bnx2x_stop_leading(bp);
6795 BNX2X_ERR("Stop leading failed!\n");
6796 #ifdef BNX2X_STOP_ON_ERROR
6805 reset_code = bnx2x_fw_command(bp, reset_code);
6807 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6808 load_count[0], load_count[1], load_count[2]);
6810 load_count[1 + port]--;
6811 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6812 load_count[0], load_count[1], load_count[2]);
6813 if (load_count[0] == 0)
6814 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6815 else if (load_count[1 + port] == 0)
6816 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6818 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6821 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6822 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6823 bnx2x__link_reset(bp);
6825 /* Reset the chip */
6826 bnx2x_reset_chip(bp, reset_code);
6828 /* Report UNLOAD_DONE to MCP */
6830 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6833 /* Free SKBs, SGEs, TPA pool and driver internals */
6834 bnx2x_free_skbs(bp);
6835 for_each_queue(bp, i)
6836 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6839 bp->state = BNX2X_STATE_CLOSED;
6841 netif_carrier_off(bp->dev);
6846 static void bnx2x_reset_task(struct work_struct *work)
6848 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6850 #ifdef BNX2X_STOP_ON_ERROR
6851 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6852 " so reset not done to allow debug dump,\n"
6853 KERN_ERR " you will need to reboot when done\n");
6859 if (!netif_running(bp->dev))
6860 goto reset_task_exit;
6862 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6863 bnx2x_nic_load(bp, LOAD_NORMAL);
6869 /* end of nic load/unload */
6874 * Init service functions
6877 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6881 /* Check if there is any driver already loaded */
6882 val = REG_RD(bp, MISC_REG_UNPREPARED);
6884 /* Check if it is the UNDI driver
6885 * UNDI driver initializes CID offset for normal bell to 0x7
6887 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6888 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6890 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6892 int func = BP_FUNC(bp);
6896 /* clear the UNDI indication */
6897 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6899 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6901 /* try unload UNDI on port 0 */
6904 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6905 DRV_MSG_SEQ_NUMBER_MASK);
6906 reset_code = bnx2x_fw_command(bp, reset_code);
6908 /* if UNDI is loaded on the other port */
6909 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6911 /* send "DONE" for previous unload */
6912 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6914 /* unload UNDI on port 1 */
6917 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6918 DRV_MSG_SEQ_NUMBER_MASK);
6919 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6921 bnx2x_fw_command(bp, reset_code);
6924 /* now it's safe to release the lock */
6925 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6927 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6928 HC_REG_CONFIG_0), 0x1000);
6930 /* close input traffic and wait for it */
6931 /* Do not rcv packets to BRB */
6933 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6934 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6935 /* Do not direct rcv packets that are not for MCP to
6938 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6939 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6942 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6943 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6946 /* save NIG port swap info */
6947 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6948 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6951 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6954 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6956 /* take the NIG out of reset and restore swap values */
6958 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6959 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6960 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6961 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6963 /* send unload done to the MCP */
6964 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6966 /* restore our func and fw_seq */
6969 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6970 DRV_MSG_SEQ_NUMBER_MASK);
6973 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6977 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6979 u32 val, val2, val3, val4, id;
6982 /* Get the chip revision id and number. */
6983 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6984 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6985 id = ((val & 0xffff) << 16);
6986 val = REG_RD(bp, MISC_REG_CHIP_REV);
6987 id |= ((val & 0xf) << 12);
6988 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6989 id |= ((val & 0xff) << 4);
6990 val = REG_RD(bp, MISC_REG_BOND_ID);
6992 bp->common.chip_id = id;
6993 bp->link_params.chip_id = bp->common.chip_id;
6994 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6996 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6997 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6998 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6999 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7000 bp->common.flash_size, bp->common.flash_size);
7002 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7003 bp->link_params.shmem_base = bp->common.shmem_base;
7004 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7006 if (!bp->common.shmem_base ||
7007 (bp->common.shmem_base < 0xA0000) ||
7008 (bp->common.shmem_base >= 0xC0000)) {
7009 BNX2X_DEV_INFO("MCP not active\n");
7010 bp->flags |= NO_MCP_FLAG;
7014 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7015 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7016 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7017 BNX2X_ERR("BAD MCP validity signature\n");
7019 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7020 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7022 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
7023 bp->common.hw_config, bp->common.board);
7025 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7026 SHARED_HW_CFG_LED_MODE_MASK) >>
7027 SHARED_HW_CFG_LED_MODE_SHIFT);
7029 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7030 bp->common.bc_ver = val;
7031 BNX2X_DEV_INFO("bc_ver %X\n", val);
7032 if (val < BNX2X_BC_VER) {
7033 /* for now only warn
7034 * later we might need to enforce this */
7035 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7036 " please upgrade BC\n", BNX2X_BC_VER, val);
7039 if (BP_E1HVN(bp) == 0) {
7040 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7041 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7043 /* no WOL capability for E1HVN != 0 */
7044 bp->flags |= NO_WOL_FLAG;
7046 BNX2X_DEV_INFO("%sWoL capable\n",
7047 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7049 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7050 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7051 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7052 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7054 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7055 val, val2, val3, val4);
7058 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7061 int port = BP_PORT(bp);
7064 switch (switch_cfg) {
7066 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7069 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7070 switch (ext_phy_type) {
7071 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7072 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7075 bp->port.supported |= (SUPPORTED_10baseT_Half |
7076 SUPPORTED_10baseT_Full |
7077 SUPPORTED_100baseT_Half |
7078 SUPPORTED_100baseT_Full |
7079 SUPPORTED_1000baseT_Full |
7080 SUPPORTED_2500baseX_Full |
7085 SUPPORTED_Asym_Pause);
7088 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7089 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7092 bp->port.supported |= (SUPPORTED_10baseT_Half |
7093 SUPPORTED_10baseT_Full |
7094 SUPPORTED_100baseT_Half |
7095 SUPPORTED_100baseT_Full |
7096 SUPPORTED_1000baseT_Full |
7101 SUPPORTED_Asym_Pause);
7105 BNX2X_ERR("NVRAM config error. "
7106 "BAD SerDes ext_phy_config 0x%x\n",
7107 bp->link_params.ext_phy_config);
7111 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7113 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7116 case SWITCH_CFG_10G:
7117 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7120 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7121 switch (ext_phy_type) {
7122 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7123 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7126 bp->port.supported |= (SUPPORTED_10baseT_Half |
7127 SUPPORTED_10baseT_Full |
7128 SUPPORTED_100baseT_Half |
7129 SUPPORTED_100baseT_Full |
7130 SUPPORTED_1000baseT_Full |
7131 SUPPORTED_2500baseX_Full |
7132 SUPPORTED_10000baseT_Full |
7137 SUPPORTED_Asym_Pause);
7140 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7141 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7144 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7147 SUPPORTED_Asym_Pause);
7150 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7151 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7154 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7155 SUPPORTED_1000baseT_Full |
7158 SUPPORTED_Asym_Pause);
7161 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7162 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7165 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7166 SUPPORTED_1000baseT_Full |
7170 SUPPORTED_Asym_Pause);
7173 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7174 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7177 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7178 SUPPORTED_2500baseX_Full |
7179 SUPPORTED_1000baseT_Full |
7183 SUPPORTED_Asym_Pause);
7186 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7187 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7190 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7194 SUPPORTED_Asym_Pause);
7197 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7198 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7199 bp->link_params.ext_phy_config);
7203 BNX2X_ERR("NVRAM config error. "
7204 "BAD XGXS ext_phy_config 0x%x\n",
7205 bp->link_params.ext_phy_config);
7209 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7211 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7216 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7217 bp->port.link_config);
7220 bp->link_params.phy_addr = bp->port.phy_addr;
7222 /* mask what we support according to speed_cap_mask */
7223 if (!(bp->link_params.speed_cap_mask &
7224 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7225 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7227 if (!(bp->link_params.speed_cap_mask &
7228 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7229 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7231 if (!(bp->link_params.speed_cap_mask &
7232 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7233 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7235 if (!(bp->link_params.speed_cap_mask &
7236 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7237 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7239 if (!(bp->link_params.speed_cap_mask &
7240 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7241 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7242 SUPPORTED_1000baseT_Full);
7244 if (!(bp->link_params.speed_cap_mask &
7245 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7246 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7248 if (!(bp->link_params.speed_cap_mask &
7249 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7250 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7252 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7255 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7257 bp->link_params.req_duplex = DUPLEX_FULL;
7259 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7260 case PORT_FEATURE_LINK_SPEED_AUTO:
7261 if (bp->port.supported & SUPPORTED_Autoneg) {
7262 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7263 bp->port.advertising = bp->port.supported;
7266 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7268 if ((ext_phy_type ==
7269 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7271 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7272 /* force 10G, no AN */
7273 bp->link_params.req_line_speed = SPEED_10000;
7274 bp->port.advertising =
7275 (ADVERTISED_10000baseT_Full |
7279 BNX2X_ERR("NVRAM config error. "
7280 "Invalid link_config 0x%x"
7281 " Autoneg not supported\n",
7282 bp->port.link_config);
7287 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7288 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7289 bp->link_params.req_line_speed = SPEED_10;
7290 bp->port.advertising = (ADVERTISED_10baseT_Full |
7293 BNX2X_ERR("NVRAM config error. "
7294 "Invalid link_config 0x%x"
7295 " speed_cap_mask 0x%x\n",
7296 bp->port.link_config,
7297 bp->link_params.speed_cap_mask);
7302 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7303 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7304 bp->link_params.req_line_speed = SPEED_10;
7305 bp->link_params.req_duplex = DUPLEX_HALF;
7306 bp->port.advertising = (ADVERTISED_10baseT_Half |
7309 BNX2X_ERR("NVRAM config error. "
7310 "Invalid link_config 0x%x"
7311 " speed_cap_mask 0x%x\n",
7312 bp->port.link_config,
7313 bp->link_params.speed_cap_mask);
7318 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7319 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7320 bp->link_params.req_line_speed = SPEED_100;
7321 bp->port.advertising = (ADVERTISED_100baseT_Full |
7324 BNX2X_ERR("NVRAM config error. "
7325 "Invalid link_config 0x%x"
7326 " speed_cap_mask 0x%x\n",
7327 bp->port.link_config,
7328 bp->link_params.speed_cap_mask);
7333 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7334 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7335 bp->link_params.req_line_speed = SPEED_100;
7336 bp->link_params.req_duplex = DUPLEX_HALF;
7337 bp->port.advertising = (ADVERTISED_100baseT_Half |
7340 BNX2X_ERR("NVRAM config error. "
7341 "Invalid link_config 0x%x"
7342 " speed_cap_mask 0x%x\n",
7343 bp->port.link_config,
7344 bp->link_params.speed_cap_mask);
7349 case PORT_FEATURE_LINK_SPEED_1G:
7350 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7351 bp->link_params.req_line_speed = SPEED_1000;
7352 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7355 BNX2X_ERR("NVRAM config error. "
7356 "Invalid link_config 0x%x"
7357 " speed_cap_mask 0x%x\n",
7358 bp->port.link_config,
7359 bp->link_params.speed_cap_mask);
7364 case PORT_FEATURE_LINK_SPEED_2_5G:
7365 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7366 bp->link_params.req_line_speed = SPEED_2500;
7367 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7370 BNX2X_ERR("NVRAM config error. "
7371 "Invalid link_config 0x%x"
7372 " speed_cap_mask 0x%x\n",
7373 bp->port.link_config,
7374 bp->link_params.speed_cap_mask);
7379 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7380 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7381 case PORT_FEATURE_LINK_SPEED_10G_KR:
7382 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7383 bp->link_params.req_line_speed = SPEED_10000;
7384 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7387 BNX2X_ERR("NVRAM config error. "
7388 "Invalid link_config 0x%x"
7389 " speed_cap_mask 0x%x\n",
7390 bp->port.link_config,
7391 bp->link_params.speed_cap_mask);
7397 BNX2X_ERR("NVRAM config error. "
7398 "BAD link speed link_config 0x%x\n",
7399 bp->port.link_config);
7400 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7401 bp->port.advertising = bp->port.supported;
7405 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7406 PORT_FEATURE_FLOW_CONTROL_MASK);
7407 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7408 !(bp->port.supported & SUPPORTED_Autoneg))
7409 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7411 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7412 " advertising 0x%x\n",
7413 bp->link_params.req_line_speed,
7414 bp->link_params.req_duplex,
7415 bp->link_params.req_flow_ctrl, bp->port.advertising);
7418 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7420 int port = BP_PORT(bp);
7423 bp->link_params.bp = bp;
7424 bp->link_params.port = port;
7426 bp->link_params.serdes_config =
7427 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7428 bp->link_params.lane_config =
7429 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7430 bp->link_params.ext_phy_config =
7432 dev_info.port_hw_config[port].external_phy_config);
7433 bp->link_params.speed_cap_mask =
7435 dev_info.port_hw_config[port].speed_capability_mask);
7437 bp->port.link_config =
7438 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7440 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7441 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7442 " link_config 0x%08x\n",
7443 bp->link_params.serdes_config,
7444 bp->link_params.lane_config,
7445 bp->link_params.ext_phy_config,
7446 bp->link_params.speed_cap_mask, bp->port.link_config);
7448 bp->link_params.switch_cfg = (bp->port.link_config &
7449 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7450 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7452 bnx2x_link_settings_requested(bp);
7454 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7455 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7456 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7457 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7458 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7459 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7460 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7461 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7462 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7463 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7466 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7468 int func = BP_FUNC(bp);
7472 bnx2x_get_common_hwinfo(bp);
7476 if (CHIP_IS_E1H(bp)) {
7478 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7480 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7481 FUNC_MF_CFG_E1HOV_TAG_MASK);
7482 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7486 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7488 func, bp->e1hov, bp->e1hov);
7490 BNX2X_DEV_INFO("Single function mode\n");
7492 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7493 " aborting\n", func);
7499 if (!BP_NOMCP(bp)) {
7500 bnx2x_get_port_hwinfo(bp);
7502 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7503 DRV_MSG_SEQ_NUMBER_MASK);
7504 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7508 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7509 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7510 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7511 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7512 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7513 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7514 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7515 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7516 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7517 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7518 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7520 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7528 /* only supposed to happen on emulation/FPGA */
7529 BNX2X_ERR("warning random MAC workaround active\n");
7530 random_ether_addr(bp->dev->dev_addr);
7531 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7537 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7539 int func = BP_FUNC(bp);
7542 /* Disable interrupt handling until HW is initialized */
7543 atomic_set(&bp->intr_sem, 1);
7545 mutex_init(&bp->port.phy_mutex);
7547 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7548 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7550 rc = bnx2x_get_hwinfo(bp);
7552 /* need to reset chip if undi was active */
7554 bnx2x_undi_unload(bp);
7556 if (CHIP_REV_IS_FPGA(bp))
7557 printk(KERN_ERR PFX "FPGA detected\n");
7559 if (BP_NOMCP(bp) && (func == 0))
7561 "MCP disabled, must load devices in order!\n");
7565 bp->flags &= ~TPA_ENABLE_FLAG;
7566 bp->dev->features &= ~NETIF_F_LRO;
7568 bp->flags |= TPA_ENABLE_FLAG;
7569 bp->dev->features |= NETIF_F_LRO;
7573 bp->tx_ring_size = MAX_TX_AVAIL;
7574 bp->rx_ring_size = MAX_RX_AVAIL;
7582 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7583 bp->current_interval = (poll ? poll : bp->timer_interval);
7585 init_timer(&bp->timer);
7586 bp->timer.expires = jiffies + bp->current_interval;
7587 bp->timer.data = (unsigned long) bp;
7588 bp->timer.function = bnx2x_timer;
7594 * ethtool service functions
7597 /* All ethtool functions called with rtnl_lock */
7599 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7601 struct bnx2x *bp = netdev_priv(dev);
7603 cmd->supported = bp->port.supported;
7604 cmd->advertising = bp->port.advertising;
7606 if (netif_carrier_ok(dev)) {
7607 cmd->speed = bp->link_vars.line_speed;
7608 cmd->duplex = bp->link_vars.duplex;
7610 cmd->speed = bp->link_params.req_line_speed;
7611 cmd->duplex = bp->link_params.req_duplex;
7616 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7617 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7618 if (vn_max_rate < cmd->speed)
7619 cmd->speed = vn_max_rate;
7622 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7624 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7626 switch (ext_phy_type) {
7627 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7628 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7629 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7630 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7631 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7632 cmd->port = PORT_FIBRE;
7635 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7636 cmd->port = PORT_TP;
7639 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7640 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7641 bp->link_params.ext_phy_config);
7645 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7646 bp->link_params.ext_phy_config);
7650 cmd->port = PORT_TP;
7652 cmd->phy_address = bp->port.phy_addr;
7653 cmd->transceiver = XCVR_INTERNAL;
7655 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7656 cmd->autoneg = AUTONEG_ENABLE;
7658 cmd->autoneg = AUTONEG_DISABLE;
7663 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7664 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7665 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7666 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7667 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7668 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7669 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7674 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7676 struct bnx2x *bp = netdev_priv(dev);
7682 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7683 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7684 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7685 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7686 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7687 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7688 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7690 if (cmd->autoneg == AUTONEG_ENABLE) {
7691 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7692 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7696 /* advertise the requested speed and duplex if supported */
7697 cmd->advertising &= bp->port.supported;
7699 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7700 bp->link_params.req_duplex = DUPLEX_FULL;
7701 bp->port.advertising |= (ADVERTISED_Autoneg |
7704 } else { /* forced speed */
7705 /* advertise the requested speed and duplex if supported */
7706 switch (cmd->speed) {
7708 if (cmd->duplex == DUPLEX_FULL) {
7709 if (!(bp->port.supported &
7710 SUPPORTED_10baseT_Full)) {
7712 "10M full not supported\n");
7716 advertising = (ADVERTISED_10baseT_Full |
7719 if (!(bp->port.supported &
7720 SUPPORTED_10baseT_Half)) {
7722 "10M half not supported\n");
7726 advertising = (ADVERTISED_10baseT_Half |
7732 if (cmd->duplex == DUPLEX_FULL) {
7733 if (!(bp->port.supported &
7734 SUPPORTED_100baseT_Full)) {
7736 "100M full not supported\n");
7740 advertising = (ADVERTISED_100baseT_Full |
7743 if (!(bp->port.supported &
7744 SUPPORTED_100baseT_Half)) {
7746 "100M half not supported\n");
7750 advertising = (ADVERTISED_100baseT_Half |
7756 if (cmd->duplex != DUPLEX_FULL) {
7757 DP(NETIF_MSG_LINK, "1G half not supported\n");
7761 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7762 DP(NETIF_MSG_LINK, "1G full not supported\n");
7766 advertising = (ADVERTISED_1000baseT_Full |
7771 if (cmd->duplex != DUPLEX_FULL) {
7773 "2.5G half not supported\n");
7777 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7779 "2.5G full not supported\n");
7783 advertising = (ADVERTISED_2500baseX_Full |
7788 if (cmd->duplex != DUPLEX_FULL) {
7789 DP(NETIF_MSG_LINK, "10G half not supported\n");
7793 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7794 DP(NETIF_MSG_LINK, "10G full not supported\n");
7798 advertising = (ADVERTISED_10000baseT_Full |
7803 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7807 bp->link_params.req_line_speed = cmd->speed;
7808 bp->link_params.req_duplex = cmd->duplex;
7809 bp->port.advertising = advertising;
7812 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7813 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7814 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7815 bp->port.advertising);
7817 if (netif_running(dev)) {
7818 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7825 #define PHY_FW_VER_LEN 10
7827 static void bnx2x_get_drvinfo(struct net_device *dev,
7828 struct ethtool_drvinfo *info)
7830 struct bnx2x *bp = netdev_priv(dev);
7831 u8 phy_fw_ver[PHY_FW_VER_LEN];
7833 strcpy(info->driver, DRV_MODULE_NAME);
7834 strcpy(info->version, DRV_MODULE_VERSION);
7836 phy_fw_ver[0] = '\0';
7838 bnx2x_acquire_phy_lock(bp);
7839 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7840 (bp->state != BNX2X_STATE_CLOSED),
7841 phy_fw_ver, PHY_FW_VER_LEN);
7842 bnx2x_release_phy_lock(bp);
7845 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7846 (bp->common.bc_ver & 0xff0000) >> 16,
7847 (bp->common.bc_ver & 0xff00) >> 8,
7848 (bp->common.bc_ver & 0xff),
7849 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7850 strcpy(info->bus_info, pci_name(bp->pdev));
7851 info->n_stats = BNX2X_NUM_STATS;
7852 info->testinfo_len = BNX2X_NUM_TESTS;
7853 info->eedump_len = bp->common.flash_size;
7854 info->regdump_len = 0;
7857 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7859 struct bnx2x *bp = netdev_priv(dev);
7861 if (bp->flags & NO_WOL_FLAG) {
7865 wol->supported = WAKE_MAGIC;
7867 wol->wolopts = WAKE_MAGIC;
7871 memset(&wol->sopass, 0, sizeof(wol->sopass));
7874 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7876 struct bnx2x *bp = netdev_priv(dev);
7878 if (wol->wolopts & ~WAKE_MAGIC)
7881 if (wol->wolopts & WAKE_MAGIC) {
7882 if (bp->flags & NO_WOL_FLAG)
7892 static u32 bnx2x_get_msglevel(struct net_device *dev)
7894 struct bnx2x *bp = netdev_priv(dev);
7896 return bp->msglevel;
7899 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7901 struct bnx2x *bp = netdev_priv(dev);
7903 if (capable(CAP_NET_ADMIN))
7904 bp->msglevel = level;
7907 static int bnx2x_nway_reset(struct net_device *dev)
7909 struct bnx2x *bp = netdev_priv(dev);
7914 if (netif_running(dev)) {
7915 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7922 static int bnx2x_get_eeprom_len(struct net_device *dev)
7924 struct bnx2x *bp = netdev_priv(dev);
7926 return bp->common.flash_size;
7929 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7931 int port = BP_PORT(bp);
7935 /* adjust timeout for emulation/FPGA */
7936 count = NVRAM_TIMEOUT_COUNT;
7937 if (CHIP_REV_IS_SLOW(bp))
7940 /* request access to nvram interface */
7941 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7942 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7944 for (i = 0; i < count*10; i++) {
7945 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7946 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7952 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7953 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7960 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7962 int port = BP_PORT(bp);
7966 /* adjust timeout for emulation/FPGA */
7967 count = NVRAM_TIMEOUT_COUNT;
7968 if (CHIP_REV_IS_SLOW(bp))
7971 /* relinquish nvram interface */
7972 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7973 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7975 for (i = 0; i < count*10; i++) {
7976 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7977 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7983 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7984 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7991 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7995 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7997 /* enable both bits, even on read */
7998 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7999 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8000 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8003 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8007 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8009 /* disable both bits, even after read */
8010 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8011 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8012 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8015 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8021 /* build the command word */
8022 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8024 /* need to clear DONE bit separately */
8025 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8027 /* address of the NVRAM to read from */
8028 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8029 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8031 /* issue a read command */
8032 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8034 /* adjust timeout for emulation/FPGA */
8035 count = NVRAM_TIMEOUT_COUNT;
8036 if (CHIP_REV_IS_SLOW(bp))
8039 /* wait for completion */
8042 for (i = 0; i < count; i++) {
8044 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8046 if (val & MCPR_NVM_COMMAND_DONE) {
8047 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8048 /* we read nvram data in cpu order
8049 * but ethtool sees it as an array of bytes
8050 * converting to big-endian will do the work */
8051 val = cpu_to_be32(val);
8061 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8068 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8070 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8075 if (offset + buf_size > bp->common.flash_size) {
8076 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8077 " buf_size (0x%x) > flash_size (0x%x)\n",
8078 offset, buf_size, bp->common.flash_size);
8082 /* request access to nvram interface */
8083 rc = bnx2x_acquire_nvram_lock(bp);
8087 /* enable access to nvram interface */
8088 bnx2x_enable_nvram_access(bp);
8090 /* read the first word(s) */
8091 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8092 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8093 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8094 memcpy(ret_buf, &val, 4);
8096 /* advance to the next dword */
8097 offset += sizeof(u32);
8098 ret_buf += sizeof(u32);
8099 buf_size -= sizeof(u32);
8104 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8105 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8106 memcpy(ret_buf, &val, 4);
8109 /* disable access to nvram interface */
8110 bnx2x_disable_nvram_access(bp);
8111 bnx2x_release_nvram_lock(bp);
8116 static int bnx2x_get_eeprom(struct net_device *dev,
8117 struct ethtool_eeprom *eeprom, u8 *eebuf)
8119 struct bnx2x *bp = netdev_priv(dev);
8122 if (!netif_running(dev))
8125 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8126 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8127 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8128 eeprom->len, eeprom->len);
8130 /* parameters already validated in ethtool_get_eeprom */
8132 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8137 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8142 /* build the command word */
8143 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8145 /* need to clear DONE bit separately */
8146 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8148 /* write the data */
8149 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8151 /* address of the NVRAM to write to */
8152 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8153 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8155 /* issue the write command */
8156 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8158 /* adjust timeout for emulation/FPGA */
8159 count = NVRAM_TIMEOUT_COUNT;
8160 if (CHIP_REV_IS_SLOW(bp))
8163 /* wait for completion */
8165 for (i = 0; i < count; i++) {
8167 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8168 if (val & MCPR_NVM_COMMAND_DONE) {
8177 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8179 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8187 if (offset + buf_size > bp->common.flash_size) {
8188 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8189 " buf_size (0x%x) > flash_size (0x%x)\n",
8190 offset, buf_size, bp->common.flash_size);
8194 /* request access to nvram interface */
8195 rc = bnx2x_acquire_nvram_lock(bp);
8199 /* enable access to nvram interface */
8200 bnx2x_enable_nvram_access(bp);
8202 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8203 align_offset = (offset & ~0x03);
8204 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8207 val &= ~(0xff << BYTE_OFFSET(offset));
8208 val |= (*data_buf << BYTE_OFFSET(offset));
8210 /* nvram data is returned as an array of bytes
8211 * convert it back to cpu order */
8212 val = be32_to_cpu(val);
8214 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8218 /* disable access to nvram interface */
8219 bnx2x_disable_nvram_access(bp);
8220 bnx2x_release_nvram_lock(bp);
8225 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8233 if (buf_size == 1) /* ethtool */
8234 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8236 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8238 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8243 if (offset + buf_size > bp->common.flash_size) {
8244 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8245 " buf_size (0x%x) > flash_size (0x%x)\n",
8246 offset, buf_size, bp->common.flash_size);
8250 /* request access to nvram interface */
8251 rc = bnx2x_acquire_nvram_lock(bp);
8255 /* enable access to nvram interface */
8256 bnx2x_enable_nvram_access(bp);
8259 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8260 while ((written_so_far < buf_size) && (rc == 0)) {
8261 if (written_so_far == (buf_size - sizeof(u32)))
8262 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8263 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8264 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8265 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8266 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8268 memcpy(&val, data_buf, 4);
8270 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8272 /* advance to the next dword */
8273 offset += sizeof(u32);
8274 data_buf += sizeof(u32);
8275 written_so_far += sizeof(u32);
8279 /* disable access to nvram interface */
8280 bnx2x_disable_nvram_access(bp);
8281 bnx2x_release_nvram_lock(bp);
8286 static int bnx2x_set_eeprom(struct net_device *dev,
8287 struct ethtool_eeprom *eeprom, u8 *eebuf)
8289 struct bnx2x *bp = netdev_priv(dev);
8292 if (!netif_running(dev))
8295 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8296 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8297 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8298 eeprom->len, eeprom->len);
8300 /* parameters already validated in ethtool_set_eeprom */
8302 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8303 if (eeprom->magic == 0x00504859)
8306 bnx2x_acquire_phy_lock(bp);
8307 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8308 bp->link_params.ext_phy_config,
8309 (bp->state != BNX2X_STATE_CLOSED),
8310 eebuf, eeprom->len);
8311 if ((bp->state == BNX2X_STATE_OPEN) ||
8312 (bp->state == BNX2X_STATE_DISABLED)) {
8313 rc |= bnx2x_link_reset(&bp->link_params,
8315 rc |= bnx2x_phy_init(&bp->link_params,
8318 bnx2x_release_phy_lock(bp);
8320 } else /* Only the PMF can access the PHY */
8323 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8328 static int bnx2x_get_coalesce(struct net_device *dev,
8329 struct ethtool_coalesce *coal)
8331 struct bnx2x *bp = netdev_priv(dev);
8333 memset(coal, 0, sizeof(struct ethtool_coalesce));
8335 coal->rx_coalesce_usecs = bp->rx_ticks;
8336 coal->tx_coalesce_usecs = bp->tx_ticks;
8341 static int bnx2x_set_coalesce(struct net_device *dev,
8342 struct ethtool_coalesce *coal)
8344 struct bnx2x *bp = netdev_priv(dev);
8346 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8347 if (bp->rx_ticks > 3000)
8348 bp->rx_ticks = 3000;
8350 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8351 if (bp->tx_ticks > 0x3000)
8352 bp->tx_ticks = 0x3000;
8354 if (netif_running(dev))
8355 bnx2x_update_coalesce(bp);
8360 static void bnx2x_get_ringparam(struct net_device *dev,
8361 struct ethtool_ringparam *ering)
8363 struct bnx2x *bp = netdev_priv(dev);
8365 ering->rx_max_pending = MAX_RX_AVAIL;
8366 ering->rx_mini_max_pending = 0;
8367 ering->rx_jumbo_max_pending = 0;
8369 ering->rx_pending = bp->rx_ring_size;
8370 ering->rx_mini_pending = 0;
8371 ering->rx_jumbo_pending = 0;
8373 ering->tx_max_pending = MAX_TX_AVAIL;
8374 ering->tx_pending = bp->tx_ring_size;
8377 static int bnx2x_set_ringparam(struct net_device *dev,
8378 struct ethtool_ringparam *ering)
8380 struct bnx2x *bp = netdev_priv(dev);
8383 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8384 (ering->tx_pending > MAX_TX_AVAIL) ||
8385 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8388 bp->rx_ring_size = ering->rx_pending;
8389 bp->tx_ring_size = ering->tx_pending;
8391 if (netif_running(dev)) {
8392 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8393 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8399 static void bnx2x_get_pauseparam(struct net_device *dev,
8400 struct ethtool_pauseparam *epause)
8402 struct bnx2x *bp = netdev_priv(dev);
8404 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8405 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8407 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8408 BNX2X_FLOW_CTRL_RX);
8409 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8410 BNX2X_FLOW_CTRL_TX);
8412 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8413 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8414 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8417 static int bnx2x_set_pauseparam(struct net_device *dev,
8418 struct ethtool_pauseparam *epause)
8420 struct bnx2x *bp = netdev_priv(dev);
8425 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8426 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8427 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8429 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8431 if (epause->rx_pause)
8432 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8434 if (epause->tx_pause)
8435 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8437 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8438 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8440 if (epause->autoneg) {
8441 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8442 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8446 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8447 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8451 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8453 if (netif_running(dev)) {
8454 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8461 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8463 struct bnx2x *bp = netdev_priv(dev);
8467 /* TPA requires Rx CSUM offloading */
8468 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8469 if (!(dev->features & NETIF_F_LRO)) {
8470 dev->features |= NETIF_F_LRO;
8471 bp->flags |= TPA_ENABLE_FLAG;
8475 } else if (dev->features & NETIF_F_LRO) {
8476 dev->features &= ~NETIF_F_LRO;
8477 bp->flags &= ~TPA_ENABLE_FLAG;
8481 if (changed && netif_running(dev)) {
8482 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8483 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8489 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8491 struct bnx2x *bp = netdev_priv(dev);
8496 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8498 struct bnx2x *bp = netdev_priv(dev);
8503 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
8504 TPA'ed packets will be discarded due to wrong TCP CSUM */
8506 u32 flags = ethtool_op_get_flags(dev);
8508 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8514 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8517 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8518 dev->features |= NETIF_F_TSO6;
8520 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8521 dev->features &= ~NETIF_F_TSO6;
8527 static const struct {
8528 char string[ETH_GSTRING_LEN];
8529 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8530 { "register_test (offline)" },
8531 { "memory_test (offline)" },
8532 { "loopback_test (offline)" },
8533 { "nvram_test (online)" },
8534 { "interrupt_test (online)" },
8535 { "link_test (online)" },
8536 { "idle check (online)" },
8537 { "MC errors (online)" }
8540 static int bnx2x_self_test_count(struct net_device *dev)
8542 return BNX2X_NUM_TESTS;
8545 static int bnx2x_test_registers(struct bnx2x *bp)
8547 int idx, i, rc = -ENODEV;
8549 int port = BP_PORT(bp);
8550 static const struct {
8555 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8556 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8557 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8558 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8559 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8560 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8561 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8562 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8563 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8564 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8565 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8566 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8567 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8568 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8569 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8570 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8571 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8572 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8573 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8574 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8575 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8576 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8577 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8578 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8579 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8580 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8581 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8582 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8583 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8584 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8585 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8586 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8587 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8588 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8589 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8590 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8591 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8592 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8594 { 0xffffffff, 0, 0x00000000 }
8597 if (!netif_running(bp->dev))
8600 /* Repeat the test twice:
8601 First by writing 0x00000000, second by writing 0xffffffff */
8602 for (idx = 0; idx < 2; idx++) {
8609 wr_val = 0xffffffff;
8613 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8614 u32 offset, mask, save_val, val;
8616 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8617 mask = reg_tbl[i].mask;
8619 save_val = REG_RD(bp, offset);
8621 REG_WR(bp, offset, wr_val);
8622 val = REG_RD(bp, offset);
8624 /* Restore the original register's value */
8625 REG_WR(bp, offset, save_val);
8627 /* verify that value is as expected value */
8628 if ((val & mask) != (wr_val & mask))
8639 static int bnx2x_test_memory(struct bnx2x *bp)
8641 int i, j, rc = -ENODEV;
8643 static const struct {
8647 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8648 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8649 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8650 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8651 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8652 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8653 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8657 static const struct {
8663 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8664 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8665 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8666 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8667 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8668 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8670 { NULL, 0xffffffff, 0, 0 }
8673 if (!netif_running(bp->dev))
8676 /* Go through all the memories */
8677 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8678 for (j = 0; j < mem_tbl[i].size; j++)
8679 REG_RD(bp, mem_tbl[i].offset + j*4);
8681 /* Check the parity status */
8682 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8683 val = REG_RD(bp, prty_tbl[i].offset);
8684 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8685 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8687 "%s is 0x%x\n", prty_tbl[i].name, val);
8698 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8703 while (bnx2x_link_test(bp) && cnt--)
8707 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8709 unsigned int pkt_size, num_pkts, i;
8710 struct sk_buff *skb;
8711 unsigned char *packet;
8712 struct bnx2x_fastpath *fp = &bp->fp[0];
8713 u16 tx_start_idx, tx_idx;
8714 u16 rx_start_idx, rx_idx;
8716 struct sw_tx_bd *tx_buf;
8717 struct eth_tx_bd *tx_bd;
8719 union eth_rx_cqe *cqe;
8721 struct sw_rx_bd *rx_buf;
8725 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8726 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8727 bnx2x_acquire_phy_lock(bp);
8728 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8729 bnx2x_release_phy_lock(bp);
8731 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8732 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8733 bnx2x_acquire_phy_lock(bp);
8734 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8735 bnx2x_release_phy_lock(bp);
8736 /* wait until link state is restored */
8737 bnx2x_wait_for_link(bp, link_up);
8743 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8746 goto test_loopback_exit;
8748 packet = skb_put(skb, pkt_size);
8749 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8750 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8751 for (i = ETH_HLEN; i < pkt_size; i++)
8752 packet[i] = (unsigned char) (i & 0xff);
8755 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8756 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8758 pkt_prod = fp->tx_pkt_prod++;
8759 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8760 tx_buf->first_bd = fp->tx_bd_prod;
8763 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8764 mapping = pci_map_single(bp->pdev, skb->data,
8765 skb_headlen(skb), PCI_DMA_TODEVICE);
8766 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8767 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8768 tx_bd->nbd = cpu_to_le16(1);
8769 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8770 tx_bd->vlan = cpu_to_le16(pkt_prod);
8771 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8772 ETH_TX_BD_FLAGS_END_BD);
8773 tx_bd->general_data = ((UNICAST_ADDRESS <<
8774 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8778 fp->hw_tx_prods->bds_prod =
8779 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8780 mb(); /* FW restriction: must not reorder writing nbd and packets */
8781 fp->hw_tx_prods->packets_prod =
8782 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8783 DOORBELL(bp, FP_IDX(fp), 0);
8789 bp->dev->trans_start = jiffies;
8793 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8794 if (tx_idx != tx_start_idx + num_pkts)
8795 goto test_loopback_exit;
8797 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8798 if (rx_idx != rx_start_idx + num_pkts)
8799 goto test_loopback_exit;
8801 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8802 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8803 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8804 goto test_loopback_rx_exit;
8806 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8807 if (len != pkt_size)
8808 goto test_loopback_rx_exit;
8810 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8812 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8813 for (i = ETH_HLEN; i < pkt_size; i++)
8814 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8815 goto test_loopback_rx_exit;
8819 test_loopback_rx_exit:
8821 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8822 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8823 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8824 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8826 /* Update producers */
8827 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8831 bp->link_params.loopback_mode = LOOPBACK_NONE;
8836 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8840 if (!netif_running(bp->dev))
8841 return BNX2X_LOOPBACK_FAILED;
8843 bnx2x_netif_stop(bp, 1);
8845 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8846 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8847 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8850 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8851 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8852 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8855 bnx2x_netif_start(bp);
8860 #define CRC32_RESIDUAL 0xdebb20e3
8862 static int bnx2x_test_nvram(struct bnx2x *bp)
8864 static const struct {
8868 { 0, 0x14 }, /* bootstrap */
8869 { 0x14, 0xec }, /* dir */
8870 { 0x100, 0x350 }, /* manuf_info */
8871 { 0x450, 0xf0 }, /* feature_info */
8872 { 0x640, 0x64 }, /* upgrade_key_info */
8874 { 0x708, 0x70 }, /* manuf_key_info */
8879 u8 *data = (u8 *)buf;
8883 rc = bnx2x_nvram_read(bp, 0, data, 4);
8885 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8886 goto test_nvram_exit;
8889 magic = be32_to_cpu(buf[0]);
8890 if (magic != 0x669955aa) {
8891 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8893 goto test_nvram_exit;
8896 for (i = 0; nvram_tbl[i].size; i++) {
8898 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8902 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8903 goto test_nvram_exit;
8906 csum = ether_crc_le(nvram_tbl[i].size, data);
8907 if (csum != CRC32_RESIDUAL) {
8909 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8911 goto test_nvram_exit;
8919 static int bnx2x_test_intr(struct bnx2x *bp)
8921 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8924 if (!netif_running(bp->dev))
8927 config->hdr.length_6b = 0;
8929 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
8931 config->hdr.offset = BP_FUNC(bp);
8932 config->hdr.client_id = BP_CL_ID(bp);
8933 config->hdr.reserved1 = 0;
8935 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8936 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8937 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8939 bp->set_mac_pending++;
8940 for (i = 0; i < 10; i++) {
8941 if (!bp->set_mac_pending)
8943 msleep_interruptible(10);
8952 static void bnx2x_self_test(struct net_device *dev,
8953 struct ethtool_test *etest, u64 *buf)
8955 struct bnx2x *bp = netdev_priv(dev);
8957 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8959 if (!netif_running(dev))
8962 /* offline tests are not supported in MF mode */
8964 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8966 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8969 link_up = bp->link_vars.link_up;
8970 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8971 bnx2x_nic_load(bp, LOAD_DIAG);
8972 /* wait until link state is restored */
8973 bnx2x_wait_for_link(bp, link_up);
8975 if (bnx2x_test_registers(bp) != 0) {
8977 etest->flags |= ETH_TEST_FL_FAILED;
8979 if (bnx2x_test_memory(bp) != 0) {
8981 etest->flags |= ETH_TEST_FL_FAILED;
8983 buf[2] = bnx2x_test_loopback(bp, link_up);
8985 etest->flags |= ETH_TEST_FL_FAILED;
8987 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8988 bnx2x_nic_load(bp, LOAD_NORMAL);
8989 /* wait until link state is restored */
8990 bnx2x_wait_for_link(bp, link_up);
8992 if (bnx2x_test_nvram(bp) != 0) {
8994 etest->flags |= ETH_TEST_FL_FAILED;
8996 if (bnx2x_test_intr(bp) != 0) {
8998 etest->flags |= ETH_TEST_FL_FAILED;
9001 if (bnx2x_link_test(bp) != 0) {
9003 etest->flags |= ETH_TEST_FL_FAILED;
9005 buf[7] = bnx2x_mc_assert(bp);
9007 etest->flags |= ETH_TEST_FL_FAILED;
9009 #ifdef BNX2X_EXTRA_DEBUG
9010 bnx2x_panic_dump(bp);
9014 static const struct {
9018 #define STATS_FLAGS_PORT 1
9019 #define STATS_FLAGS_FUNC 2
9020 u8 string[ETH_GSTRING_LEN];
9021 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9022 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9023 8, STATS_FLAGS_FUNC, "rx_bytes" },
9024 { STATS_OFFSET32(error_bytes_received_hi),
9025 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9026 { STATS_OFFSET32(total_bytes_transmitted_hi),
9027 8, STATS_FLAGS_FUNC, "tx_bytes" },
9028 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9029 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9030 { STATS_OFFSET32(total_unicast_packets_received_hi),
9031 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
9032 { STATS_OFFSET32(total_multicast_packets_received_hi),
9033 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
9034 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9035 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9036 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9037 8, STATS_FLAGS_FUNC, "tx_packets" },
9038 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9039 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9040 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9041 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9042 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9043 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9044 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9045 8, STATS_FLAGS_PORT, "rx_align_errors" },
9046 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9047 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9048 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9049 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9050 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9051 8, STATS_FLAGS_PORT, "tx_deferred" },
9052 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9053 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9054 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9055 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9056 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9057 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9058 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9059 8, STATS_FLAGS_PORT, "rx_fragments" },
9060 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9061 8, STATS_FLAGS_PORT, "rx_jabbers" },
9062 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9063 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9064 { STATS_OFFSET32(jabber_packets_received),
9065 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9066 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9067 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9068 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9069 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9070 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9071 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9072 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9073 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9074 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9075 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9076 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9077 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9078 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9079 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9080 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9081 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9082 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9083 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9084 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9085 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9086 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9087 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9088 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9089 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9090 { STATS_OFFSET32(mac_filter_discard),
9091 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9092 { STATS_OFFSET32(no_buff_discard),
9093 4, STATS_FLAGS_FUNC, "rx_discards" },
9094 { STATS_OFFSET32(xxoverflow_discard),
9095 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9096 { STATS_OFFSET32(brb_drop_hi),
9097 8, STATS_FLAGS_PORT, "brb_discard" },
9098 { STATS_OFFSET32(brb_truncate_hi),
9099 8, STATS_FLAGS_PORT, "brb_truncate" },
9100 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9101 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9102 { STATS_OFFSET32(rx_skb_alloc_failed),
9103 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9104 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9105 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9108 #define IS_NOT_E1HMF_STAT(bp, i) \
9109 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9111 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9113 struct bnx2x *bp = netdev_priv(dev);
9116 switch (stringset) {
9118 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9119 if (IS_NOT_E1HMF_STAT(bp, i))
9121 strcpy(buf + j*ETH_GSTRING_LEN,
9122 bnx2x_stats_arr[i].string);
9128 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9133 static int bnx2x_get_stats_count(struct net_device *dev)
9135 struct bnx2x *bp = netdev_priv(dev);
9136 int i, num_stats = 0;
9138 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9139 if (IS_NOT_E1HMF_STAT(bp, i))
9146 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9147 struct ethtool_stats *stats, u64 *buf)
9149 struct bnx2x *bp = netdev_priv(dev);
9150 u32 *hw_stats = (u32 *)&bp->eth_stats;
9153 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9154 if (IS_NOT_E1HMF_STAT(bp, i))
9157 if (bnx2x_stats_arr[i].size == 0) {
9158 /* skip this counter */
9163 if (bnx2x_stats_arr[i].size == 4) {
9164 /* 4-byte counter */
9165 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9169 /* 8-byte counter */
9170 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9171 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9176 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9178 struct bnx2x *bp = netdev_priv(dev);
9179 int port = BP_PORT(bp);
9182 if (!netif_running(dev))
9191 for (i = 0; i < (data * 2); i++) {
9193 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9194 bp->link_params.hw_led_mode,
9195 bp->link_params.chip_id);
9197 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9198 bp->link_params.hw_led_mode,
9199 bp->link_params.chip_id);
9201 msleep_interruptible(500);
9202 if (signal_pending(current))
9206 if (bp->link_vars.link_up)
9207 bnx2x_set_led(bp, port, LED_MODE_OPER,
9208 bp->link_vars.line_speed,
9209 bp->link_params.hw_led_mode,
9210 bp->link_params.chip_id);
9215 static struct ethtool_ops bnx2x_ethtool_ops = {
9216 .get_settings = bnx2x_get_settings,
9217 .set_settings = bnx2x_set_settings,
9218 .get_drvinfo = bnx2x_get_drvinfo,
9219 .get_wol = bnx2x_get_wol,
9220 .set_wol = bnx2x_set_wol,
9221 .get_msglevel = bnx2x_get_msglevel,
9222 .set_msglevel = bnx2x_set_msglevel,
9223 .nway_reset = bnx2x_nway_reset,
9224 .get_link = ethtool_op_get_link,
9225 .get_eeprom_len = bnx2x_get_eeprom_len,
9226 .get_eeprom = bnx2x_get_eeprom,
9227 .set_eeprom = bnx2x_set_eeprom,
9228 .get_coalesce = bnx2x_get_coalesce,
9229 .set_coalesce = bnx2x_set_coalesce,
9230 .get_ringparam = bnx2x_get_ringparam,
9231 .set_ringparam = bnx2x_set_ringparam,
9232 .get_pauseparam = bnx2x_get_pauseparam,
9233 .set_pauseparam = bnx2x_set_pauseparam,
9234 .get_rx_csum = bnx2x_get_rx_csum,
9235 .set_rx_csum = bnx2x_set_rx_csum,
9236 .get_tx_csum = ethtool_op_get_tx_csum,
9237 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9238 .set_flags = bnx2x_set_flags,
9239 .get_flags = ethtool_op_get_flags,
9240 .get_sg = ethtool_op_get_sg,
9241 .set_sg = ethtool_op_set_sg,
9242 .get_tso = ethtool_op_get_tso,
9243 .set_tso = bnx2x_set_tso,
9244 .self_test_count = bnx2x_self_test_count,
9245 .self_test = bnx2x_self_test,
9246 .get_strings = bnx2x_get_strings,
9247 .phys_id = bnx2x_phys_id,
9248 .get_stats_count = bnx2x_get_stats_count,
9249 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9252 /* end of ethtool_ops */
9254 /****************************************************************************
9255 * General service functions
9256 ****************************************************************************/
9258 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9262 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9266 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9267 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9268 PCI_PM_CTRL_PME_STATUS));
9270 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9271 /* delay required during transition out of D3hot */
9276 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9280 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9282 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9285 /* No more memory access after this point until
9286 * device is brought back to D0.
9296 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9300 /* Tell compiler that status block fields can change */
9302 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9303 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9305 return (fp->rx_comp_cons != rx_cons_sb);
9309 * net_device service functions
9312 static int bnx2x_poll(struct napi_struct *napi, int budget)
9314 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9316 struct bnx2x *bp = fp->bp;
9319 #ifdef BNX2X_STOP_ON_ERROR
9320 if (unlikely(bp->panic))
9324 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9325 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9326 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9328 bnx2x_update_fpsb_idx(fp);
9330 if (bnx2x_has_tx_work(fp))
9331 bnx2x_tx_int(fp, budget);
9333 if (bnx2x_has_rx_work(fp))
9334 work_done = bnx2x_rx_int(fp, budget);
9335 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9337 /* must not complete if we consumed full budget */
9338 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9340 #ifdef BNX2X_STOP_ON_ERROR
9343 netif_rx_complete(napi);
9345 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9346 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9347 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9348 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9354 /* we split the first BD into headers and data BDs
9355 * to ease the pain of our fellow microcode engineers
9356 * we use one mapping for both BDs
9357 * So far this has only been observed to happen
9358 * in Other Operating Systems(TM)
9360 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9361 struct bnx2x_fastpath *fp,
9362 struct eth_tx_bd **tx_bd, u16 hlen,
9363 u16 bd_prod, int nbd)
9365 struct eth_tx_bd *h_tx_bd = *tx_bd;
9366 struct eth_tx_bd *d_tx_bd;
9368 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9370 /* first fix first BD */
9371 h_tx_bd->nbd = cpu_to_le16(nbd);
9372 h_tx_bd->nbytes = cpu_to_le16(hlen);
9374 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9375 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9376 h_tx_bd->addr_lo, h_tx_bd->nbd);
9378 /* now get a new data BD
9379 * (after the pbd) and fill it */
9380 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9381 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9383 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9384 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9386 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9387 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9388 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9390 /* this marks the BD as one that has no individual mapping
9391 * the FW ignores this flag in a BD not marked start
9393 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9394 DP(NETIF_MSG_TX_QUEUED,
9395 "TSO split data size is %d (%x:%x)\n",
9396 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9398 /* update tx_bd for marking the last BD flag */
9404 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9407 csum = (u16) ~csum_fold(csum_sub(csum,
9408 csum_partial(t_header - fix, fix, 0)));
9411 csum = (u16) ~csum_fold(csum_add(csum,
9412 csum_partial(t_header, -fix, 0)));
9414 return swab16(csum);
9417 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9421 if (skb->ip_summed != CHECKSUM_PARTIAL)
9425 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9427 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9428 rc |= XMIT_CSUM_TCP;
9432 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9433 rc |= XMIT_CSUM_TCP;
9437 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9440 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9446 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9447 /* check if packet requires linearization (packet is too fragmented) */
9448 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9453 int first_bd_sz = 0;
9455 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9456 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9458 if (xmit_type & XMIT_GSO) {
9459 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9460 /* Check if LSO packet needs to be copied:
9461 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9462 int wnd_size = MAX_FETCH_BD - 3;
9463 /* Number of windows to check */
9464 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9469 /* Headers length */
9470 hlen = (int)(skb_transport_header(skb) - skb->data) +
9473 /* Amount of data (w/o headers) on linear part of SKB*/
9474 first_bd_sz = skb_headlen(skb) - hlen;
9476 wnd_sum = first_bd_sz;
9478 /* Calculate the first sum - it's special */
9479 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9481 skb_shinfo(skb)->frags[frag_idx].size;
9483 /* If there was data on linear skb data - check it */
9484 if (first_bd_sz > 0) {
9485 if (unlikely(wnd_sum < lso_mss)) {
9490 wnd_sum -= first_bd_sz;
9493 /* Others are easier: run through the frag list and
9494 check all windows */
9495 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9497 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9499 if (unlikely(wnd_sum < lso_mss)) {
9504 skb_shinfo(skb)->frags[wnd_idx].size;
9508 /* in non-LSO too fragmented packet should always
9515 if (unlikely(to_copy))
9516 DP(NETIF_MSG_TX_QUEUED,
9517 "Linearization IS REQUIRED for %s packet. "
9518 "num_frags %d hlen %d first_bd_sz %d\n",
9519 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9520 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9526 /* called with netif_tx_lock
9527 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9528 * netif_wake_queue()
9530 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9532 struct bnx2x *bp = netdev_priv(dev);
9533 struct bnx2x_fastpath *fp;
9534 struct sw_tx_bd *tx_buf;
9535 struct eth_tx_bd *tx_bd;
9536 struct eth_tx_parse_bd *pbd = NULL;
9537 u16 pkt_prod, bd_prod;
9540 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9541 int vlan_off = (bp->e1hov ? 4 : 0);
9545 #ifdef BNX2X_STOP_ON_ERROR
9546 if (unlikely(bp->panic))
9547 return NETDEV_TX_BUSY;
9550 fp_index = (smp_processor_id() % bp->num_queues);
9551 fp = &bp->fp[fp_index];
9553 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9554 bp->eth_stats.driver_xoff++,
9555 netif_stop_queue(dev);
9556 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9557 return NETDEV_TX_BUSY;
9560 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9561 " gso type %x xmit_type %x\n",
9562 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9563 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9565 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9566 /* First, check if we need to linearize the skb
9567 (due to FW restrictions) */
9568 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9569 /* Statistics of linearization */
9571 if (skb_linearize(skb) != 0) {
9572 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9573 "silently dropping this SKB\n");
9574 dev_kfree_skb_any(skb);
9575 return NETDEV_TX_OK;
9581 Please read carefully. First we use one BD which we mark as start,
9582 then for TSO or xsum we have a parsing info BD,
9583 and only then we have the rest of the TSO BDs.
9584 (don't forget to mark the last one as last,
9585 and to unmap only AFTER you write to the BD ...)
9586 And above all, all pdb sizes are in words - NOT DWORDS!
9589 pkt_prod = fp->tx_pkt_prod++;
9590 bd_prod = TX_BD(fp->tx_bd_prod);
9592 /* get a tx_buf and first BD */
9593 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9594 tx_bd = &fp->tx_desc_ring[bd_prod];
9596 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9597 tx_bd->general_data = (UNICAST_ADDRESS <<
9598 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9600 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9602 /* remember the first BD of the packet */
9603 tx_buf->first_bd = fp->tx_bd_prod;
9606 DP(NETIF_MSG_TX_QUEUED,
9607 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9608 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9611 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9612 (bp->flags & HW_VLAN_TX_FLAG)) {
9613 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9614 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9618 tx_bd->vlan = cpu_to_le16(pkt_prod);
9621 /* turn on parsing and get a BD */
9622 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9623 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9625 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9628 if (xmit_type & XMIT_CSUM) {
9629 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9631 /* for now NS flag is not used in Linux */
9632 pbd->global_data = (hlen |
9633 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9634 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9636 pbd->ip_hlen = (skb_transport_header(skb) -
9637 skb_network_header(skb)) / 2;
9639 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9641 pbd->total_hlen = cpu_to_le16(hlen);
9642 hlen = hlen*2 - vlan_off;
9644 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9646 if (xmit_type & XMIT_CSUM_V4)
9647 tx_bd->bd_flags.as_bitfield |=
9648 ETH_TX_BD_FLAGS_IP_CSUM;
9650 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9652 if (xmit_type & XMIT_CSUM_TCP) {
9653 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9656 s8 fix = SKB_CS_OFF(skb); /* signed! */
9658 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9659 pbd->cs_offset = fix / 2;
9661 DP(NETIF_MSG_TX_QUEUED,
9662 "hlen %d offset %d fix %d csum before fix %x\n",
9663 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9666 /* HW bug: fixup the CSUM */
9667 pbd->tcp_pseudo_csum =
9668 bnx2x_csum_fix(skb_transport_header(skb),
9671 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9672 pbd->tcp_pseudo_csum);
9676 mapping = pci_map_single(bp->pdev, skb->data,
9677 skb_headlen(skb), PCI_DMA_TODEVICE);
9679 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9680 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9681 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9682 tx_bd->nbd = cpu_to_le16(nbd);
9683 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9685 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9686 " nbytes %d flags %x vlan %x\n",
9687 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9688 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9689 le16_to_cpu(tx_bd->vlan));
9691 if (xmit_type & XMIT_GSO) {
9693 DP(NETIF_MSG_TX_QUEUED,
9694 "TSO packet len %d hlen %d total len %d tso size %d\n",
9695 skb->len, hlen, skb_headlen(skb),
9696 skb_shinfo(skb)->gso_size);
9698 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9700 if (unlikely(skb_headlen(skb) > hlen))
9701 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9704 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9705 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9706 pbd->tcp_flags = pbd_tcp_flags(skb);
9708 if (xmit_type & XMIT_GSO_V4) {
9709 pbd->ip_id = swab16(ip_hdr(skb)->id);
9710 pbd->tcp_pseudo_csum =
9711 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9713 0, IPPROTO_TCP, 0));
9716 pbd->tcp_pseudo_csum =
9717 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9718 &ipv6_hdr(skb)->daddr,
9719 0, IPPROTO_TCP, 0));
9721 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9724 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9725 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9727 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9728 tx_bd = &fp->tx_desc_ring[bd_prod];
9730 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9731 frag->size, PCI_DMA_TODEVICE);
9733 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9734 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9735 tx_bd->nbytes = cpu_to_le16(frag->size);
9736 tx_bd->vlan = cpu_to_le16(pkt_prod);
9737 tx_bd->bd_flags.as_bitfield = 0;
9739 DP(NETIF_MSG_TX_QUEUED,
9740 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9741 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9742 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9745 /* now at last mark the BD as the last BD */
9746 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9748 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9749 tx_bd, tx_bd->bd_flags.as_bitfield);
9751 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9753 /* now send a tx doorbell, counting the next BD
9754 * if the packet contains or ends with it
9756 if (TX_BD_POFF(bd_prod) < nbd)
9760 DP(NETIF_MSG_TX_QUEUED,
9761 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9762 " tcp_flags %x xsum %x seq %u hlen %u\n",
9763 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9764 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9765 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9767 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9770 * Make sure that the BD data is updated before updating the producer
9771 * since FW might read the BD right after the producer is updated.
9772 * This is only applicable for weak-ordered memory model archs such
9773 * as IA-64. The following barrier is also mandatory since FW will
9774 * assumes packets must have BDs.
9778 fp->hw_tx_prods->bds_prod =
9779 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9780 mb(); /* FW restriction: must not reorder writing nbd and packets */
9781 fp->hw_tx_prods->packets_prod =
9782 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9783 DOORBELL(bp, FP_IDX(fp), 0);
9787 fp->tx_bd_prod += nbd;
9788 dev->trans_start = jiffies;
9790 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9791 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9792 if we put Tx into XOFF state. */
9794 netif_stop_queue(dev);
9795 bp->eth_stats.driver_xoff++;
9796 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9797 netif_wake_queue(dev);
9801 return NETDEV_TX_OK;
9804 /* called with rtnl_lock */
9805 static int bnx2x_open(struct net_device *dev)
9807 struct bnx2x *bp = netdev_priv(dev);
9809 bnx2x_set_power_state(bp, PCI_D0);
9811 return bnx2x_nic_load(bp, LOAD_OPEN);
9814 /* called with rtnl_lock */
9815 static int bnx2x_close(struct net_device *dev)
9817 struct bnx2x *bp = netdev_priv(dev);
9819 /* Unload the driver, release IRQs */
9820 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9821 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9822 if (!CHIP_REV_IS_SLOW(bp))
9823 bnx2x_set_power_state(bp, PCI_D3hot);
9828 /* called with netif_tx_lock from set_multicast */
9829 static void bnx2x_set_rx_mode(struct net_device *dev)
9831 struct bnx2x *bp = netdev_priv(dev);
9832 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9833 int port = BP_PORT(bp);
9835 if (bp->state != BNX2X_STATE_OPEN) {
9836 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9840 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9842 if (dev->flags & IFF_PROMISC)
9843 rx_mode = BNX2X_RX_MODE_PROMISC;
9845 else if ((dev->flags & IFF_ALLMULTI) ||
9846 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9847 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9849 else { /* some multicasts */
9850 if (CHIP_IS_E1(bp)) {
9852 struct dev_mc_list *mclist;
9853 struct mac_configuration_cmd *config =
9854 bnx2x_sp(bp, mcast_config);
9856 for (i = 0, mclist = dev->mc_list;
9857 mclist && (i < dev->mc_count);
9858 i++, mclist = mclist->next) {
9860 config->config_table[i].
9861 cam_entry.msb_mac_addr =
9862 swab16(*(u16 *)&mclist->dmi_addr[0]);
9863 config->config_table[i].
9864 cam_entry.middle_mac_addr =
9865 swab16(*(u16 *)&mclist->dmi_addr[2]);
9866 config->config_table[i].
9867 cam_entry.lsb_mac_addr =
9868 swab16(*(u16 *)&mclist->dmi_addr[4]);
9869 config->config_table[i].cam_entry.flags =
9871 config->config_table[i].
9872 target_table_entry.flags = 0;
9873 config->config_table[i].
9874 target_table_entry.client_id = 0;
9875 config->config_table[i].
9876 target_table_entry.vlan_id = 0;
9879 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9880 config->config_table[i].
9881 cam_entry.msb_mac_addr,
9882 config->config_table[i].
9883 cam_entry.middle_mac_addr,
9884 config->config_table[i].
9885 cam_entry.lsb_mac_addr);
9887 old = config->hdr.length_6b;
9889 for (; i < old; i++) {
9890 if (CAM_IS_INVALID(config->
9892 /* already invalidated */
9896 CAM_INVALIDATE(config->
9901 if (CHIP_REV_IS_SLOW(bp))
9902 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9904 offset = BNX2X_MAX_MULTICAST*(1 + port);
9906 config->hdr.length_6b = i;
9907 config->hdr.offset = offset;
9908 config->hdr.client_id = BP_CL_ID(bp);
9909 config->hdr.reserved1 = 0;
9911 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9912 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9913 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9916 /* Accept one or more multicasts */
9917 struct dev_mc_list *mclist;
9918 u32 mc_filter[MC_HASH_SIZE];
9919 u32 crc, bit, regidx;
9922 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9924 for (i = 0, mclist = dev->mc_list;
9925 mclist && (i < dev->mc_count);
9926 i++, mclist = mclist->next) {
9928 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9931 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9932 bit = (crc >> 24) & 0xff;
9935 mc_filter[regidx] |= (1 << bit);
9938 for (i = 0; i < MC_HASH_SIZE; i++)
9939 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9944 bp->rx_mode = rx_mode;
9945 bnx2x_set_storm_rx_mode(bp);
9948 /* called with rtnl_lock */
9949 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9951 struct sockaddr *addr = p;
9952 struct bnx2x *bp = netdev_priv(dev);
9954 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9957 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9958 if (netif_running(dev)) {
9960 bnx2x_set_mac_addr_e1(bp, 1);
9962 bnx2x_set_mac_addr_e1h(bp, 1);
9968 /* called with rtnl_lock */
9969 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9971 struct mii_ioctl_data *data = if_mii(ifr);
9972 struct bnx2x *bp = netdev_priv(dev);
9973 int port = BP_PORT(bp);
9978 data->phy_id = bp->port.phy_addr;
9985 if (!netif_running(dev))
9988 mutex_lock(&bp->port.phy_mutex);
9989 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9990 DEFAULT_PHY_DEV_ADDR,
9991 (data->reg_num & 0x1f), &mii_regval);
9992 data->val_out = mii_regval;
9993 mutex_unlock(&bp->port.phy_mutex);
9998 if (!capable(CAP_NET_ADMIN))
10001 if (!netif_running(dev))
10004 mutex_lock(&bp->port.phy_mutex);
10005 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10006 DEFAULT_PHY_DEV_ADDR,
10007 (data->reg_num & 0x1f), data->val_in);
10008 mutex_unlock(&bp->port.phy_mutex);
10016 return -EOPNOTSUPP;
10019 /* called with rtnl_lock */
10020 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10022 struct bnx2x *bp = netdev_priv(dev);
10025 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10026 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10029 /* This does not race with packet allocation
10030 * because the actual alloc size is
10031 * only updated as part of load
10033 dev->mtu = new_mtu;
10035 if (netif_running(dev)) {
10036 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10037 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10043 static void bnx2x_tx_timeout(struct net_device *dev)
10045 struct bnx2x *bp = netdev_priv(dev);
10047 #ifdef BNX2X_STOP_ON_ERROR
10051 /* This allows the netif to be shutdown gracefully before resetting */
10052 schedule_work(&bp->reset_task);
10056 /* called with rtnl_lock */
10057 static void bnx2x_vlan_rx_register(struct net_device *dev,
10058 struct vlan_group *vlgrp)
10060 struct bnx2x *bp = netdev_priv(dev);
10064 /* Set flags according to the required capabilities */
10065 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10067 if (dev->features & NETIF_F_HW_VLAN_TX)
10068 bp->flags |= HW_VLAN_TX_FLAG;
10070 if (dev->features & NETIF_F_HW_VLAN_RX)
10071 bp->flags |= HW_VLAN_RX_FLAG;
10073 if (netif_running(dev))
10074 bnx2x_set_client_config(bp);
10079 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10080 static void poll_bnx2x(struct net_device *dev)
10082 struct bnx2x *bp = netdev_priv(dev);
10084 disable_irq(bp->pdev->irq);
10085 bnx2x_interrupt(bp->pdev->irq, dev);
10086 enable_irq(bp->pdev->irq);
10090 static const struct net_device_ops bnx2x_netdev_ops = {
10091 .ndo_open = bnx2x_open,
10092 .ndo_stop = bnx2x_close,
10093 .ndo_start_xmit = bnx2x_start_xmit,
10094 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10095 .ndo_set_mac_address = bnx2x_change_mac_addr,
10096 .ndo_validate_addr = eth_validate_addr,
10097 .ndo_do_ioctl = bnx2x_ioctl,
10098 .ndo_change_mtu = bnx2x_change_mtu,
10099 .ndo_tx_timeout = bnx2x_tx_timeout,
10101 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10103 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10104 .ndo_poll_controller = poll_bnx2x,
10109 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10110 struct net_device *dev)
10115 SET_NETDEV_DEV(dev, &pdev->dev);
10116 bp = netdev_priv(dev);
10121 bp->func = PCI_FUNC(pdev->devfn);
10123 rc = pci_enable_device(pdev);
10125 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10129 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10130 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10133 goto err_out_disable;
10136 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10137 printk(KERN_ERR PFX "Cannot find second PCI device"
10138 " base address, aborting\n");
10140 goto err_out_disable;
10143 if (atomic_read(&pdev->enable_cnt) == 1) {
10144 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10146 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10148 goto err_out_disable;
10151 pci_set_master(pdev);
10152 pci_save_state(pdev);
10155 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10156 if (bp->pm_cap == 0) {
10157 printk(KERN_ERR PFX "Cannot find power management"
10158 " capability, aborting\n");
10160 goto err_out_release;
10163 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10164 if (bp->pcie_cap == 0) {
10165 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10168 goto err_out_release;
10171 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10172 bp->flags |= USING_DAC_FLAG;
10173 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10174 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10175 " failed, aborting\n");
10177 goto err_out_release;
10180 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10181 printk(KERN_ERR PFX "System does not support DMA,"
10184 goto err_out_release;
10187 dev->mem_start = pci_resource_start(pdev, 0);
10188 dev->base_addr = dev->mem_start;
10189 dev->mem_end = pci_resource_end(pdev, 0);
10191 dev->irq = pdev->irq;
10193 bp->regview = pci_ioremap_bar(pdev, 0);
10194 if (!bp->regview) {
10195 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10197 goto err_out_release;
10200 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10201 min_t(u64, BNX2X_DB_SIZE,
10202 pci_resource_len(pdev, 2)));
10203 if (!bp->doorbells) {
10204 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10206 goto err_out_unmap;
10209 bnx2x_set_power_state(bp, PCI_D0);
10211 /* clean indirect addresses */
10212 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10213 PCICFG_VENDOR_ID_OFFSET);
10214 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10215 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10216 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10217 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10219 dev->watchdog_timeo = TX_TIMEOUT;
10221 dev->netdev_ops = &bnx2x_netdev_ops;
10222 dev->ethtool_ops = &bnx2x_ethtool_ops;
10223 dev->features |= NETIF_F_SG;
10224 dev->features |= NETIF_F_HW_CSUM;
10225 if (bp->flags & USING_DAC_FLAG)
10226 dev->features |= NETIF_F_HIGHDMA;
10228 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10229 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10231 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10232 dev->features |= NETIF_F_TSO6;
10238 iounmap(bp->regview);
10239 bp->regview = NULL;
10241 if (bp->doorbells) {
10242 iounmap(bp->doorbells);
10243 bp->doorbells = NULL;
10247 if (atomic_read(&pdev->enable_cnt) == 1)
10248 pci_release_regions(pdev);
10251 pci_disable_device(pdev);
10252 pci_set_drvdata(pdev, NULL);
10258 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10260 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10262 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10266 /* return value of 1=2.5GHz 2=5GHz */
10267 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10269 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10271 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10275 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10276 const struct pci_device_id *ent)
10278 static int version_printed;
10279 struct net_device *dev = NULL;
10283 if (version_printed++ == 0)
10284 printk(KERN_INFO "%s", version);
10286 /* dev zeroed in init_etherdev */
10287 dev = alloc_etherdev(sizeof(*bp));
10289 printk(KERN_ERR PFX "Cannot allocate net device\n");
10293 bp = netdev_priv(dev);
10294 bp->msglevel = debug;
10296 rc = bnx2x_init_dev(pdev, dev);
10302 pci_set_drvdata(pdev, dev);
10304 rc = bnx2x_init_bp(bp);
10306 goto init_one_exit;
10308 rc = register_netdev(dev);
10310 dev_err(&pdev->dev, "Cannot register net device\n");
10311 goto init_one_exit;
10314 netif_carrier_off(dev);
10316 bp->common.name = board_info[ent->driver_data].name;
10317 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10318 " IRQ %d, ", dev->name, bp->common.name,
10319 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10320 bnx2x_get_pcie_width(bp),
10321 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10322 dev->base_addr, bp->pdev->irq);
10323 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10328 iounmap(bp->regview);
10331 iounmap(bp->doorbells);
10335 if (atomic_read(&pdev->enable_cnt) == 1)
10336 pci_release_regions(pdev);
10338 pci_disable_device(pdev);
10339 pci_set_drvdata(pdev, NULL);
10344 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10346 struct net_device *dev = pci_get_drvdata(pdev);
10350 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10353 bp = netdev_priv(dev);
10355 unregister_netdev(dev);
10358 iounmap(bp->regview);
10361 iounmap(bp->doorbells);
10365 if (atomic_read(&pdev->enable_cnt) == 1)
10366 pci_release_regions(pdev);
10368 pci_disable_device(pdev);
10369 pci_set_drvdata(pdev, NULL);
10372 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10374 struct net_device *dev = pci_get_drvdata(pdev);
10378 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10381 bp = netdev_priv(dev);
10385 pci_save_state(pdev);
10387 if (!netif_running(dev)) {
10392 netif_device_detach(dev);
10394 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10396 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10403 static int bnx2x_resume(struct pci_dev *pdev)
10405 struct net_device *dev = pci_get_drvdata(pdev);
10410 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10413 bp = netdev_priv(dev);
10417 pci_restore_state(pdev);
10419 if (!netif_running(dev)) {
10424 bnx2x_set_power_state(bp, PCI_D0);
10425 netif_device_attach(dev);
10427 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10434 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10438 bp->state = BNX2X_STATE_ERROR;
10440 bp->rx_mode = BNX2X_RX_MODE_NONE;
10442 bnx2x_netif_stop(bp, 0);
10444 del_timer_sync(&bp->timer);
10445 bp->stats_state = STATS_STATE_DISABLED;
10446 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10449 bnx2x_free_irq(bp);
10451 if (CHIP_IS_E1(bp)) {
10452 struct mac_configuration_cmd *config =
10453 bnx2x_sp(bp, mcast_config);
10455 for (i = 0; i < config->hdr.length_6b; i++)
10456 CAM_INVALIDATE(config->config_table[i]);
10459 /* Free SKBs, SGEs, TPA pool and driver internals */
10460 bnx2x_free_skbs(bp);
10461 for_each_queue(bp, i)
10462 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10463 bnx2x_free_mem(bp);
10465 bp->state = BNX2X_STATE_CLOSED;
10467 netif_carrier_off(bp->dev);
10472 static void bnx2x_eeh_recover(struct bnx2x *bp)
10476 mutex_init(&bp->port.phy_mutex);
10478 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10479 bp->link_params.shmem_base = bp->common.shmem_base;
10480 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10482 if (!bp->common.shmem_base ||
10483 (bp->common.shmem_base < 0xA0000) ||
10484 (bp->common.shmem_base >= 0xC0000)) {
10485 BNX2X_DEV_INFO("MCP not active\n");
10486 bp->flags |= NO_MCP_FLAG;
10490 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10491 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10492 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10493 BNX2X_ERR("BAD MCP validity signature\n");
10495 if (!BP_NOMCP(bp)) {
10496 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10497 & DRV_MSG_SEQ_NUMBER_MASK);
10498 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10503 * bnx2x_io_error_detected - called when PCI error is detected
10504 * @pdev: Pointer to PCI device
10505 * @state: The current pci connection state
10507 * This function is called after a PCI bus error affecting
10508 * this device has been detected.
10510 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10511 pci_channel_state_t state)
10513 struct net_device *dev = pci_get_drvdata(pdev);
10514 struct bnx2x *bp = netdev_priv(dev);
10518 netif_device_detach(dev);
10520 if (netif_running(dev))
10521 bnx2x_eeh_nic_unload(bp);
10523 pci_disable_device(pdev);
10527 /* Request a slot reset */
10528 return PCI_ERS_RESULT_NEED_RESET;
10532 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10533 * @pdev: Pointer to PCI device
10535 * Restart the card from scratch, as if from a cold-boot.
10537 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10539 struct net_device *dev = pci_get_drvdata(pdev);
10540 struct bnx2x *bp = netdev_priv(dev);
10544 if (pci_enable_device(pdev)) {
10545 dev_err(&pdev->dev,
10546 "Cannot re-enable PCI device after reset\n");
10548 return PCI_ERS_RESULT_DISCONNECT;
10551 pci_set_master(pdev);
10552 pci_restore_state(pdev);
10554 if (netif_running(dev))
10555 bnx2x_set_power_state(bp, PCI_D0);
10559 return PCI_ERS_RESULT_RECOVERED;
10563 * bnx2x_io_resume - called when traffic can start flowing again
10564 * @pdev: Pointer to PCI device
10566 * This callback is called when the error recovery driver tells us that
10567 * its OK to resume normal operation.
10569 static void bnx2x_io_resume(struct pci_dev *pdev)
10571 struct net_device *dev = pci_get_drvdata(pdev);
10572 struct bnx2x *bp = netdev_priv(dev);
10576 bnx2x_eeh_recover(bp);
10578 if (netif_running(dev))
10579 bnx2x_nic_load(bp, LOAD_NORMAL);
10581 netif_device_attach(dev);
10586 static struct pci_error_handlers bnx2x_err_handler = {
10587 .error_detected = bnx2x_io_error_detected,
10588 .slot_reset = bnx2x_io_slot_reset,
10589 .resume = bnx2x_io_resume,
10592 static struct pci_driver bnx2x_pci_driver = {
10593 .name = DRV_MODULE_NAME,
10594 .id_table = bnx2x_pci_tbl,
10595 .probe = bnx2x_init_one,
10596 .remove = __devexit_p(bnx2x_remove_one),
10597 .suspend = bnx2x_suspend,
10598 .resume = bnx2x_resume,
10599 .err_handler = &bnx2x_err_handler,
10602 static int __init bnx2x_init(void)
10604 bnx2x_wq = create_singlethread_workqueue("bnx2x");
10605 if (bnx2x_wq == NULL) {
10606 printk(KERN_ERR PFX "Cannot create workqueue\n");
10610 return pci_register_driver(&bnx2x_pci_driver);
10613 static void __exit bnx2x_cleanup(void)
10615 pci_unregister_driver(&bnx2x_pci_driver);
10617 destroy_workqueue(bnx2x_wq);
10620 module_init(bnx2x_init);
10621 module_exit(bnx2x_cleanup);