/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
53 #include "bnx2x_reg.h"
54 #include "bnx2x_fw_defs.h"
55 #include "bnx2x_hsi.h"
56 #include "bnx2x_link.h"
58 #include "bnx2x_init.h"
#define DRV_MODULE_VERSION      "1.45.23"
#define DRV_MODULE_RELDATE      "2008/11/03"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
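/* GO command registers of the 16 DMAE channels; writing 1 to
 * dmae_reg_go_c[idx] kicks the command loaded into command memory slot idx.
 */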
static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}
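/* Copy len32 dwords from host memory at dma_addr to GRC address dst_addr
 * using the DMAE engine, then poll the slowpath wb_comp word until the
 * chip posts DMAE_COMP_VAL; falls back to indirect writes while DMAE is
 * not yet ready.
 */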
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
                   " using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}
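/* Same flow as bnx2x_write_dmae() in the opposite direction: read len32
 * dwords from GRC address src_addr into the slowpath wb_data buffer.
 */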
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
                   " using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
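/* Scan the assert lists of the X/T/C/U STORM processors and print every
 * valid entry; returns the number of asserts found.
 */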
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}
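/* Dump the firmware trace from the MCP scratchpad to the kernel log,
 * starting right after the last mark and wrapping around the trace buffer.
 */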
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
                          " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x) rx_bd_cons(%x)"
                          " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
                          " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x) last_max_sge(%x)"
                          " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
                          " *sb_u_idx(%x) bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
                  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
                  " spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}

/*
 * fast path service functions
 */
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}
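/* Walk the TX completion index taken from the status block, free the skbs
 * of all completed packets and wake the netdev queue if it was stopped and
 * enough BDs have been released.
 */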
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d cid %d got ramrod #%d state is %x type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d) "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}
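/* Advance the SGE producer: clear the mask bits of the SGEs consumed by
 * this CQE and move rx_sge_prod forward over every mask element whose
 * pages have all been consumed.
 */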
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}
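/* Start TPA aggregation on a bin: the spare skb from the pool takes the
 * producer slot, while the skb holding the first aggregated data is parked
 * in the pool (still mapped) until bnx2x_tpa_stop().
 */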
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}
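/* Attach the SGE pages of an aggregated packet to the skb as fragments,
 * replacing each used page in the ring with a newly allocated one.
 */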
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}
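/* End TPA aggregation on a bin: fix the IP checksum of the header skb,
 * attach the SGL pages, hand the packet to the stack and place a fresh
 * skb in the bin (or drop the packet if allocation fails).
 */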
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail... "
                                  "pad %d len %d rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take VLAN tag into an account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }

                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since FW
         * assumes BDs must have buffers.
         */
        wmb();

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}
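/* Main RX polling loop: process up to budget CQEs - dispatch slowpath
 * events, handle TPA start/stop and pass good packets to the stack - then
 * publish the new BD/CQE/SGE producers to the TSTORM.
 */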
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
        u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
#endif

        /* CQ "next element" is of the size of the regular element,
           that's why it's ok here */
        hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
        if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                hw_comp_cons++;

        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        bd_prod_fw = bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
           FP_IDX(fp), hw_comp_cons, sw_comp_cons);

        while (sw_comp_cons != hw_comp_cons) {
                struct sw_rx_bd *rx_buf = NULL;
                struct sk_buff *skb;
                union eth_rx_cqe *cqe;
                u8 cqe_fp_flags;
                u16 len, pad;

                comp_ring_cons = RCQ_BD(sw_comp_cons);
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);

                cqe = &fp->rx_comp_ring[comp_ring_cons];
                cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

                DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
                   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
                   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
                   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
                   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
                   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

                /* is this a slowpath msg? */
                if (unlikely(CQE_TYPE(cqe_fp_flags))) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;

                /* this is an rx packet */
                } else {
                        rx_buf = &fp->rx_buf_ring[bd_cons];
                        skb = rx_buf->skb;
                        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
                        pad = cqe->fast_path_cqe.placement_offset;

                        /* If CQE is marked both TPA_START and TPA_END
                           it is a non-TPA CQE */
                        if ((!fp->disable_tpa) &&
                            (TPA_TYPE(cqe_fp_flags) !=
                                        (TPA_TYPE_START | TPA_TYPE_END))) {
                                u16 queue = cqe->fast_path_cqe.queue_index;

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_start on queue %d\n",
                                           queue);

                                        bnx2x_tpa_start(fp, queue, skb,
                                                        bd_cons, bd_prod);
                                        goto next_rx;
                                }

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_stop on queue %d\n",
                                           queue);

                                        if (!BNX2X_RX_SUM_FIX(cqe))
                                                BNX2X_ERR("STOP on non-TCP "
                                                          "data\n");

                                        /* This is a size of the linear data
                                           on this skb */
                                        len = le16_to_cpu(cqe->fast_path_cqe.
                                                                len_on_bd);
                                        bnx2x_tpa_stop(bp, fp, queue, pad,
                                                    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
                                        if (bp->panic)
                                                return -EINVAL;
#endif

                                        bnx2x_update_sge_prod(fp,
                                                        &cqe->fast_path_cqe);
                                        goto next_cqe;
                                }
                        }

                        pci_dma_sync_single_for_device(bp->pdev,
                                        pci_unmap_addr(rx_buf, mapping),
                                                       pad + RX_COPY_THRESH,
                                                       PCI_DMA_FROMDEVICE);
                        prefetch(skb);
                        prefetch(((char *)(skb)) + 128);

                        /* is this an error packet? */
                        if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR flags %x rx packet %u\n",
                                   cqe_fp_flags, sw_comp_cons);
                                bp->eth_stats.rx_err_discard_pkt++;
                                goto reuse_rx;
                        }

                        /* Since we don't have a jumbo ring
                         * copy small packets if mtu > 1500
                         */
                        if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
                            (len <= RX_COPY_THRESH)) {
                                struct sk_buff *new_skb;

                                new_skb = netdev_alloc_skb(bp->dev,
                                                           len + pad);
                                if (new_skb == NULL) {
                                        DP(NETIF_MSG_RX_ERR,
                                           "ERROR packet dropped "
                                           "because of alloc failure\n");
                                        bp->eth_stats.rx_skb_alloc_failed++;
                                        goto reuse_rx;
                                }

                                /* aligned copy */
                                skb_copy_from_linear_data_offset(skb, pad,
                                                 new_skb->data + pad, len);
                                skb_reserve(new_skb, pad);
                                skb_put(new_skb, len);

                                bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

                                skb = new_skb;

                        } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
                                pci_unmap_single(bp->pdev,
                                        pci_unmap_addr(rx_buf, mapping),
                                                 bp->rx_buf_size,
                                                 PCI_DMA_FROMDEVICE);
                                skb_reserve(skb, pad);
                                skb_put(skb, len);

                        } else {
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR packet dropped because "
                                   "of alloc failure\n");
                                bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
                                bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
                                goto next_rx;
                        }

                        skb->protocol = eth_type_trans(skb, bp->dev);

                        skb->ip_summed = CHECKSUM_NONE;
                        if (bp->rx_csum) {
                                if (likely(BNX2X_RX_CSUM_OK(cqe)))
                                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                                else
                                        bp->eth_stats.hw_csum_err++;
                        }
                }

#ifdef BCM_VLAN
                if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
                    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                     PARSING_FLAGS_VLAN))
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
                else
#endif
                        netif_receive_skb(skb);

next_rx:
                rx_buf->skb = NULL;

                bd_cons = NEXT_RX_IDX(bd_cons);
                bd_prod = NEXT_RX_IDX(bd_prod);
                bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
                rx_pkt++;
next_cqe:
                sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
                sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

                if (rx_pkt == budget)
                        break;
        } /* while */

        fp->rx_bd_cons = bd_cons;
        fp->rx_bd_prod = bd_prod_fw;
        fp->rx_comp_cons = sw_comp_cons;
        fp->rx_comp_prod = sw_comp_prod;

        /* Update producers */
        bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
                             fp->rx_sge_prod);

        fp->rx_pkt += rx_pkt;
        fp->rx_calls++;

        return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
        struct bnx2x_fastpath *fp = fp_cookie;
        struct bnx2x *bp = fp->bp;
        int index = FP_IDX(fp);

        /* Return here if interrupt is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

        DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
           index, FP_SB_ID(fp));
        bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        prefetch(fp->rx_cons_sb);
        prefetch(fp->tx_cons_sb);
        prefetch(&fp->status_blk->c_status_block.status_block_index);
        prefetch(&fp->status_blk->u_status_block.status_block_index);

        netif_rx_schedule(&bnx2x_fp(bp, index, napi));

        return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2x *bp = netdev_priv(dev);
        u16 status = bnx2x_ack_int(bp);
        u16 mask;

        /* Return here if interrupt is shared and it's not for us */
        if (unlikely(status == 0)) {
                DP(NETIF_MSG_INTR, "not our interrupt!\n");
                return IRQ_NONE;
        }
        DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

        /* Return here if interrupt is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        mask = 0x2 << bp->fp[0].sb_id;
        if (status & mask) {
                struct bnx2x_fastpath *fp = &bp->fp[0];

                prefetch(fp->rx_cons_sb);
                prefetch(fp->tx_cons_sb);
                prefetch(&fp->status_blk->c_status_block.status_block_index);
                prefetch(&fp->status_blk->u_status_block.status_block_index);

                netif_rx_schedule(&bnx2x_fp(bp, 0, napi));

                status &= ~mask;
        }

        if (unlikely(status & 0x1)) {
                queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

                status &= ~0x1;
                if (!status)
                        return IRQ_HANDLED;
        }

        if (status)
                DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
                   status);

        return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;
        int cnt;

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        if (func <= 5) {
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        } else {
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
        }

        /* Validating that the resource is not already taken */
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit) {
                DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EEXIST;
        }

        /* Try for 5 second every 5ms */
        for (cnt = 0; cnt < 1000; cnt++) {
                /* Try to acquire the lock */
                REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
                lock_status = REG_RD(bp, hw_lock_control_reg);
                if (lock_status & resource_bit)
                        return 0;

                msleep(5);
        }
        DP(NETIF_MSG_HW, "Timeout\n");
        return -EAGAIN;
}
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        if (func <= 5) {
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        } else {
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
        }

        /* Validating that the resource is currently taken */
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (!(lock_status & resource_bit)) {
                DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EFAULT;
        }

        REG_WR(bp, hw_lock_control_reg, resource_bit);
        return 0;
}
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
        u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

        mutex_lock(&bp->port.phy_mutex);

        if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
            (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
                bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
        u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

        if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
            (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
                bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

        mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
        /* The GPIO should be swapped if swap register is set and active */
        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
        int gpio_shift = gpio_num +
                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
        u32 gpio_mask = (1 << gpio_shift);
        u32 gpio_reg;

        if (gpio_num > MISC_REGISTERS_GPIO_3) {
                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
                return -EINVAL;
        }

        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
        /* read GPIO and mask except the float bits */
        gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_GPIO_OUTPUT_LOW:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set CLR */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
                break;

        case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set SET */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
                break;

        case MISC_REGISTERS_GPIO_INPUT_HI_Z:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
                   gpio_num, gpio_shift);
                /* set FLOAT */
                gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_GPIO, gpio_reg);
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

        return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
        u32 spio_mask = (1 << spio_num);
        u32 spio_reg;

        if ((spio_num < MISC_REGISTERS_SPIO_4) ||
            (spio_num > MISC_REGISTERS_SPIO_7)) {
                BNX2X_ERR("Invalid SPIO %d\n", spio_num);
                return -EINVAL;
        }

        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
        /* read SPIO and mask except the float bits */
        spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_SPIO_OUTPUT_LOW:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
                /* clear FLOAT and set CLR */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
                break;

        case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
                /* clear FLOAT and set SET */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
                break;

        case MISC_REGISTERS_SPIO_INPUT_HI_Z:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
                /* set FLOAT */
                spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_SPIO, spio_reg);
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

        return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
        switch (bp->link_vars.ieee_fc &
                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
                bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
                                          ADVERTISED_Pause);
                break;
        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
                bp->port.advertising |= (ADVERTISED_Asym_Pause |
                                         ADVERTISED_Pause);
                break;
        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
                bp->port.advertising |= ADVERTISED_Asym_Pause;
                break;
        default:
                bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
                                          ADVERTISED_Pause);
                break;
        }
}

static void bnx2x_link_report(struct bnx2x *bp)
{
        if (bp->link_vars.link_up) {
                if (bp->state == BNX2X_STATE_OPEN)
                        netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

                printk("%d Mbps ", bp->link_vars.line_speed);

                if (bp->link_vars.duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
                        if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
                                        printk("& transmit ");
                        } else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");

        } else { /* link_down */
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
        }
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
        if (!BP_NOMCP(bp)) {
                u8 rc;

                /* Initialize link parameters structure variables */
                /* It is recommended to turn off RX FC for jumbo frames
                   for better performance */
                if (IS_E1HMF(bp))
                        bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
                else if (bp->dev->mtu > 5000)
                        bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
                else
                        bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

                bnx2x_acquire_phy_lock(bp);
                rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
                bnx2x_release_phy_lock(bp);

                bnx2x_calc_fc_adv(bp);

                if (bp->link_vars.link_up)
                        bnx2x_link_report(bp);

                return rc;
        }
        BNX2X_ERR("Bootcode is missing - not initializing link\n");
        return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
        if (!BP_NOMCP(bp)) {
                bnx2x_acquire_phy_lock(bp);
                bnx2x_phy_init(&bp->link_params, &bp->link_vars);
                bnx2x_release_phy_lock(bp);

                bnx2x_calc_fc_adv(bp);
        } else
                BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
        if (!BP_NOMCP(bp)) {
                bnx2x_acquire_phy_lock(bp);
                bnx2x_link_reset(&bp->link_params, &bp->link_vars);
                bnx2x_release_phy_lock(bp);
        } else
                BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
        u8 rc;

        bnx2x_acquire_phy_lock(bp);
        rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
        bnx2x_release_phy_lock(bp);

        return rc;
}
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
         In the latter case fairness algorithm should be deactivated.
         If not all min_rates are zero then those that are zeroes will
         be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
        int i, port = BP_PORT(bp);
        u32 wsum = 0;
        int all_zero = 1;

        for (i = 0; i < E1HVN_MAX; i++) {
                u32 vn_cfg =
                        SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
                u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                                   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
                if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
                        /* If min rate is zero - set it to 1 */
                        if (!vn_min_rate)
                                vn_min_rate = DEF_MIN_RATE;
                        else
                                all_zero = 0;

                        wsum += vn_min_rate;
                }
        }

        /* ... only if all min rates are zeros - disable FAIRNESS */
        if (all_zero)
                return 0;

        return wsum;
}
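/* Program the per-port congestion management (rate shaping + fairness)
 * context and store it in XSTORM internal memory; both mechanisms are
 * enabled only in E1H multi-function mode.
 */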
static void bnx2x_init_port_minmax(struct bnx2x *bp,
                                   int en_fness,
                                   u16 port_rate,
                                   struct cmng_struct_per_port *m_cmng_port)
{
        u32 r_param = port_rate / 8;
        int port = BP_PORT(bp);
        int i;

        memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

        /* Enable minmax only if we are in e1hmf mode */
        if (IS_E1HMF(bp)) {
                u32 fair_periodic_timeout_usec;
                u32 t_fair;

                /* Enable rate shaping and fairness */
                m_cmng_port->flags.cmng_vn_enable = 1;
                m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
                m_cmng_port->flags.rate_shaping_enable = 1;

                if (!en_fness)
                        DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
                           " fairness will be disabled\n");

                /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
                m_cmng_port->rs_vars.rs_periodic_timeout =
                                                RS_PERIODIC_TIMEOUT_USEC / 4;

                /* this is the threshold below which no timer arming will occur
                   1.25 coefficient is for the threshold to be a little bigger
                   than the real time, to compensate for timer in-accuracy */
                m_cmng_port->rs_vars.rs_threshold =
                                (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

                /* resolution of fairness timer */
                fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
                /* for 10G it is 1000usec. for 1G it is 10000usec. */
                t_fair = T_FAIR_COEF / port_rate;

                /* this is the threshold below which we won't arm
                   the timer anymore */
                m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

                /* we multiply by 1e3/8 to get bytes/msec.
                   We don't want the credits to pass a credit
                   of the T_FAIR*FAIR_MEM (algorithm resolution) */
                m_cmng_port->fair_vars.upper_bound =
                                                r_param * t_fair * FAIR_MEM;
                /* since each tick is 4 usec */
                m_cmng_port->fair_vars.fairness_timeout =
                                                fair_periodic_timeout_usec / 4;

        } else {
                /* Disable rate shaping and fairness */
                m_cmng_port->flags.cmng_vn_enable = 0;
                m_cmng_port->flags.fairness_enable = 0;
                m_cmng_port->flags.rate_shaping_enable = 0;

                DP(NETIF_MSG_IFUP,
                   "Single function mode minmax will be disabled\n");
        }

        /* Store it to internal memory */
        for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
                REG_WR(bp, BAR_XSTRORM_INTMEM +
                       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
                       ((u32 *)(m_cmng_port))[i]);
}
2138 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2139 u32 wsum, u16 port_rate,
2140 struct cmng_struct_per_port *m_cmng_port)
2142 struct rate_shaping_vars_per_vn m_rs_vn;
2143 struct fairness_vars_per_vn m_fair_vn;
2144 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2145 u16 vn_min_rate, vn_max_rate;
2148 /* If function is hidden - set min and max to zeroes */
2149 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2154 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2155 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2156 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2157 if current min rate is zero - set it to 1.
2158 This is a requirement of the algorithm. */
2159 if ((vn_min_rate == 0) && wsum)
2160 vn_min_rate = DEF_MIN_RATE;
2161 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2162 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2165 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2166 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2168 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2169 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2171 /* global vn counter - maximal Mbps for this vn */
2172 m_rs_vn.vn_counter.rate = vn_max_rate;
2174 /* quota - number of bytes transmitted in this period */
2175 m_rs_vn.vn_counter.quota =
2176 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2178 #ifdef BNX2X_PER_PROT_QOS
2179 /* per protocol counter */
2180 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2181 /* maximal Mbps for this protocol */
2182 m_rs_vn.protocol_counters[protocol].rate =
2183 protocol_max_rate[protocol];
2184 /* the quota in each timer period -
2185 number of bytes transmitted in this period */
2186 m_rs_vn.protocol_counters[protocol].quota =
2187 (u32)(rs_periodic_timeout_usec *
2189 protocol_counters[protocol].rate/8));
2194 /* credit for each period of the fairness algorithm:
2195 number of bytes in T_FAIR (the vn share the port rate).
2196 wsum should not be larger than 10000, thus
2197 T_FAIR_COEF / (8 * wsum) will always be grater than zero */
2198 m_fair_vn.vn_credit_delta =
2199 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2200 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2201 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2202 m_fair_vn.vn_credit_delta);
2205 #ifdef BNX2X_PER_PROT_QOS
2207 u32 protocolWeightSum = 0;
2209 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2210 protocolWeightSum +=
2211 drvInit.protocol_min_rate[protocol];
2212 /* per protocol counter -
2213 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2214 if (protocolWeightSum > 0) {
2215 for (protocol = 0;
2216 protocol < NUM_OF_PROTOCOLS; protocol++)
2217 /* credit for each period of the
2218 fairness algorithm - number of bytes in
2219 T_FAIR (the protocol share the vn rate) */
2220 m_fair_vn.protocol_credit_delta[protocol] =
2221 (u32)((vn_min_rate / 8) * t_fair *
2222 protocol_min_rate / protocolWeightSum);
2227 /* Store it to internal memory */
2228 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2229 REG_WR(bp, BAR_XSTRORM_INTMEM +
2230 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2231 ((u32 *)(&m_rs_vn))[i]);
2233 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2234 REG_WR(bp, BAR_XSTRORM_INTMEM +
2235 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2236 ((u32 *)(&m_fair_vn))[i]);
2239 /* This function is called upon link interrupt */
2240 static void bnx2x_link_attn(struct bnx2x *bp)
2244 /* Make sure that we are synced with the current statistics */
2245 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2247 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2249 if (bp->link_vars.link_up) {
2251 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2252 struct host_port_stats *pstats;
2254 pstats = bnx2x_sp(bp, port_stats);
2255 /* reset old bmac stats */
2256 memset(&(pstats->mac_stx[0]), 0,
2257 sizeof(struct mac_stx));
2259 if ((bp->state == BNX2X_STATE_OPEN) ||
2260 (bp->state == BNX2X_STATE_DISABLED))
2261 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2264 /* indicate link status */
2265 bnx2x_link_report(bp);
2270 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2271 if (vn == BP_E1HVN(bp))
2272 continue;
2274 func = ((vn << 1) | BP_PORT(bp));
2276 /* Set the attention towards other drivers
2277 on the same port */
2278 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2279 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2283 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2284 struct cmng_struct_per_port m_cmng_port;
2285 u32 wsum;
2286 int port = BP_PORT(bp);
2288 /* Init RATE SHAPING and FAIRNESS contexts */
2289 wsum = bnx2x_calc_vn_wsum(bp);
2290 bnx2x_init_port_minmax(bp, (int)wsum,
2291 bp->link_vars.line_speed,
2292 &m_cmng_port);
2294 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2295 bnx2x_init_vn_minmax(bp, 2*vn + port,
2296 wsum, bp->link_vars.line_speed,
2297 &m_cmng_port);
2301 static void bnx2x__link_status_update(struct bnx2x *bp)
2303 if (bp->state != BNX2X_STATE_OPEN)
2304 return;
2306 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2308 if (bp->link_vars.link_up)
2309 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2310 else
2311 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2313 /* indicate link status */
2314 bnx2x_link_report(bp);
2317 static void bnx2x_pmf_update(struct bnx2x *bp)
2319 int port = BP_PORT(bp);
2320 u32 val;
2322 bp->port.pmf = 1;
2323 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2325 /* enable nig attention */
2326 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2327 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2328 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2330 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2338 * General service functions
2341 /* the slow path queue is odd since completions arrive on the fastpath ring */
2342 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2343 u32 data_hi, u32 data_lo, int common)
2345 int func = BP_FUNC(bp);
2347 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2348 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2349 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2350 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2351 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2353 #ifdef BNX2X_STOP_ON_ERROR
2354 if (unlikely(bp->panic))
2355 return -EIO;
2356 #endif
2358 spin_lock_bh(&bp->spq_lock);
2360 if (!bp->spq_left) {
2361 BNX2X_ERR("BUG! SPQ ring full!\n");
2362 spin_unlock_bh(&bp->spq_lock);
2363 bnx2x_panic();
2364 return -EBUSY;
2365 }
2367 /* CID needs port number to be encoded in it */
2368 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2369 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2370 HW_CID(bp, cid)));
2371 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2372 if (common)
2373 bp->spq_prod_bd->hdr.type |=
2374 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2376 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2377 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2381 if (bp->spq_prod_bd == bp->spq_last_bd) {
2382 bp->spq_prod_bd = bp->spq;
2383 bp->spq_prod_idx = 0;
2384 DP(NETIF_MSG_TIMER, "end of spq\n");
2386 } else {
2387 bp->spq_prod_bd++;
2388 bp->spq_prod_idx++;
2389 }
2391 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2392 bp->spq_prod_idx);
2394 spin_unlock_bh(&bp->spq_lock);
2396 return 0;
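/*
 * Illustrative sketch (not driver code): bnx2x_sp_post() is a classic
 * single-producer ring under a spinlock - refuse when no entries are
 * left, advance the producer, and wrap at the last BD.  A minimal model
 * of that bookkeeping, with hypothetical types:
 */
#if 0
struct example_ring {
	int prod_idx;
	int size;	/* total entries */
	int left;	/* free entries */
};

static int example_ring_post(struct example_ring *r)
{
	if (!r->left)
		return -EBUSY;	/* ring full, as checked above */

	r->left--;
	r->prod_idx = (r->prod_idx + 1) % r->size;	/* wrap around */
	return 0;
}
#endif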
2398 /* acquire split MCP access lock register */
2399 static int bnx2x_acquire_alr(struct bnx2x *bp)
2400 {
2401 u32 i, j, val;
2402 int rc = 0;
2404 might_sleep();
2405 i = 100;
2406 for (j = 0; j < i*10; j++) {
2407 val = (1UL << 31);
2408 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2409 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2410 if (val & (1L << 31))
2411 break;
2413 msleep(5);
2414 }
2415 if (!(val & (1L << 31))) {
2416 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2417 rc = -EBUSY;
2418 }
2420 return rc;
2421 }
2423 /* release split MCP access lock register */
2424 static void bnx2x_release_alr(struct bnx2x *bp)
2425 {
2426 u32 val = 0;
2428 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2429 }
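/*
 * Illustrative sketch (not driver code): the ALR is a set-and-test
 * hardware lock - write the lock bit, read back, and retry with a delay
 * until the bit is observed set.  A generic poll-with-timeout of the
 * same shape:
 */
#if 0
static int example_poll_lock(struct bnx2x *bp, u32 reg, u32 bit, int tries)
{
	u32 val;

	while (tries--) {
		REG_WR(bp, reg, bit);
		val = REG_RD(bp, reg);
		if (val & bit)
			return 0;	/* lock acquired */
		msleep(5);
	}
	return -EBUSY;	/* bit never came back set */
}
#endif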
2431 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2433 struct host_def_status_block *def_sb = bp->def_status_blk;
2434 u16 rc = 0;
2436 barrier(); /* status block is written to by the chip */
2437 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2438 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2439 rc |= 1;
2440 }
2441 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2442 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2443 rc |= 2;
2444 }
2445 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2446 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2447 rc |= 4;
2448 }
2449 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2450 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2451 rc |= 8;
2452 }
2453 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2454 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2455 rc |= 16;
2456 }
2458 return rc;
2459 }
2461 * slow path service functions
2464 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2466 int port = BP_PORT(bp);
2467 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2468 COMMAND_REG_ATTN_BITS_SET);
2469 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2470 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2471 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2472 NIG_REG_MASK_INTERRUPT_PORT0;
2475 if (bp->attn_state & asserted)
2476 BNX2X_ERR("IGU ERROR\n");
2478 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2479 aeu_mask = REG_RD(bp, aeu_addr);
2481 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2482 aeu_mask, asserted);
2483 aeu_mask &= ~(asserted & 0xff);
2484 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2486 REG_WR(bp, aeu_addr, aeu_mask);
2487 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2489 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2490 bp->attn_state |= asserted;
2491 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2493 if (asserted & ATTN_HARD_WIRED_MASK) {
2494 if (asserted & ATTN_NIG_FOR_FUNC) {
2496 bnx2x_acquire_phy_lock(bp);
2498 /* save nig interrupt mask */
2499 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2500 REG_WR(bp, nig_int_mask_addr, 0);
2502 bnx2x_link_attn(bp);
2504 /* handle unicore attn? */
2506 if (asserted & ATTN_SW_TIMER_4_FUNC)
2507 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2509 if (asserted & GPIO_2_FUNC)
2510 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2512 if (asserted & GPIO_3_FUNC)
2513 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2515 if (asserted & GPIO_4_FUNC)
2516 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2519 if (asserted & ATTN_GENERAL_ATTN_1) {
2520 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2521 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2523 if (asserted & ATTN_GENERAL_ATTN_2) {
2524 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2525 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2527 if (asserted & ATTN_GENERAL_ATTN_3) {
2528 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2529 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2532 if (asserted & ATTN_GENERAL_ATTN_4) {
2533 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2534 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2536 if (asserted & ATTN_GENERAL_ATTN_5) {
2537 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2538 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2540 if (asserted & ATTN_GENERAL_ATTN_6) {
2541 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2542 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2546 } /* if hardwired */
2548 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2549 asserted, hc_addr);
2550 REG_WR(bp, hc_addr, asserted);
2552 /* now set back the mask */
2553 if (asserted & ATTN_NIG_FOR_FUNC) {
2554 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2555 bnx2x_release_phy_lock(bp);
2559 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2561 int port = BP_PORT(bp);
2565 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2566 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2568 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2570 val = REG_RD(bp, reg_offset);
2571 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2572 REG_WR(bp, reg_offset, val);
2574 BNX2X_ERR("SPIO5 hw attention\n");
2576 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2577 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2578 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2579 /* Fan failure attention */
2581 /* The PHY reset is controlled by GPIO 1 */
2582 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2583 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2584 /* Low power mode is controlled by GPIO 2 */
2585 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2586 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2587 /* mark the failure */
2588 bp->link_params.ext_phy_config &=
2589 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2590 bp->link_params.ext_phy_config |=
2591 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2592 SHMEM_WR(bp,
2593 dev_info.port_hw_config[port].
2594 external_phy_config,
2595 bp->link_params.ext_phy_config);
2596 /* log the failure */
2597 printk(KERN_ERR PFX "Fan Failure on Network"
2598 " Controller %s has caused the driver to"
2599 " shutdown the card to prevent permanent"
2600 " damage. Please contact Dell Support for"
2601 " assistance\n", bp->dev->name);
2609 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2611 val = REG_RD(bp, reg_offset);
2612 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2613 REG_WR(bp, reg_offset, val);
2615 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2616 (attn & HW_INTERRUT_ASSERT_SET_0));
2621 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2625 if (attn & BNX2X_DOORQ_ASSERT) {
2627 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2628 BNX2X_ERR("DB hw attention 0x%x\n", val);
2629 /* DORQ discard attention */
2630 if (val & 0x2)
2631 BNX2X_ERR("FATAL error from DORQ\n");
2634 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2636 int port = BP_PORT(bp);
2639 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2640 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2642 val = REG_RD(bp, reg_offset);
2643 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2644 REG_WR(bp, reg_offset, val);
2646 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2647 (attn & HW_INTERRUT_ASSERT_SET_1));
2652 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2656 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2658 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2659 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2660 /* CFC error attention */
2661 if (val & 0x2)
2662 BNX2X_ERR("FATAL error from CFC\n");
2665 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2667 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2668 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2669 /* RQ_USDMDP_FIFO_OVERFLOW */
2670 if (val & 0x18000)
2671 BNX2X_ERR("FATAL error from PXP\n");
2674 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2676 int port = BP_PORT(bp);
2679 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2680 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2682 val = REG_RD(bp, reg_offset);
2683 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2684 REG_WR(bp, reg_offset, val);
2686 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2687 (attn & HW_INTERRUT_ASSERT_SET_2));
2692 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2696 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2698 if (attn & BNX2X_PMF_LINK_ASSERT) {
2699 int func = BP_FUNC(bp);
2701 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2702 bnx2x__link_status_update(bp);
2703 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2704 DRV_STATUS_PMF)
2705 bnx2x_pmf_update(bp);
2707 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2709 BNX2X_ERR("MC assert!\n");
2710 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2711 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2712 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2713 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2716 } else if (attn & BNX2X_MCP_ASSERT) {
2718 BNX2X_ERR("MCP assert!\n");
2719 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2722 } else
2723 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2726 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2727 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2728 if (attn & BNX2X_GRC_TIMEOUT) {
2729 val = CHIP_IS_E1H(bp) ?
2730 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2731 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2733 if (attn & BNX2X_GRC_RSV) {
2734 val = CHIP_IS_E1H(bp) ?
2735 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2736 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2738 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2742 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2744 struct attn_route attn;
2745 struct attn_route group_mask;
2746 int port = BP_PORT(bp);
2752 /* need to take HW lock because MCP or other port might also
2753 try to handle this event */
2754 bnx2x_acquire_alr(bp);
2756 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2757 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2758 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2759 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2760 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2761 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2763 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2764 if (deasserted & (1 << index)) {
2765 group_mask = bp->attn_group[index];
2767 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2768 index, group_mask.sig[0], group_mask.sig[1],
2769 group_mask.sig[2], group_mask.sig[3]);
2771 bnx2x_attn_int_deasserted3(bp,
2772 attn.sig[3] & group_mask.sig[3]);
2773 bnx2x_attn_int_deasserted1(bp,
2774 attn.sig[1] & group_mask.sig[1]);
2775 bnx2x_attn_int_deasserted2(bp,
2776 attn.sig[2] & group_mask.sig[2]);
2777 bnx2x_attn_int_deasserted0(bp,
2778 attn.sig[0] & group_mask.sig[0]);
2780 if ((attn.sig[0] & group_mask.sig[0] &
2781 HW_PRTY_ASSERT_SET_0) ||
2782 (attn.sig[1] & group_mask.sig[1] &
2783 HW_PRTY_ASSERT_SET_1) ||
2784 (attn.sig[2] & group_mask.sig[2] &
2785 HW_PRTY_ASSERT_SET_2))
2786 BNX2X_ERR("FATAL HW block parity attention\n");
2790 bnx2x_release_alr(bp);
2792 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2794 val = ~deasserted;
2795 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2796 val, reg_addr);
2797 REG_WR(bp, reg_addr, val);
2799 if (~bp->attn_state & deasserted)
2800 BNX2X_ERR("IGU ERROR\n");
2802 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2803 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2805 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2806 aeu_mask = REG_RD(bp, reg_addr);
2808 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2809 aeu_mask, deasserted);
2810 aeu_mask |= (deasserted & 0xff);
2811 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2813 REG_WR(bp, reg_addr, aeu_mask);
2814 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2816 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2817 bp->attn_state &= ~deasserted;
2818 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2821 static void bnx2x_attn_int(struct bnx2x *bp)
2823 /* read local copy of bits */
2824 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2825 attn_bits);
2826 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2827 attn_bits_ack);
2828 u32 attn_state = bp->attn_state;
2830 /* look for changed bits */
2831 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2832 u32 deasserted = ~attn_bits & attn_ack & attn_state;
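/*
 * Illustrative sketch (not driver code): with hypothetical values
 * attn_bits = 0x5, attn_ack = 0x3 and attn_state = 0x3, the two lines
 * above yield asserted = 0x4 (bit 2 is newly raised) and
 * deasserted = 0x2 (bit 1 was latched but has dropped from attn_bits).
 */
#if 0
static u32 example_attn_delta(void)
{
	u32 bits = 0x5, ack = 0x3, state = 0x3;
	u32 asserted = bits & ~ack & ~state;	/* == 0x4 */
	u32 deasserted = ~bits & ack & state;	/* == 0x2 */

	return asserted | deasserted;
}
#endif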
2835 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2836 attn_bits, attn_ack, asserted, deasserted);
2838 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2839 BNX2X_ERR("BAD attention state\n");
2841 /* handle bits that were raised */
2842 if (asserted)
2843 bnx2x_attn_int_asserted(bp, asserted);
2845 if (deasserted)
2846 bnx2x_attn_int_deasserted(bp, deasserted);
2849 static void bnx2x_sp_task(struct work_struct *work)
2851 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2855 /* Return here if interrupt is disabled */
2856 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2857 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2858 return;
2859 }
2861 status = bnx2x_update_dsb_idx(bp);
2862 /* if (status == 0) */
2863 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2865 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2871 /* CStorm events: query_stats, port delete ramrod */
2872 if (status & 0x2)
2873 bp->stats_pending = 0;
2875 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2876 IGU_INT_NOP, 1);
2877 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2878 IGU_INT_NOP, 1);
2879 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2880 IGU_INT_NOP, 1);
2881 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2882 IGU_INT_NOP, 1);
2883 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2884 IGU_INT_ENABLE, 1);
2888 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2890 struct net_device *dev = dev_instance;
2891 struct bnx2x *bp = netdev_priv(dev);
2893 /* Return here if interrupt is disabled */
2894 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2895 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2896 return IRQ_HANDLED;
2897 }
2899 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2901 #ifdef BNX2X_STOP_ON_ERROR
2902 if (unlikely(bp->panic))
2903 return IRQ_HANDLED;
2904 #endif
2906 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2908 return IRQ_HANDLED;
2909 }
2911 /* end of slow path */
2915 /****************************************************************************
2917 ****************************************************************************/
2919 /* sum[hi:lo] += add[hi:lo] */
2920 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2921 do { \
2922 s_lo += a_lo; \
2923 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2924 } while (0)
2926 /* difference = minuend - subtrahend */
2927 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2928 do { \
2929 if (m_lo < s_lo) { \
2930 /* underflow */ \
2931 d_hi = m_hi - s_hi; \
2932 if (d_hi > 0) { \
2933 /* we can 'loan' 1 */ \
2934 d_hi--; \
2935 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2936 } else { \
2937 /* m_hi <= s_hi */ \
2938 d_hi = 0; \
2939 d_lo = 0; \
2940 } \
2941 } else { \
2942 /* m_lo >= s_lo */ \
2943 if (m_hi < s_hi) { \
2944 d_hi = 0; \
2945 d_lo = 0; \
2946 } else { \
2947 /* m_hi >= s_hi */ \
2948 d_hi = m_hi - s_hi; \
2949 d_lo = m_lo - s_lo; \
2950 } \
2951 } \
2952 } while (0)
2954 #define UPDATE_STAT64(s, t) \
2955 do { \
2956 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2957 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2958 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2959 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2960 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2961 pstats->mac_stx[1].t##_lo, diff.lo); \
2962 } while (0)
2964 #define UPDATE_STAT64_NIG(s, t) \
2965 do { \
2966 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2967 diff.lo, new->s##_lo, old->s##_lo); \
2968 ADD_64(estats->t##_hi, diff.hi, \
2969 estats->t##_lo, diff.lo); \
2970 } while (0)
2972 /* sum[hi:lo] += add */
2973 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2974 do { \
2975 s_lo += a; \
2976 s_hi += (s_lo < a) ? 1 : 0; \
2977 } while (0)
2979 #define UPDATE_EXTEND_STAT(s) \
2980 do { \
2981 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2982 pstats->mac_stx[1].s##_lo, \
2983 new->s); \
2984 } while (0)
2986 #define UPDATE_EXTEND_TSTAT(s, t) \
2987 do { \
2988 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2989 old_tclient->s = le32_to_cpu(tclient->s); \
2990 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2991 } while (0)
2993 #define UPDATE_EXTEND_XSTAT(s, t) \
2994 do { \
2995 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2996 old_xclient->s = le32_to_cpu(xclient->s); \
2997 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2998 } while (0)
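/*
 * Illustrative sketch (not driver code): ADD_64 detects carry by
 * checking whether the low word wrapped (s_lo < a_lo after the add),
 * and DIFF_64 borrows one from the high word when the low word would
 * underflow.  A worked carry case:
 */
#if 0
static void example_split_add(void)
{
	u32 s_hi = 0, s_lo = 0xffffffff;
	u32 a_hi = 0, a_lo = 1;

	ADD_64(s_hi, a_hi, s_lo, a_lo);
	/* s_lo wrapped to 0, so the carry test fires: s_hi == 1, s_lo == 0 */
}
#endif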
3001 * General service functions
3004 static inline long bnx2x_hilo(u32 *hiref)
3005 {
3006 u32 lo = *(hiref + 1);
3007 #if (BITS_PER_LONG == 64)
3008 u32 hi = *hiref;
3010 return HILO_U64(hi, lo);
3011 #else
3012 return lo;
3013 #endif
3014 }
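/*
 * Illustrative sketch (not driver code): the statistics blocks store
 * each 64-bit counter as an adjacent {hi, lo} pair of u32, which is why
 * the low word lives at hiref + 1.  On 64-bit kernels the halves are
 * glued back together; on 32-bit kernels only the low word fits a long.
 */
#if 0
static void example_hilo_layout(void)
{
	u32 pair[2] = { 0x1, 0x23456789 };	/* { hi, lo } */
	long val = bnx2x_hilo(pair);	/* 0x123456789 on 64-bit kernels */
}
#endif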
3017 * Init service functions
3020 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3022 if (!bp->stats_pending) {
3023 struct eth_query_ramrod_data ramrod_data = {0};
3026 ramrod_data.drv_counter = bp->stats_counter++;
3027 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3028 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3030 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3031 ((u32 *)&ramrod_data)[1],
3032 ((u32 *)&ramrod_data)[0], 0);
3033 if (rc == 0) {
3034 /* stats ramrod has its own slot on the spq */
3035 bp->spq_left++;
3036 bp->stats_pending = 1;
3037 }
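/*
 * Illustrative sketch (not driver code): the ramrod payload travels as
 * two u32 halves, so the 8-byte ramrod_data is reinterpreted word by
 * word - index [1] becomes data_hi and index [0] data_lo above.
 */
#if 0
static void example_split_payload(const void *data, u32 *hi, u32 *lo)
{
	*hi = ((const u32 *)data)[1];
	*lo = ((const u32 *)data)[0];
}
#endif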
3041 static void bnx2x_stats_init(struct bnx2x *bp)
3043 int port = BP_PORT(bp);
3045 bp->executer_idx = 0;
3046 bp->stats_counter = 0;
3049 if (!BP_NOMCP(bp))
3050 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3051 else
3052 bp->port.port_stx = 0;
3053 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3055 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3056 bp->port.old_nig_stats.brb_discard =
3057 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3058 bp->port.old_nig_stats.brb_truncate =
3059 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3060 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3061 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3062 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3063 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3065 /* function stats */
3066 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3067 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3068 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3069 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3071 bp->stats_state = STATS_STATE_DISABLED;
3072 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3073 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3076 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3078 struct dmae_command *dmae = &bp->stats_dmae;
3079 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3081 *stats_comp = DMAE_COMP_VAL;
3084 if (bp->executer_idx) {
3085 int loader_idx = PMF_DMAE_C(bp);
3087 memset(dmae, 0, sizeof(struct dmae_command));
3089 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3090 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3091 DMAE_CMD_DST_RESET |
3092 #ifdef __BIG_ENDIAN
3093 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3094 #else
3095 DMAE_CMD_ENDIANITY_DW_SWAP |
3096 #endif
3097 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3099 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3100 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3101 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3102 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3103 sizeof(struct dmae_command) *
3104 (loader_idx + 1)) >> 2;
3105 dmae->dst_addr_hi = 0;
3106 dmae->len = sizeof(struct dmae_command) >> 2;
3109 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3110 dmae->comp_addr_hi = 0;
3114 bnx2x_post_dmae(bp, dmae, loader_idx);
3116 } else if (bp->func_stx) {
3118 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3122 static int bnx2x_stats_comp(struct bnx2x *bp)
3124 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3125 int cnt = 10;
3127 might_sleep();
3128 while (*stats_comp != DMAE_COMP_VAL) {
3129 if (!cnt) {
3130 BNX2X_ERR("timeout waiting for stats finished\n");
3131 break;
3132 }
3133 cnt--;
3134 msleep(1);
3135 }
3136 return 1;
3140 * Statistics service functions
3143 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3145 struct dmae_command *dmae;
3146 u32 opcode;
3147 int loader_idx = PMF_DMAE_C(bp);
3148 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3151 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3152 BNX2X_ERR("BUG!\n");
3153 return;
3154 }
3156 bp->executer_idx = 0;
3158 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3160 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3161 #ifdef __BIG_ENDIAN
3162 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3163 #else
3164 DMAE_CMD_ENDIANITY_DW_SWAP |
3165 #endif
3166 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3167 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3169 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3170 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3171 dmae->src_addr_lo = bp->port.port_stx >> 2;
3172 dmae->src_addr_hi = 0;
3173 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3174 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3175 dmae->len = DMAE_LEN32_RD_MAX;
3176 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3177 dmae->comp_addr_hi = 0;
3180 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3181 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3182 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3183 dmae->src_addr_hi = 0;
3184 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3185 DMAE_LEN32_RD_MAX * 4);
3186 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3187 DMAE_LEN32_RD_MAX * 4);
3188 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3189 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3190 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3191 dmae->comp_val = DMAE_COMP_VAL;
3194 bnx2x_hw_stats_post(bp);
3195 bnx2x_stats_comp(bp);
3198 static void bnx2x_port_stats_init(struct bnx2x *bp)
3200 struct dmae_command *dmae;
3201 int port = BP_PORT(bp);
3202 int vn = BP_E1HVN(bp);
3204 int loader_idx = PMF_DMAE_C(bp);
3206 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3209 if (!bp->link_vars.link_up || !bp->port.pmf) {
3210 BNX2X_ERR("BUG!\n");
3211 return;
3212 }
3214 bp->executer_idx = 0;
3217 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3218 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3219 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3220 #ifdef __BIG_ENDIAN
3221 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3222 #else
3223 DMAE_CMD_ENDIANITY_DW_SWAP |
3224 #endif
3225 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3226 (vn << DMAE_CMD_E1HVN_SHIFT));
3228 if (bp->port.port_stx) {
3230 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3231 dmae->opcode = opcode;
3232 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3233 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3234 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3235 dmae->dst_addr_hi = 0;
3236 dmae->len = sizeof(struct host_port_stats) >> 2;
3237 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3238 dmae->comp_addr_hi = 0;
3239 dmae->comp_val = 1;
3240 }
3242 if (bp->func_stx) {
3244 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3245 dmae->opcode = opcode;
3246 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3247 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3248 dmae->dst_addr_lo = bp->func_stx >> 2;
3249 dmae->dst_addr_hi = 0;
3250 dmae->len = sizeof(struct host_func_stats) >> 2;
3251 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3252 dmae->comp_addr_hi = 0;
3257 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3258 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3260 #ifdef __BIG_ENDIAN
3261 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3262 #else
3263 DMAE_CMD_ENDIANITY_DW_SWAP |
3264 #endif
3263 DMAE_CMD_ENDIANITY_DW_SWAP |
3265 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3266 (vn << DMAE_CMD_E1HVN_SHIFT));
3268 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3270 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3271 NIG_REG_INGRESS_BMAC0_MEM);
3273 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3274 BIGMAC_REGISTER_TX_STAT_GTBYT */
3275 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3276 dmae->opcode = opcode;
3277 dmae->src_addr_lo = (mac_addr +
3278 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3279 dmae->src_addr_hi = 0;
3280 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3281 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3282 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3283 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3284 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3285 dmae->comp_addr_hi = 0;
3288 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3289 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3290 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3291 dmae->opcode = opcode;
3292 dmae->src_addr_lo = (mac_addr +
3293 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3294 dmae->src_addr_hi = 0;
3295 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3296 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3297 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3298 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3299 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3300 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3301 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3302 dmae->comp_addr_hi = 0;
3305 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3307 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3309 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3310 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3311 dmae->opcode = opcode;
3312 dmae->src_addr_lo = (mac_addr +
3313 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3314 dmae->src_addr_hi = 0;
3315 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3316 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3317 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3318 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3319 dmae->comp_addr_hi = 0;
3322 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3323 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3324 dmae->opcode = opcode;
3325 dmae->src_addr_lo = (mac_addr +
3326 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3327 dmae->src_addr_hi = 0;
3328 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3329 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3330 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3331 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3332 dmae->len = 1;
3333 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3334 dmae->comp_addr_hi = 0;
3337 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3338 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3339 dmae->opcode = opcode;
3340 dmae->src_addr_lo = (mac_addr +
3341 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3342 dmae->src_addr_hi = 0;
3343 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3344 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3345 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3346 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3347 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3348 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3349 dmae->comp_addr_hi = 0;
3354 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3355 dmae->opcode = opcode;
3356 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3357 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3358 dmae->src_addr_hi = 0;
3359 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3360 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3361 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3362 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3363 dmae->comp_addr_hi = 0;
3366 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3367 dmae->opcode = opcode;
3368 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3369 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3370 dmae->src_addr_hi = 0;
3371 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3372 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3373 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3374 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3375 dmae->len = (2*sizeof(u32)) >> 2;
3376 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3377 dmae->comp_addr_hi = 0;
3380 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3381 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3382 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3383 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3384 #ifdef __BIG_ENDIAN
3385 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3386 #else
3387 DMAE_CMD_ENDIANITY_DW_SWAP |
3388 #endif
3389 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3390 (vn << DMAE_CMD_E1HVN_SHIFT));
3391 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3392 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3393 dmae->src_addr_hi = 0;
3394 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3395 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3396 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3397 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3398 dmae->len = (2*sizeof(u32)) >> 2;
3399 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3400 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3401 dmae->comp_val = DMAE_COMP_VAL;
3406 static void bnx2x_func_stats_init(struct bnx2x *bp)
3408 struct dmae_command *dmae = &bp->stats_dmae;
3409 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3412 if (!bp->func_stx) {
3413 BNX2X_ERR("BUG!\n");
3414 return;
3415 }
3417 bp->executer_idx = 0;
3418 memset(dmae, 0, sizeof(struct dmae_command));
3420 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3421 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3422 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3423 #ifdef __BIG_ENDIAN
3424 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3425 #else
3426 DMAE_CMD_ENDIANITY_DW_SWAP |
3427 #endif
3428 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3429 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3430 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3431 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3432 dmae->dst_addr_lo = bp->func_stx >> 2;
3433 dmae->dst_addr_hi = 0;
3434 dmae->len = sizeof(struct host_func_stats) >> 2;
3435 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3436 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3437 dmae->comp_val = DMAE_COMP_VAL;
3442 static void bnx2x_stats_start(struct bnx2x *bp)
3444 if (bp->port.pmf)
3445 bnx2x_port_stats_init(bp);
3447 else if (bp->func_stx)
3448 bnx2x_func_stats_init(bp);
3450 bnx2x_hw_stats_post(bp);
3451 bnx2x_storm_stats_post(bp);
3454 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3456 bnx2x_stats_comp(bp);
3457 bnx2x_stats_pmf_update(bp);
3458 bnx2x_stats_start(bp);
3461 static void bnx2x_stats_restart(struct bnx2x *bp)
3463 bnx2x_stats_comp(bp);
3464 bnx2x_stats_start(bp);
3467 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3469 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3470 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3471 struct regpair diff;
3473 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3474 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3475 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3476 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3477 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3478 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3479 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3480 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3481 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3482 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3483 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3484 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3485 UPDATE_STAT64(tx_stat_gt127,
3486 tx_stat_etherstatspkts65octetsto127octets);
3487 UPDATE_STAT64(tx_stat_gt255,
3488 tx_stat_etherstatspkts128octetsto255octets);
3489 UPDATE_STAT64(tx_stat_gt511,
3490 tx_stat_etherstatspkts256octetsto511octets);
3491 UPDATE_STAT64(tx_stat_gt1023,
3492 tx_stat_etherstatspkts512octetsto1023octets);
3493 UPDATE_STAT64(tx_stat_gt1518,
3494 tx_stat_etherstatspkts1024octetsto1522octets);
3495 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3496 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3497 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3498 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3499 UPDATE_STAT64(tx_stat_gterr,
3500 tx_stat_dot3statsinternalmactransmiterrors);
3501 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3504 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3506 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3507 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3509 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3510 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3511 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3512 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3513 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3514 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3515 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3516 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3517 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3518 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3519 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3520 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3521 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3522 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3523 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3524 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3525 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3526 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3527 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3528 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3529 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3530 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3531 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3532 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3533 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3534 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3535 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3536 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3537 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3538 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3539 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3542 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3544 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3545 struct nig_stats *old = &(bp->port.old_nig_stats);
3546 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3547 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3548 struct regpair diff;
3550 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3551 bnx2x_bmac_stats_update(bp);
3553 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3554 bnx2x_emac_stats_update(bp);
3556 else { /* unreached */
3557 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3558 return -1;
3559 }
3561 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3562 new->brb_discard - old->brb_discard);
3563 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3564 new->brb_truncate - old->brb_truncate);
3566 UPDATE_STAT64_NIG(egress_mac_pkt0,
3567 etherstatspkts1024octetsto1522octets);
3568 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3570 memcpy(old, new, sizeof(struct nig_stats));
3572 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3573 sizeof(struct mac_stx));
3574 estats->brb_drop_hi = pstats->brb_drop_hi;
3575 estats->brb_drop_lo = pstats->brb_drop_lo;
3577 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3579 return 0;
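/*
 * Illustrative sketch (not driver code): writing start = ++end stamps a
 * completed update; a consumer that reads start != end knows it raced
 * with an update in flight and should treat the snapshot as torn, much
 * like a seqcount.
 */
#if 0
static int example_snapshot_consistent(u16 start, u16 end)
{
	return start == end;	/* equal only when no update is pending */
}
#endif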
3582 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3584 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3585 int cl_id = BP_CL_ID(bp);
3586 struct tstorm_per_port_stats *tport =
3587 &stats->tstorm_common.port_statistics;
3588 struct tstorm_per_client_stats *tclient =
3589 &stats->tstorm_common.client_statistics[cl_id];
3590 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3591 struct xstorm_per_client_stats *xclient =
3592 &stats->xstorm_common.client_statistics[cl_id];
3593 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3594 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3595 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3598 /* are storm stats valid? */
3599 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3600 bp->stats_counter) {
3601 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3602 " tstorm counter (%d) != stats_counter (%d)\n",
3603 tclient->stats_counter, bp->stats_counter);
3604 return -1;
3605 }
3606 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3607 bp->stats_counter) {
3608 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3609 " xstorm counter (%d) != stats_counter (%d)\n",
3610 xclient->stats_counter, bp->stats_counter);
3611 return -1;
3612 }
3614 fstats->total_bytes_received_hi =
3615 fstats->valid_bytes_received_hi =
3616 le32_to_cpu(tclient->total_rcv_bytes.hi);
3617 fstats->total_bytes_received_lo =
3618 fstats->valid_bytes_received_lo =
3619 le32_to_cpu(tclient->total_rcv_bytes.lo);
3621 estats->error_bytes_received_hi =
3622 le32_to_cpu(tclient->rcv_error_bytes.hi);
3623 estats->error_bytes_received_lo =
3624 le32_to_cpu(tclient->rcv_error_bytes.lo);
3625 ADD_64(estats->error_bytes_received_hi,
3626 estats->rx_stat_ifhcinbadoctets_hi,
3627 estats->error_bytes_received_lo,
3628 estats->rx_stat_ifhcinbadoctets_lo);
3630 ADD_64(fstats->total_bytes_received_hi,
3631 estats->error_bytes_received_hi,
3632 fstats->total_bytes_received_lo,
3633 estats->error_bytes_received_lo);
3635 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3636 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3637 total_multicast_packets_received);
3638 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3639 total_broadcast_packets_received);
3641 fstats->total_bytes_transmitted_hi =
3642 le32_to_cpu(xclient->total_sent_bytes.hi);
3643 fstats->total_bytes_transmitted_lo =
3644 le32_to_cpu(xclient->total_sent_bytes.lo);
3646 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3647 total_unicast_packets_transmitted);
3648 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3649 total_multicast_packets_transmitted);
3650 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3651 total_broadcast_packets_transmitted);
3653 memcpy(estats, &(fstats->total_bytes_received_hi),
3654 sizeof(struct host_func_stats) - 2*sizeof(u32));
3656 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3657 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3658 estats->brb_truncate_discard =
3659 le32_to_cpu(tport->brb_truncate_discard);
3660 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3662 old_tclient->rcv_unicast_bytes.hi =
3663 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3664 old_tclient->rcv_unicast_bytes.lo =
3665 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3666 old_tclient->rcv_broadcast_bytes.hi =
3667 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3668 old_tclient->rcv_broadcast_bytes.lo =
3669 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3670 old_tclient->rcv_multicast_bytes.hi =
3671 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3672 old_tclient->rcv_multicast_bytes.lo =
3673 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3674 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3676 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3677 old_tclient->packets_too_big_discard =
3678 le32_to_cpu(tclient->packets_too_big_discard);
3679 estats->no_buff_discard =
3680 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3681 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3683 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3684 old_xclient->unicast_bytes_sent.hi =
3685 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3686 old_xclient->unicast_bytes_sent.lo =
3687 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3688 old_xclient->multicast_bytes_sent.hi =
3689 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3690 old_xclient->multicast_bytes_sent.lo =
3691 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3692 old_xclient->broadcast_bytes_sent.hi =
3693 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3694 old_xclient->broadcast_bytes_sent.lo =
3695 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3697 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3699 return 0;
3702 static void bnx2x_net_stats_update(struct bnx2x *bp)
3704 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3705 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3706 struct net_device_stats *nstats = &bp->dev->stats;
3708 nstats->rx_packets =
3709 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3710 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3711 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3713 nstats->tx_packets =
3714 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3715 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3716 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3718 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3720 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3722 nstats->rx_dropped = old_tclient->checksum_discard +
3723 estats->mac_discard;
3724 nstats->tx_dropped = 0;
3726 nstats->multicast =
3727 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3729 nstats->collisions =
3730 estats->tx_stat_dot3statssinglecollisionframes_lo +
3731 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3732 estats->tx_stat_dot3statslatecollisions_lo +
3733 estats->tx_stat_dot3statsexcessivecollisions_lo;
3735 estats->jabber_packets_received =
3736 old_tclient->packets_too_big_discard +
3737 estats->rx_stat_dot3statsframestoolong_lo;
3739 nstats->rx_length_errors =
3740 estats->rx_stat_etherstatsundersizepkts_lo +
3741 estats->jabber_packets_received;
3742 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3743 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3744 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3745 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3746 nstats->rx_missed_errors = estats->xxoverflow_discard;
3748 nstats->rx_errors = nstats->rx_length_errors +
3749 nstats->rx_over_errors +
3750 nstats->rx_crc_errors +
3751 nstats->rx_frame_errors +
3752 nstats->rx_fifo_errors +
3753 nstats->rx_missed_errors;
3755 nstats->tx_aborted_errors =
3756 estats->tx_stat_dot3statslatecollisions_lo +
3757 estats->tx_stat_dot3statsexcessivecollisions_lo;
3758 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3759 nstats->tx_fifo_errors = 0;
3760 nstats->tx_heartbeat_errors = 0;
3761 nstats->tx_window_errors = 0;
3763 nstats->tx_errors = nstats->tx_aborted_errors +
3764 nstats->tx_carrier_errors;
3767 static void bnx2x_stats_update(struct bnx2x *bp)
3769 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3770 int update = 0;
3772 if (*stats_comp != DMAE_COMP_VAL)
3773 return;
3775 if (bp->port.pmf)
3776 update = (bnx2x_hw_stats_update(bp) == 0);
3778 update |= (bnx2x_storm_stats_update(bp) == 0);
3780 if (update)
3781 bnx2x_net_stats_update(bp);
3783 else {
3784 if (bp->stats_pending) {
3785 bp->stats_pending++;
3786 if (bp->stats_pending == 3) {
3787 BNX2X_ERR("stats not updated for 3 times\n");
3794 if (bp->msglevel & NETIF_MSG_TIMER) {
3795 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3796 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3797 struct net_device_stats *nstats = &bp->dev->stats;
3800 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3801 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3802 " tx pkt (%lx)\n",
3803 bnx2x_tx_avail(bp->fp),
3804 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3805 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3806 " rx pkt (%lx)\n",
3807 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3808 bp->fp->rx_comp_cons),
3809 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3810 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3811 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3812 estats->driver_xoff, estats->brb_drop_lo);
3813 printk(KERN_DEBUG "tstats: checksum_discard %u "
3814 "packets_too_big_discard %u no_buff_discard %u "
3815 "mac_discard %u mac_filter_discard %u "
3816 "xxovrflow_discard %u brb_truncate_discard %u "
3817 "ttl0_discard %u\n",
3818 old_tclient->checksum_discard,
3819 old_tclient->packets_too_big_discard,
3820 old_tclient->no_buff_discard, estats->mac_discard,
3821 estats->mac_filter_discard, estats->xxoverflow_discard,
3822 estats->brb_truncate_discard,
3823 old_tclient->ttl0_discard);
3825 for_each_queue(bp, i) {
3826 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3827 bnx2x_fp(bp, i, tx_pkt),
3828 bnx2x_fp(bp, i, rx_pkt),
3829 bnx2x_fp(bp, i, rx_calls));
3833 bnx2x_hw_stats_post(bp);
3834 bnx2x_storm_stats_post(bp);
3837 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3839 struct dmae_command *dmae;
3840 u32 opcode;
3841 int loader_idx = PMF_DMAE_C(bp);
3842 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3844 bp->executer_idx = 0;
3846 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3848 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3850 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3852 DMAE_CMD_ENDIANITY_DW_SWAP |
3854 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3855 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3857 if (bp->port.port_stx) {
3859 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3860 if (bp->func_stx)
3861 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3862 else
3863 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3864 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3865 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3866 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3867 dmae->dst_addr_hi = 0;
3868 dmae->len = sizeof(struct host_port_stats) >> 2;
3869 if (bp->func_stx) {
3870 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3871 dmae->comp_addr_hi = 0;
3872 dmae->comp_val = 1;
3873 } else {
3874 dmae->comp_addr_lo =
3875 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3876 dmae->comp_addr_hi =
3877 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3878 dmae->comp_val = DMAE_COMP_VAL;
3886 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3887 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3888 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3889 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3890 dmae->dst_addr_lo = bp->func_stx >> 2;
3891 dmae->dst_addr_hi = 0;
3892 dmae->len = sizeof(struct host_func_stats) >> 2;
3893 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3894 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3895 dmae->comp_val = DMAE_COMP_VAL;
3901 static void bnx2x_stats_stop(struct bnx2x *bp)
3903 int update = 0;
3905 bnx2x_stats_comp(bp);
3907 if (bp->port.pmf)
3908 update = (bnx2x_hw_stats_update(bp) == 0);
3910 update |= (bnx2x_storm_stats_update(bp) == 0);
3912 if (update) {
3913 bnx2x_net_stats_update(bp);
3915 if (bp->port.pmf)
3916 bnx2x_port_stats_stop(bp);
3918 bnx2x_hw_stats_post(bp);
3919 bnx2x_stats_comp(bp);
3923 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3927 static const struct {
3928 void (*action)(struct bnx2x *bp);
3929 enum bnx2x_stats_state next_state;
3930 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3933 {/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3934 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3935 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3936 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3937 },
3939 {/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3940 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3941 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3942 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3943 }
3944 };
3946 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3948 enum bnx2x_stats_state state = bp->stats_state;
3950 bnx2x_stats_stm[state][event].action(bp);
3951 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3953 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3954 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3955 state, event, bp->stats_state);
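/*
 * Illustrative sketch (not driver code): the table above is a textbook
 * action/next-state machine - every (state, event) cell names a handler
 * and the successor state, so dispatch is two array lookups and an
 * indirect call.  The same shape in miniature:
 */
#if 0
enum ex_state { EX_OFF, EX_ON, EX_STATE_MAX };
enum ex_event { EX_START, EX_STOP, EX_EVENT_MAX };

static void ex_nop(void) { }
static void ex_up(void) { }
static void ex_down(void) { }

static const struct {
	void (*action)(void);
	enum ex_state next_state;
} ex_stm[EX_STATE_MAX][EX_EVENT_MAX] = {
/* OFF */ { {ex_up, EX_ON}, {ex_nop, EX_OFF} },
/* ON  */ { {ex_nop, EX_ON}, {ex_down, EX_OFF} },
};

static void ex_handle(enum ex_state *state, enum ex_event event)
{
	ex_stm[*state][event].action();
	*state = ex_stm[*state][event].next_state;
}
#endif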
3958 static void bnx2x_timer(unsigned long data)
3960 struct bnx2x *bp = (struct bnx2x *) data;
3962 if (!netif_running(bp->dev))
3963 return;
3965 if (atomic_read(&bp->intr_sem) != 0)
3966 goto timer_restart;
3968 if (poll) {
3969 struct bnx2x_fastpath *fp = &bp->fp[0];
3972 bnx2x_tx_int(fp, 1000);
3973 rc = bnx2x_rx_int(fp, 1000);
3976 if (!BP_NOMCP(bp)) {
3977 int func = BP_FUNC(bp);
3981 ++bp->fw_drv_pulse_wr_seq;
3982 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3983 /* TBD - add SYSTEM_TIME */
3984 drv_pulse = bp->fw_drv_pulse_wr_seq;
3985 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3987 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3988 MCP_PULSE_SEQ_MASK);
3989 /* The delta between driver pulse and mcp response
3990 * should be 1 (before mcp response) or 0 (after mcp response)
3992 if ((drv_pulse != mcp_pulse) &&
3993 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3994 /* someone lost a heartbeat... */
3995 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3996 drv_pulse, mcp_pulse);
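/*
 * Illustrative sketch (not driver code): driver and MCP exchange
 * free-running sequence numbers.  Both sides mask with
 * MCP_PULSE_SEQ_MASK, so the comparison is modular and the only legal
 * gaps are 0 (MCP already answered) and +1 (answer still pending):
 */
#if 0
static int example_pulse_ok(u16 drv, u16 mcp, u16 mask)
{
	return (drv == (mcp & mask)) ||
	       (drv == ((mcp + 1) & mask));
}
#endif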
4000 if ((bp->state == BNX2X_STATE_OPEN) ||
4001 (bp->state == BNX2X_STATE_DISABLED))
4002 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4004 timer_restart:
4005 mod_timer(&bp->timer, jiffies + bp->current_interval);
4008 /* end of Statistics */
4013 * nic init service functions
4016 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4018 int port = BP_PORT(bp);
4020 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4021 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4022 sizeof(struct ustorm_status_block)/4);
4023 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4024 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4025 sizeof(struct cstorm_status_block)/4);
4028 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4029 dma_addr_t mapping, int sb_id)
4031 int port = BP_PORT(bp);
4032 int func = BP_FUNC(bp);
4037 section = ((u64)mapping) + offsetof(struct host_status_block,
4038 u_status_block);
4039 sb->u_status_block.status_block_id = sb_id;
4041 REG_WR(bp, BAR_USTRORM_INTMEM +
4042 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4043 REG_WR(bp, BAR_USTRORM_INTMEM +
4044 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4045 U64_HI(section));
4046 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4047 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4049 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4050 REG_WR16(bp, BAR_USTRORM_INTMEM +
4051 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4054 section = ((u64)mapping) + offsetof(struct host_status_block,
4055 c_status_block);
4056 sb->c_status_block.status_block_id = sb_id;
4058 REG_WR(bp, BAR_CSTRORM_INTMEM +
4059 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4060 REG_WR(bp, BAR_CSTRORM_INTMEM +
4061 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4062 U64_HI(section));
4063 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4064 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4066 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4067 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4068 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4070 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4073 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4075 int func = BP_FUNC(bp);
4077 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4078 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4079 sizeof(struct ustorm_def_status_block)/4);
4080 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4081 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4082 sizeof(struct cstorm_def_status_block)/4);
4083 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4084 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4085 sizeof(struct xstorm_def_status_block)/4);
4086 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4087 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4088 sizeof(struct tstorm_def_status_block)/4);
4091 static void bnx2x_init_def_sb(struct bnx2x *bp,
4092 struct host_def_status_block *def_sb,
4093 dma_addr_t mapping, int sb_id)
4095 int port = BP_PORT(bp);
4096 int func = BP_FUNC(bp);
4097 int index, val, reg_offset;
4101 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4102 atten_status_block);
4103 def_sb->atten_status_block.status_block_id = sb_id;
4107 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4108 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4110 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4111 bp->attn_group[index].sig[0] = REG_RD(bp,
4112 reg_offset + 0x10*index);
4113 bp->attn_group[index].sig[1] = REG_RD(bp,
4114 reg_offset + 0x4 + 0x10*index);
4115 bp->attn_group[index].sig[2] = REG_RD(bp,
4116 reg_offset + 0x8 + 0x10*index);
4117 bp->attn_group[index].sig[3] = REG_RD(bp,
4118 reg_offset + 0xc + 0x10*index);
4121 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4122 HC_REG_ATTN_MSG0_ADDR_L);
4124 REG_WR(bp, reg_offset, U64_LO(section));
4125 REG_WR(bp, reg_offset + 4, U64_HI(section));
4127 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4129 val = REG_RD(bp, reg_offset);
4130 val |= sb_id;
4131 REG_WR(bp, reg_offset, val);
4134 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4135 u_def_status_block);
4136 def_sb->u_def_status_block.status_block_id = sb_id;
4138 REG_WR(bp, BAR_USTRORM_INTMEM +
4139 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4140 REG_WR(bp, BAR_USTRORM_INTMEM +
4141 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4142 U64_HI(section));
4143 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4144 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4146 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4147 REG_WR16(bp, BAR_USTRORM_INTMEM +
4148 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4151 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4152 c_def_status_block);
4153 def_sb->c_def_status_block.status_block_id = sb_id;
4155 REG_WR(bp, BAR_CSTRORM_INTMEM +
4156 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4157 REG_WR(bp, BAR_CSTRORM_INTMEM +
4158 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4159 U64_HI(section));
4160 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4161 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4163 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4164 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4165 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4168 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4169 t_def_status_block);
4170 def_sb->t_def_status_block.status_block_id = sb_id;
4172 REG_WR(bp, BAR_TSTRORM_INTMEM +
4173 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4174 REG_WR(bp, BAR_TSTRORM_INTMEM +
4175 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4176 U64_HI(section));
4177 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4178 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4180 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4181 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4182 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4185 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4186 x_def_status_block);
4187 def_sb->x_def_status_block.status_block_id = sb_id;
4189 REG_WR(bp, BAR_XSTRORM_INTMEM +
4190 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4191 REG_WR(bp, BAR_XSTRORM_INTMEM +
4192 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4193 U64_HI(section));
4194 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4195 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4197 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4198 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4199 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4201 bp->stats_pending = 0;
4202 bp->set_mac_pending = 0;
4204 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4207 static void bnx2x_update_coalesce(struct bnx2x *bp)
4209 int port = BP_PORT(bp);
4210 int i;
4212 for_each_queue(bp, i) {
4213 int sb_id = bp->fp[i].sb_id;
4215 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4216 REG_WR8(bp, BAR_USTRORM_INTMEM +
4217 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4218 U_SB_ETH_RX_CQ_INDEX),
4219 bp->rx_ticks/12);
4220 REG_WR16(bp, BAR_USTRORM_INTMEM +
4221 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4222 U_SB_ETH_RX_CQ_INDEX),
4223 bp->rx_ticks ? 0 : 1);
4224 REG_WR16(bp, BAR_USTRORM_INTMEM +
4225 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4226 U_SB_ETH_RX_BD_INDEX),
4227 bp->rx_ticks ? 0 : 1);
4229 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4230 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4231 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4232 C_SB_ETH_TX_CQ_INDEX),
4233 bp->tx_ticks/12);
4234 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4235 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4236 C_SB_ETH_TX_CQ_INDEX),
4237 bp->tx_ticks ? 0 : 1);
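/* Illustrative note, based on the assumption that the HC timeout
 * fields above are programmed in 12-usec units: rx_ticks = 48 would
 * be written as 48/12 = 4, while rx_ticks = 0 leaves the timeout at 0
 * and sets the matching HC_DISABLE word to 1, turning interrupt
 * coalescing off for that status-block index entirely.
 */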
4241 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4242 struct bnx2x_fastpath *fp, int last)
4244 int i;
4246 for (i = 0; i < last; i++) {
4247 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4248 struct sk_buff *skb = rx_buf->skb;
4250 if (skb == NULL) {
4251 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4252 continue;
4253 }
4255 if (fp->tpa_state[i] == BNX2X_TPA_START)
4256 pci_unmap_single(bp->pdev,
4257 pci_unmap_addr(rx_buf, mapping),
4258 bp->rx_buf_size,
4259 PCI_DMA_FROMDEVICE);
4261 dev_kfree_skb(skb);
4262 rx_buf->skb = NULL;
4266 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4268 int func = BP_FUNC(bp);
4269 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4270 ETH_MAX_AGGREGATION_QUEUES_E1H;
4271 u16 ring_prod, cqe_ring_prod;
4272 int i, j;
4274 bp->rx_buf_size = bp->dev->mtu;
4275 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4276 BCM_RX_ETH_PAYLOAD_ALIGN;
4278 if (bp->flags & TPA_ENABLE_FLAG) {
4279 DP(NETIF_MSG_IFUP,
4280 "rx_buf_size %d effective_mtu %d\n",
4281 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4283 for_each_queue(bp, j) {
4284 struct bnx2x_fastpath *fp = &bp->fp[j];
4286 for (i = 0; i < max_agg_queues; i++) {
4287 fp->tpa_pool[i].skb =
4288 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4289 if (!fp->tpa_pool[i].skb) {
4290 BNX2X_ERR("Failed to allocate TPA "
4291 "skb pool for queue[%d] - "
4292 "disabling TPA on this "
4293 "queue!\n", j);
4294 bnx2x_free_tpa_pool(bp, fp, i);
4295 fp->disable_tpa = 1;
4296 break;
4297 }
4298 pci_unmap_addr_set((struct sw_rx_bd *)
4299 &bp->fp->tpa_pool[i],
4300 mapping, 0);
4301 fp->tpa_state[i] = BNX2X_TPA_STOP;
4306 for_each_queue(bp, j) {
4307 struct bnx2x_fastpath *fp = &bp->fp[j];
4310 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4311 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4313 /* "next page" elements initialization */
4315 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4316 struct eth_rx_sge *sge;
4318 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4319 sge->addr_hi =
4320 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4321 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4322 sge->addr_lo =
4323 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4324 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4327 bnx2x_init_sge_ring_bit_mask(fp);
4330 for (i = 1; i <= NUM_RX_RINGS; i++) {
4331 struct eth_rx_bd *rx_bd;
4333 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4334 rx_bd->addr_hi =
4335 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4336 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4337 rx_bd->addr_lo =
4338 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4339 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4343 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4344 struct eth_rx_cqe_next_page *nextpg;
4346 nextpg = (struct eth_rx_cqe_next_page *)
4347 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4348 nextpg->addr_hi =
4349 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4350 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4351 nextpg->addr_lo =
4352 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4353 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
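/* Illustrative sketch of the chaining above (values assumed, not
 * taken from the headers): with a 4KB page and 8-byte RX BDs,
 * RX_DESC_CNT is 512, so the last two entries of page i are not used
 * for packets; they hold the 64-bit DMA address of page
 * (i+1) % NUM_RX_RINGS, which is why the loops write indices
 * RX_DESC_CNT * i - 2 (BD/SGE rings) or one whole CQE-sized element
 * at RCQ_DESC_CNT * i - 1, and why the NEXT_*_IDX producer macros
 * skip over those reserved slots.
 */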
4356 /* Allocate SGEs and initialize the ring elements */
4357 for (i = 0, ring_prod = 0;
4358 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4360 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4361 BNX2X_ERR("was only able to allocate "
4362 "%d rx sges\n", i);
4363 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4364 /* Cleanup already allocated elements */
4365 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4366 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4367 fp->disable_tpa = 1;
4368 ring_prod = 0;
4369 break;
4370 }
4371 ring_prod = NEXT_SGE_IDX(ring_prod);
4373 fp->rx_sge_prod = ring_prod;
4375 /* Allocate BDs and initialize BD ring */
4376 fp->rx_comp_cons = 0;
4377 cqe_ring_prod = ring_prod = 0;
4378 for (i = 0; i < bp->rx_ring_size; i++) {
4379 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4380 BNX2X_ERR("was only able to allocate "
4381 "%d rx skbs\n", i);
4382 bp->eth_stats.rx_skb_alloc_failed++;
4383 break;
4384 }
4385 ring_prod = NEXT_RX_IDX(ring_prod);
4386 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4387 WARN_ON(ring_prod <= i);
4390 fp->rx_bd_prod = ring_prod;
4391 /* must not have more available CQEs than BDs */
4392 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4393 cqe_ring_prod);
4394 fp->rx_pkt = fp->rx_calls = 0;
4396 /* Warning!
4397 * this will generate an interrupt (to the TSTORM)
4398 * must only be done after chip is initialized
4399 */
4400 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4401 fp->rx_sge_prod);
4402 if (j != 0)
4403 continue;
4405 REG_WR(bp, BAR_USTRORM_INTMEM +
4406 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4407 U64_LO(fp->rx_comp_mapping));
4408 REG_WR(bp, BAR_USTRORM_INTMEM +
4409 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4410 U64_HI(fp->rx_comp_mapping));
4414 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4416 int i, j;
4418 for_each_queue(bp, j) {
4419 struct bnx2x_fastpath *fp = &bp->fp[j];
4421 for (i = 1; i <= NUM_TX_RINGS; i++) {
4422 struct eth_tx_bd *tx_bd =
4423 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4425 tx_bd->addr_hi =
4426 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4427 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4428 tx_bd->addr_lo =
4429 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4430 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4433 fp->tx_pkt_prod = 0;
4434 fp->tx_pkt_cons = 0;
4435 fp->tx_bd_prod = 0;
4436 fp->tx_bd_cons = 0;
4437 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4438 fp->tx_pkt = 0;
4442 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4444 int func = BP_FUNC(bp);
4446 spin_lock_init(&bp->spq_lock);
4448 bp->spq_left = MAX_SPQ_PENDING;
4449 bp->spq_prod_idx = 0;
4450 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4451 bp->spq_prod_bd = bp->spq;
4452 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4454 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4455 U64_LO(bp->spq_mapping));
4456 REG_WR(bp,
4457 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4458 U64_HI(bp->spq_mapping));
4460 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4461 bp->spq_prod_idx);
4464 static void bnx2x_init_context(struct bnx2x *bp)
4466 int i;
4468 for_each_queue(bp, i) {
4469 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4470 struct bnx2x_fastpath *fp = &bp->fp[i];
4471 u8 sb_id = FP_SB_ID(fp);
4473 context->xstorm_st_context.tx_bd_page_base_hi =
4474 U64_HI(fp->tx_desc_mapping);
4475 context->xstorm_st_context.tx_bd_page_base_lo =
4476 U64_LO(fp->tx_desc_mapping);
4477 context->xstorm_st_context.db_data_addr_hi =
4478 U64_HI(fp->tx_prods_mapping);
4479 context->xstorm_st_context.db_data_addr_lo =
4480 U64_LO(fp->tx_prods_mapping);
4481 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4482 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4484 context->ustorm_st_context.common.sb_index_numbers =
4485 BNX2X_RX_SB_INDEX_NUM;
4486 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4487 context->ustorm_st_context.common.status_block_id = sb_id;
4488 context->ustorm_st_context.common.flags =
4489 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4490 context->ustorm_st_context.common.mc_alignment_size =
4491 BCM_RX_ETH_PAYLOAD_ALIGN;
4492 context->ustorm_st_context.common.bd_buff_size =
4493 bp->rx_buf_size;
4494 context->ustorm_st_context.common.bd_page_base_hi =
4495 U64_HI(fp->rx_desc_mapping);
4496 context->ustorm_st_context.common.bd_page_base_lo =
4497 U64_LO(fp->rx_desc_mapping);
4498 if (!fp->disable_tpa) {
4499 context->ustorm_st_context.common.flags |=
4500 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4501 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4502 context->ustorm_st_context.common.sge_buff_size =
4503 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4504 context->ustorm_st_context.common.sge_page_base_hi =
4505 U64_HI(fp->rx_sge_mapping);
4506 context->ustorm_st_context.common.sge_page_base_lo =
4507 U64_LO(fp->rx_sge_mapping);
4510 context->cstorm_st_context.sb_index_number =
4511 C_SB_ETH_TX_CQ_INDEX;
4512 context->cstorm_st_context.status_block_id = sb_id;
4514 context->xstorm_ag_context.cdu_reserved =
4515 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4516 CDU_REGION_NUMBER_XCM_AG,
4517 ETH_CONNECTION_TYPE);
4518 context->ustorm_ag_context.cdu_usage =
4519 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4520 CDU_REGION_NUMBER_UCM_AG,
4521 ETH_CONNECTION_TYPE);
4525 static void bnx2x_init_ind_table(struct bnx2x *bp)
4527 int func = BP_FUNC(bp);
4528 int i;
4530 if (!is_multi(bp))
4531 return;
4533 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4534 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4535 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4536 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4537 BP_CL_ID(bp) + (i % bp->num_queues));
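/* Worked example (illustrative): with bp->num_queues = 4 and a
 * client base of BP_CL_ID(bp) = 0, the loop above fills the table
 * entries with the repeating pattern 0, 1, 2, 3, 0, 1, ... so the
 * RSS hash, taken modulo the table size, spreads flows evenly over
 * the enabled queues.
 */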
4540 static void bnx2x_set_client_config(struct bnx2x *bp)
4542 struct tstorm_eth_client_config tstorm_client = {0};
4543 int port = BP_PORT(bp);
4544 int i;
4546 tstorm_client.mtu = bp->dev->mtu;
4547 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4548 tstorm_client.config_flags =
4549 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4551 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4552 tstorm_client.config_flags |=
4553 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4554 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4558 if (bp->flags & TPA_ENABLE_FLAG) {
4559 tstorm_client.max_sges_for_packet =
4560 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4561 tstorm_client.max_sges_for_packet =
4562 ((tstorm_client.max_sges_for_packet +
4563 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4564 PAGES_PER_SGE_SHIFT;
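/* Worked example (illustrative, assuming 4KB SGE pages and
 * PAGES_PER_SGE = 2): for mtu = 9000, SGE_PAGE_ALIGN(9000) = 12288,
 * so 12288 >> SGE_PAGE_SHIFT = 3 pages; rounding 3 up to a multiple
 * of PAGES_PER_SGE gives 4, and 4 >> PAGES_PER_SGE_SHIFT = 2 SGE
 * entries per aggregated packet.
 */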
4566 tstorm_client.config_flags |=
4567 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4570 for_each_queue(bp, i) {
4571 REG_WR(bp, BAR_TSTRORM_INTMEM +
4572 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4573 ((u32 *)&tstorm_client)[0]);
4574 REG_WR(bp, BAR_TSTRORM_INTMEM +
4575 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4576 ((u32 *)&tstorm_client)[1]);
4579 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4580 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4583 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4585 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4586 int mode = bp->rx_mode;
4587 int mask = (1 << BP_L_ID(bp));
4588 int func = BP_FUNC(bp);
4589 int i;
4591 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4593 switch (mode) {
4594 case BNX2X_RX_MODE_NONE: /* no Rx */
4595 tstorm_mac_filter.ucast_drop_all = mask;
4596 tstorm_mac_filter.mcast_drop_all = mask;
4597 tstorm_mac_filter.bcast_drop_all = mask;
4598 break;
4599 case BNX2X_RX_MODE_NORMAL:
4600 tstorm_mac_filter.bcast_accept_all = mask;
4601 break;
4602 case BNX2X_RX_MODE_ALLMULTI:
4603 tstorm_mac_filter.mcast_accept_all = mask;
4604 tstorm_mac_filter.bcast_accept_all = mask;
4605 break;
4606 case BNX2X_RX_MODE_PROMISC:
4607 tstorm_mac_filter.ucast_accept_all = mask;
4608 tstorm_mac_filter.mcast_accept_all = mask;
4609 tstorm_mac_filter.bcast_accept_all = mask;
4610 break;
4611 default:
4612 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4613 break;
4614 }
4616 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4617 REG_WR(bp, BAR_TSTRORM_INTMEM +
4618 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4619 ((u32 *)&tstorm_mac_filter)[i]);
4621 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4622 ((u32 *)&tstorm_mac_filter)[i]); */
4625 if (mode != BNX2X_RX_MODE_NONE)
4626 bnx2x_set_client_config(bp);
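/* Illustrative example: mask = (1 << BP_L_ID(bp)), so for logical
 * client ID 2 the mask is 0x04, and e.g. ucast_drop_all = 0x04 drops
 * unicast traffic only for that client while leaving the filter bits
 * of the other clients on the same port untouched.
 */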
4629 static void bnx2x_init_internal_common(struct bnx2x *bp)
4631 int i;
4633 if (bp->flags & TPA_ENABLE_FLAG) {
4634 struct tstorm_eth_tpa_exist tpa = {0};
4636 tpa.tpa_exist = 1;
4638 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4639 ((u32 *)&tpa)[0]);
4640 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4641 ((u32 *)&tpa)[1]);
4642 }
4644 /* Zero this manually as its initialization is
4645 currently missing in the initTool */
4646 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4647 REG_WR(bp, BAR_USTRORM_INTMEM +
4648 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4651 static void bnx2x_init_internal_port(struct bnx2x *bp)
4653 int port = BP_PORT(bp);
4655 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4656 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4657 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4658 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4661 static void bnx2x_init_internal_func(struct bnx2x *bp)
4663 struct tstorm_eth_function_common_config tstorm_config = {0};
4664 struct stats_indication_flags stats_flags = {0};
4665 int port = BP_PORT(bp);
4666 int func = BP_FUNC(bp);
4667 int i;
4668 u16 max_agg_size;
4670 if (is_multi(bp)) {
4671 tstorm_config.config_flags = MULTI_FLAGS;
4672 tstorm_config.rss_result_mask = MULTI_MASK;
4673 }
4675 tstorm_config.leading_client_id = BP_L_ID(bp);
4677 REG_WR(bp, BAR_TSTRORM_INTMEM +
4678 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4679 (*(u32 *)&tstorm_config));
4681 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4682 bnx2x_set_storm_rx_mode(bp);
4684 /* reset xstorm per client statistics */
4685 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4686 REG_WR(bp, BAR_XSTRORM_INTMEM +
4687 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4688 i*4, 0);
4689 }
4690 /* reset tstorm per client statistics */
4691 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4692 REG_WR(bp, BAR_TSTRORM_INTMEM +
4693 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4694 i*4, 0);
4695 }
4697 /* Init statistics related context */
4698 stats_flags.collect_eth = 1;
4700 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4701 ((u32 *)&stats_flags)[0]);
4702 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4703 ((u32 *)&stats_flags)[1]);
4705 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4706 ((u32 *)&stats_flags)[0]);
4707 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4708 ((u32 *)&stats_flags)[1]);
4710 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4711 ((u32 *)&stats_flags)[0]);
4712 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4713 ((u32 *)&stats_flags)[1]);
4715 REG_WR(bp, BAR_XSTRORM_INTMEM +
4716 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4717 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4718 REG_WR(bp, BAR_XSTRORM_INTMEM +
4719 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4720 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4722 REG_WR(bp, BAR_TSTRORM_INTMEM +
4723 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4724 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4725 REG_WR(bp, BAR_TSTRORM_INTMEM +
4726 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4727 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4729 if (CHIP_IS_E1H(bp)) {
4730 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4731 IS_E1HMF(bp));
4732 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4733 IS_E1HMF(bp));
4734 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4735 IS_E1HMF(bp));
4736 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4737 IS_E1HMF(bp));
4739 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4740 bp->e1hov);
4741 }
4743 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4744 max_agg_size =
4745 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4746 SGE_PAGE_SIZE * PAGES_PER_SGE),
4747 (u32)0xffff);
4748 for_each_queue(bp, i) {
4749 struct bnx2x_fastpath *fp = &bp->fp[i];
4751 REG_WR(bp, BAR_USTRORM_INTMEM +
4752 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4753 U64_LO(fp->rx_comp_mapping));
4754 REG_WR(bp, BAR_USTRORM_INTMEM +
4755 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4756 U64_HI(fp->rx_comp_mapping));
4758 REG_WR16(bp, BAR_USTRORM_INTMEM +
4759 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4760 max_agg_size);
4764 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4766 switch (load_code) {
4767 case FW_MSG_CODE_DRV_LOAD_COMMON:
4768 bnx2x_init_internal_common(bp);
4769 /* no break */
4771 case FW_MSG_CODE_DRV_LOAD_PORT:
4772 bnx2x_init_internal_port(bp);
4773 /* no break */
4775 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4776 bnx2x_init_internal_func(bp);
4777 break;
4779 default:
4780 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4781 break;
4782 }
4785 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4787 int i;
4789 for_each_queue(bp, i) {
4790 struct bnx2x_fastpath *fp = &bp->fp[i];
4792 fp->bp = bp;
4793 fp->state = BNX2X_FP_STATE_CLOSED;
4794 fp->index = i;
4795 fp->cl_id = BP_L_ID(bp) + i;
4796 fp->sb_id = fp->cl_id;
4797 DP(NETIF_MSG_IFUP,
4798 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4799 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4800 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4801 FP_SB_ID(fp));
4802 bnx2x_update_fpsb_idx(fp);
4805 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4806 DEF_SB_ID);
4807 bnx2x_update_dsb_idx(bp);
4808 bnx2x_update_coalesce(bp);
4809 bnx2x_init_rx_rings(bp);
4810 bnx2x_init_tx_ring(bp);
4811 bnx2x_init_sp_ring(bp);
4812 bnx2x_init_context(bp);
4813 bnx2x_init_internal(bp, load_code);
4814 bnx2x_init_ind_table(bp);
4815 bnx2x_stats_init(bp);
4817 /* At this point, we are ready for interrupts */
4818 atomic_set(&bp->intr_sem, 0);
4820 /* flush all before enabling interrupts */
4821 mb();
4822 mmiowb();
4824 bnx2x_int_enable(bp);
4827 /* end of nic init */
4830 * gzip service functions
4833 static int bnx2x_gunzip_init(struct bnx2x *bp)
4835 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4836 &bp->gunzip_mapping);
4837 if (bp->gunzip_buf == NULL)
4838 goto gunzip_nomem1;
4840 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4841 if (bp->strm == NULL)
4842 goto gunzip_nomem2;
4844 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4845 GFP_KERNEL);
4846 if (bp->strm->workspace == NULL)
4847 goto gunzip_nomem3;
4849 return 0;
4851 gunzip_nomem3:
4852 kfree(bp->strm);
4853 bp->strm = NULL;
4855 gunzip_nomem2:
4856 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4857 bp->gunzip_mapping);
4858 bp->gunzip_buf = NULL;
4860 gunzip_nomem1:
4861 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4862 " un-compression\n", bp->dev->name);
4863 return -ENOMEM;
4866 static void bnx2x_gunzip_end(struct bnx2x *bp)
4868 kfree(bp->strm->workspace);
4870 kfree(bp->strm);
4871 bp->strm = NULL;
4873 if (bp->gunzip_buf) {
4874 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4875 bp->gunzip_mapping);
4876 bp->gunzip_buf = NULL;
4880 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4882 int n, rc;
4884 /* check gzip header */
4885 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4886 return -EINVAL;
4888 n = 10;
4890 #define FNAME 0x8
4892 if (zbuf[3] & FNAME)
4893 while ((zbuf[n++] != 0) && (n < len));
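/* Illustrative layout of the gzip header being parsed above: byte 0
 * is 0x1f, byte 1 is 0x8b, byte 2 is the method (8 = Z_DEFLATED) and
 * byte 3 holds the flags; when the FNAME flag (0x8) is set, a
 * NUL-terminated file name follows the fixed 10-byte header, which is
 * what the while loop skips before handing the raw deflate stream to
 * zlib with a negative window-bits value (no zlib wrapper).
 */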
4895 bp->strm->next_in = zbuf + n;
4896 bp->strm->avail_in = len - n;
4897 bp->strm->next_out = bp->gunzip_buf;
4898 bp->strm->avail_out = FW_BUF_SIZE;
4900 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4901 if (rc != Z_OK)
4902 return rc;
4904 rc = zlib_inflate(bp->strm, Z_FINISH);
4905 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4906 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4907 bp->dev->name, bp->strm->msg);
4909 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4910 if (bp->gunzip_outlen & 0x3)
4911 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4912 " gunzip_outlen (%d) not aligned\n",
4913 bp->dev->name, bp->gunzip_outlen);
4914 bp->gunzip_outlen >>= 2;
4916 zlib_inflateEnd(bp->strm);
4918 if (rc == Z_STREAM_END)
4919 return 0;
4921 return rc;
4924 /* nic load/unload */
4927 * General service functions
4930 /* send a NIG loopback debug packet */
4931 static void bnx2x_lb_pckt(struct bnx2x *bp)
4933 u32 wb_write[3];
4935 /* Ethernet source and destination addresses */
4936 wb_write[0] = 0x55555555;
4937 wb_write[1] = 0x55555555;
4938 wb_write[2] = 0x20; /* SOP */
4939 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4941 /* NON-IP protocol */
4942 wb_write[0] = 0x09000000;
4943 wb_write[1] = 0x55555555;
4944 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4945 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4948 /* Some of the internal memories are not directly readable
4949 * from the driver; to test them we send debug packets
4950 */
4952 static int bnx2x_int_mem_test(struct bnx2x *bp)
4954 int factor;
4955 int count, i;
4956 u32 val = 0;
4958 if (CHIP_REV_IS_FPGA(bp))
4959 factor = 120;
4960 else if (CHIP_REV_IS_EMUL(bp))
4961 factor = 200;
4962 else
4963 factor = 1;
4965 DP(NETIF_MSG_HW, "start part1\n");
4967 /* Disable inputs of parser neighbor blocks */
4968 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4969 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4970 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4971 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4973 /* Write 0 to parser credits for CFC search request */
4974 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4976 /* send Ethernet packet */
4977 bnx2x_lb_pckt(bp);
4979 /* TODO: do we need to reset the NIG statistics here? */
4980 /* Wait until NIG register shows 1 packet of size 0x10 */
4981 count = 1000 * factor;
4982 while (count) {
4984 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4985 val = *bnx2x_sp(bp, wb_data[0]);
4986 if (val == 0x10)
4987 break;
4989 msleep(10);
4990 count--;
4991 }
4992 if (val != 0x10) {
4993 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4994 return -1;
4995 }
4997 /* Wait until PRS register shows 1 packet */
4998 count = 1000 * factor;
4999 while (count) {
5000 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5001 if (val == 1)
5002 break;
5004 msleep(10);
5005 count--;
5006 }
5007 if (val != 0x1) {
5008 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5009 return -2;
5010 }
5012 /* Reset and init BRB, PRS */
5013 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5014 msleep(50);
5015 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5016 msleep(50);
5017 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5018 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5020 DP(NETIF_MSG_HW, "part2\n");
5022 /* Disable inputs of parser neighbor blocks */
5023 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5024 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5025 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5026 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5028 /* Write 0 to parser credits for CFC search request */
5029 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5031 /* send 10 Ethernet packets */
5032 for (i = 0; i < 10; i++)
5033 bnx2x_lb_pckt(bp);
5035 /* Wait until NIG register shows 10 + 1
5036 packets of size 11*0x10 = 0xb0 */
5037 count = 1000 * factor;
5038 while (count) {
5040 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5041 val = *bnx2x_sp(bp, wb_data[0]);
5042 if (val == 0xb0)
5043 break;
5045 msleep(10);
5046 count--;
5047 }
5048 if (val != 0xb0) {
5049 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5050 return -3;
5051 }
5053 /* Wait until PRS register shows 2 packets */
5054 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5055 if (val != 2)
5056 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5058 /* Write 1 to parser credits for CFC search request */
5059 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5061 /* Wait until PRS register shows 3 packets */
5062 msleep(10 * factor);
5063 /* Wait until NIG register shows 1 packet of size 0x10 */
5064 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5065 if (val != 3)
5066 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5068 /* clear NIG EOP FIFO */
5069 for (i = 0; i < 11; i++)
5070 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5071 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5072 if (val != 1) {
5073 BNX2X_ERR("clear of NIG failed\n");
5074 return -4;
5075 }
5077 /* Reset and init BRB, PRS, NIG */
5078 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5079 msleep(50);
5080 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5081 msleep(50);
5082 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5083 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5086 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5089 /* Enable inputs of parser neighbor blocks */
5090 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5091 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5092 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5093 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5095 DP(NETIF_MSG_HW, "done\n");
5097 return 0;
5100 static void enable_blocks_attention(struct bnx2x *bp)
5102 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5103 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5104 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5105 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5106 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5107 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5108 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5109 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5110 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5111 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5112 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5113 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5114 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5115 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5116 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5117 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5118 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5119 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5120 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5121 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5122 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5123 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5124 if (CHIP_REV_IS_FPGA(bp))
5125 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5126 else
5127 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5128 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5129 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5130 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5131 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5132 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5133 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5134 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5135 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5136 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5140 static int bnx2x_init_common(struct bnx2x *bp)
5142 u32 val, i;
5144 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5146 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5147 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5149 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5150 if (CHIP_IS_E1H(bp))
5151 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5153 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5155 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5157 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5158 if (CHIP_IS_E1(bp)) {
5159 /* enable HW interrupt from PXP on USDM overflow
5160 bit 16 on INT_MASK_0 */
5161 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5164 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5166 #ifdef __BIG_ENDIAN
5168 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5169 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5170 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5171 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5172 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5174 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5175 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5176 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5177 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5178 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5179 #endif
5181 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5182 #ifdef BCM_ISCSI
5183 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5184 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5185 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5186 #endif
5188 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5189 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5191 /* let the HW do its magic ... */
5192 msleep(100);
5193 /* finish PXP init */
5194 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5195 if (val != 1) {
5196 BNX2X_ERR("PXP2 CFG failed\n");
5197 return -EBUSY;
5198 }
5199 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5200 if (val != 1) {
5201 BNX2X_ERR("PXP2 RD_INIT failed\n");
5202 return -EBUSY;
5203 }
5205 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5206 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5208 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5210 /* clean the DMAE memory */
5211 bp->dmae_ready = 1;
5212 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5214 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5215 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5216 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5217 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5219 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5220 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5221 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5222 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5224 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5225 /* soft reset pulse */
5226 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5227 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5229 #ifdef BCM_ISCSI
5230 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5231 #endif
5233 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5234 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5235 if (!CHIP_REV_IS_SLOW(bp)) {
5236 /* enable hw interrupt from doorbell Q */
5237 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5240 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5241 if (CHIP_REV_IS_SLOW(bp)) {
5242 /* fix for emulation and FPGA for no pause */
5243 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5244 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5245 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5246 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5249 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5250 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5252 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5253 if (CHIP_IS_E1H(bp))
5254 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5256 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5257 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5258 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5259 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5261 if (CHIP_IS_E1H(bp)) {
5262 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5263 STORM_INTMEM_SIZE_E1H/2);
5264 bnx2x_init_fill(bp,
5265 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5266 0, STORM_INTMEM_SIZE_E1H/2);
5267 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5268 STORM_INTMEM_SIZE_E1H/2);
5269 bnx2x_init_fill(bp,
5270 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5271 0, STORM_INTMEM_SIZE_E1H/2);
5272 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5273 STORM_INTMEM_SIZE_E1H/2);
5274 bnx2x_init_fill(bp,
5275 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5276 0, STORM_INTMEM_SIZE_E1H/2);
5277 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5278 STORM_INTMEM_SIZE_E1H/2);
5279 bnx2x_init_fill(bp,
5280 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5281 0, STORM_INTMEM_SIZE_E1H/2);
5282 } else { /* E1 */
5283 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5284 STORM_INTMEM_SIZE_E1);
5285 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5286 STORM_INTMEM_SIZE_E1);
5287 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5288 STORM_INTMEM_SIZE_E1);
5289 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5290 STORM_INTMEM_SIZE_E1);
5291 }
5293 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5294 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5295 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5296 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5298 /* sync semi rtc */
5299 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5300 0x80000000);
5301 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5302 0x80000000);
5304 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5305 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5306 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5308 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5309 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5310 REG_WR(bp, i, 0xc0cac01a);
5311 /* TODO: replace with something meaningful */
5313 if (CHIP_IS_E1H(bp))
5314 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5315 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5317 if (sizeof(union cdu_context) != 1024)
5318 /* we currently assume that a context is 1024 bytes */
5319 printk(KERN_ALERT PFX "please adjust the size of"
5320 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5322 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5323 val = (4 << 24) + (0 << 12) + 1024;
5324 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5325 if (CHIP_IS_E1(bp)) {
5326 /* !!! fix pxp client credit until excel update */
5327 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5328 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5331 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5332 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5334 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5335 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5337 /* PXPCS COMMON comes here */
5338 /* Reset PCIE errors for debug */
5339 REG_WR(bp, 0x2814, 0xffffffff);
5340 REG_WR(bp, 0x3820, 0xffffffff);
5342 /* EMAC0 COMMON comes here */
5343 /* EMAC1 COMMON comes here */
5344 /* DBU COMMON comes here */
5345 /* DBG COMMON comes here */
5347 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5348 if (CHIP_IS_E1H(bp)) {
5349 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5350 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5353 if (CHIP_REV_IS_SLOW(bp))
5354 msleep(200);
5356 /* finish CFC init */
5357 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5358 if (val != 1) {
5359 BNX2X_ERR("CFC LL_INIT failed\n");
5360 return -EBUSY;
5361 }
5362 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5363 if (val != 1) {
5364 BNX2X_ERR("CFC AC_INIT failed\n");
5365 return -EBUSY;
5366 }
5367 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5368 if (val != 1) {
5369 BNX2X_ERR("CFC CAM_INIT failed\n");
5370 return -EBUSY;
5371 }
5372 REG_WR(bp, CFC_REG_DEBUG0, 0);
5374 /* read NIG statistic
5375 to see if this is our first up since powerup */
5376 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5377 val = *bnx2x_sp(bp, wb_data[0]);
5379 /* do internal memory self test */
5380 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5381 BNX2X_ERR("internal mem self test failed\n");
5382 return -EBUSY;
5383 }
5385 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5386 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5387 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5388 /* Fan failure is indicated by SPIO 5 */
5389 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5390 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5392 /* set to active low mode */
5393 val = REG_RD(bp, MISC_REG_SPIO_INT);
5394 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5395 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5396 REG_WR(bp, MISC_REG_SPIO_INT, val);
5398 /* enable interrupt to signal the IGU */
5399 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5400 val |= (1 << MISC_REGISTERS_SPIO_5);
5401 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5402 break;
5404 default:
5405 break;
5406 }
5408 /* clear PXP2 attentions */
5409 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5411 enable_blocks_attention(bp);
5413 if (!BP_NOMCP(bp)) {
5414 bnx2x_acquire_phy_lock(bp);
5415 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5416 bnx2x_release_phy_lock(bp);
5417 } else
5418 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5420 return 0;
5423 static int bnx2x_init_port(struct bnx2x *bp)
5425 int port = BP_PORT(bp);
5428 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5430 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5432 /* Port PXP comes here */
5433 /* Port PXP2 comes here */
5434 #ifdef BCM_ISCSI
5438 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5439 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5440 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5441 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5446 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5447 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5448 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5449 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5454 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5455 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5456 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5457 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5458 #endif
5459 /* Port CMs come here */
5461 /* Port QM comes here */
5463 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5464 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5466 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5467 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5469 /* Port DQ comes here */
5470 /* Port BRB1 comes here */
5471 /* Port PRS comes here */
5472 /* Port TSDM comes here */
5473 /* Port CSDM comes here */
5474 /* Port USDM comes here */
5475 /* Port XSDM comes here */
5476 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5477 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5478 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5479 port ? USEM_PORT1_END : USEM_PORT0_END);
5480 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5481 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5482 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5483 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5484 /* Port UPB comes here */
5485 /* Port XPB comes here */
5487 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5488 port ? PBF_PORT1_END : PBF_PORT0_END);
5490 /* configure PBF to work without PAUSE mtu 9000 */
5491 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5493 /* update threshold */
5494 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5495 /* update init credit */
5496 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5499 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5500 msleep(5);
5501 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5503 #ifdef BCM_ISCSI
5504 /* tell the searcher where the T2 table is */
5505 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5507 wb_write[0] = U64_LO(bp->t2_mapping);
5508 wb_write[1] = U64_HI(bp->t2_mapping);
5509 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5510 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5511 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5512 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5514 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5515 /* Port SRCH comes here */
5516 #endif
5517 /* Port CDU comes here */
5518 /* Port CFC comes here */
5520 if (CHIP_IS_E1(bp)) {
5521 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5522 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5524 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5525 port ? HC_PORT1_END : HC_PORT0_END);
5527 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5528 MISC_AEU_PORT0_START,
5529 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5530 /* init aeu_mask_attn_func_0/1:
5531 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5532 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5533 * bits 4-7 are used for "per vn group attention" */
5534 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5535 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5537 /* Port PXPCS comes here */
5538 /* Port EMAC0 comes here */
5539 /* Port EMAC1 comes here */
5540 /* Port DBU comes here */
5541 /* Port DBG comes here */
5542 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5543 port ? NIG_PORT1_END : NIG_PORT0_END);
5545 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5547 if (CHIP_IS_E1H(bp)) {
5549 struct cmng_struct_per_port m_cmng_port;
5550 u32 wsum;
5551 int vn;
5552 /* 0x2 disable e1hov, 0x1 enable */
5553 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5554 (IS_E1HMF(bp) ? 0x1 : 0x2));
5556 /* Init RATE SHAPING and FAIRNESS contexts.
5557 Initialize as if there is 10G link. */
5558 wsum = bnx2x_calc_vn_wsum(bp);
5559 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5561 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5562 bnx2x_init_vn_minmax(bp, 2*vn + port,
5563 wsum, 10000, &m_cmng_port);
5566 /* Port MCP comes here */
5567 /* Port DMAE comes here */
5569 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5570 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5571 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5572 /* add SPIO 5 to group 0 */
5573 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5574 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5575 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5576 break;
5578 default:
5579 break;
5580 }
5582 bnx2x__link_reset(bp);
5584 return 0;
5587 #define ILT_PER_FUNC (768/2)
5588 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5589 /* the phys address is shifted right 12 bits and a valid bit
5590 (1) is added as the 53rd bit;
5591 then, since this is a wide register(TM),
5592 we split it into two 32 bit writes
5593 */
5594 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5595 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5596 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5597 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5599 #define CNIC_ILT_LINES 0
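/* Worked example (illustrative): for a DMA address of
 * 0x0000001234567000, ONCHIP_ADDR1() yields (addr >> 12) & 0xffffffff
 * = 0x01234567 and ONCHIP_ADDR2() yields (1 << 20) | (addr >> 44)
 * = 0x00100000, i.e. the valid bit set and no high-address bits;
 * PXP_ONE_ILT(5) packs the same line number into both halves of the
 * range, giving ((5 << 10) | 5) = 0x1405.
 */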
5601 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5603 u32 reg;
5605 if (CHIP_IS_E1H(bp))
5606 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5608 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5610 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5613 static int bnx2x_init_func(struct bnx2x *bp)
5615 int port = BP_PORT(bp);
5616 int func = BP_FUNC(bp);
5617 int i;
5619 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5621 i = FUNC_ILT_BASE(func);
5623 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5624 if (CHIP_IS_E1H(bp)) {
5625 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5626 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5627 } else /* E1 */
5628 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5629 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5632 if (CHIP_IS_E1H(bp)) {
5633 for (i = 0; i < 9; i++)
5634 bnx2x_init_block(bp,
5635 cm_start[func][i], cm_end[func][i]);
5637 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5638 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5641 /* HC init per function */
5642 if (CHIP_IS_E1H(bp)) {
5643 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5645 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5646 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5648 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5650 if (CHIP_IS_E1H(bp))
5651 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5653 /* Reset PCIE errors for debug */
5654 REG_WR(bp, 0x2114, 0xffffffff);
5655 REG_WR(bp, 0x2120, 0xffffffff);
5657 return 0;
5660 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5662 int i, rc = 0;
5664 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5665 BP_FUNC(bp), load_code);
5667 bp->dmae_ready = 0;
5668 mutex_init(&bp->dmae_mutex);
5669 bnx2x_gunzip_init(bp);
5671 switch (load_code) {
5672 case FW_MSG_CODE_DRV_LOAD_COMMON:
5673 rc = bnx2x_init_common(bp);
5674 if (rc)
5675 goto init_hw_err;
5676 /* no break */
5678 case FW_MSG_CODE_DRV_LOAD_PORT:
5679 bp->dmae_ready = 1;
5680 rc = bnx2x_init_port(bp);
5681 if (rc)
5682 goto init_hw_err;
5683 /* no break */
5685 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5686 bp->dmae_ready = 1;
5687 rc = bnx2x_init_func(bp);
5688 if (rc)
5689 goto init_hw_err;
5690 break;
5692 default:
5693 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5694 break;
5695 }
5697 if (!BP_NOMCP(bp)) {
5698 int func = BP_FUNC(bp);
5700 bp->fw_drv_pulse_wr_seq =
5701 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5702 DRV_PULSE_SEQ_MASK);
5703 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5704 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5705 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5709 /* this needs to be done before gunzip end */
5710 bnx2x_zero_def_sb(bp);
5711 for_each_queue(bp, i)
5712 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5714 init_hw_err:
5715 bnx2x_gunzip_end(bp);
5717 return rc;
5720 /* send the MCP a request, block until there is a reply */
5721 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5723 int func = BP_FUNC(bp);
5724 u32 seq = ++bp->fw_seq;
5725 u32 rc = 0;
5726 u32 cnt = 1;
5727 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5729 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5730 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5732 do {
5733 /* let the FW do its magic ... */
5734 msleep(delay);
5736 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5738 /* Give the FW up to 2 seconds (200*10ms) */
5739 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5741 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5742 cnt*delay, rc, seq);
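/* Illustrative note (assuming FW_MSG_SEQ_NUMBER_MASK covers the low
 * 16 bits): the driver writes (command | seq) to drv_mb_header, for
 * instance an opcode in the high bits combined with seq 0x0005, and
 * the MCP echoes the same sequence number in fw_mb_header, so the
 * comparison below can match a response to its request before
 * masking off FW_MSG_CODE_MASK as the return value.
 */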
5744 /* is this a reply to our command? */
5745 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5746 rc &= FW_MSG_CODE_MASK;
5748 } else {
5749 /* FW BUG! */
5750 BNX2X_ERR("FW failed to respond!\n");
5751 bnx2x_fw_dump(bp);
5752 rc = 0;
5753 }
5755 return rc;
5758 static void bnx2x_free_mem(struct bnx2x *bp)
5761 #define BNX2X_PCI_FREE(x, y, size) \
5762 do { \
5763 if (x) { \
5764 pci_free_consistent(bp->pdev, size, x, y); \
5765 x = NULL; \
5766 y = 0; \
5767 } \
5768 } while (0)
5770 #define BNX2X_FREE(x) \
5771 do { \
5772 if (x) { \
5773 vfree(x); \
5774 x = NULL; \
5775 } \
5776 } while (0)
5778 int i;
5781 for_each_queue(bp, i) {
5784 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5785 bnx2x_fp(bp, i, status_blk_mapping),
5786 sizeof(struct host_status_block) +
5787 sizeof(struct eth_tx_db_data));
5789 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5790 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5791 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5792 bnx2x_fp(bp, i, tx_desc_mapping),
5793 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5795 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5796 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5797 bnx2x_fp(bp, i, rx_desc_mapping),
5798 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5800 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5801 bnx2x_fp(bp, i, rx_comp_mapping),
5802 sizeof(struct eth_fast_path_rx_cqe) *
5803 NUM_RCQ_BD);
5805 /* SGE ring */
5806 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5807 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5808 bnx2x_fp(bp, i, rx_sge_mapping),
5809 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5811 /* end of fastpath */
5813 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5814 sizeof(struct host_def_status_block));
5816 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5817 sizeof(struct bnx2x_slowpath));
5819 #ifdef BCM_ISCSI
5820 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5821 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5822 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5823 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5824 #endif
5825 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5827 #undef BNX2X_PCI_FREE
5828 #undef BNX2X_FREE
5831 static int bnx2x_alloc_mem(struct bnx2x *bp)
5834 #define BNX2X_PCI_ALLOC(x, y, size) \
5835 do { \
5836 x = pci_alloc_consistent(bp->pdev, size, y); \
5837 if (x == NULL) \
5838 goto alloc_mem_err; \
5839 memset(x, 0, size); \
5840 } while (0)
5842 #define BNX2X_ALLOC(x, size) \
5843 do { \
5844 x = vmalloc(size); \
5845 if (x == NULL) \
5846 goto alloc_mem_err; \
5847 memset(x, 0, size); \
5848 } while (0)
5850 int i;
5853 for_each_queue(bp, i) {
5854 bnx2x_fp(bp, i, bp) = bp;
5857 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5858 &bnx2x_fp(bp, i, status_blk_mapping),
5859 sizeof(struct host_status_block) +
5860 sizeof(struct eth_tx_db_data));
5862 bnx2x_fp(bp, i, hw_tx_prods) =
5863 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5865 bnx2x_fp(bp, i, tx_prods_mapping) =
5866 bnx2x_fp(bp, i, status_blk_mapping) +
5867 sizeof(struct host_status_block);
5869 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5870 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5871 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5872 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5873 &bnx2x_fp(bp, i, tx_desc_mapping),
5874 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5876 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5877 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5878 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5879 &bnx2x_fp(bp, i, rx_desc_mapping),
5880 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5882 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5883 &bnx2x_fp(bp, i, rx_comp_mapping),
5884 sizeof(struct eth_fast_path_rx_cqe) *
5885 NUM_RCQ_BD);
5887 /* SGE ring */
5888 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5889 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5890 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5891 &bnx2x_fp(bp, i, rx_sge_mapping),
5892 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5894 /* end of fastpath */
5896 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5897 sizeof(struct host_def_status_block));
5899 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5900 sizeof(struct bnx2x_slowpath));
5902 #ifdef BCM_ISCSI
5903 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5906 for (i = 0; i < 64*1024; i += 64) {
5907 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5908 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5911 /* allocate searcher T2 table
5912 we allocate 1/4 of alloc num for T2
5913 (which is not entered into the ILT) */
5914 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5917 for (i = 0; i < 16*1024; i += 64)
5918 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5920 /* now fixup the last line in the block to point to the next block */
5921 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
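/* Illustrative view of the fixup above: T2 is an array of 64-byte
 * entries, and bytes 56-63 of entry i hold the physical address of
 * entry i+1 (bp->t2_mapping + i + 64), so the searcher can walk the
 * free list; the final write turns the list into a ring by pointing
 * the last entry back at bp->t2_mapping.
 */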
5923 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5924 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5926 /* QM queues (128*MAX_CONN) */
5927 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5928 #endif
5930 /* Slow path ring */
5931 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5933 return 0;
5935 alloc_mem_err:
5936 bnx2x_free_mem(bp);
5937 return -ENOMEM;
5939 #undef BNX2X_PCI_ALLOC
5940 #undef BNX2X_ALLOC
5943 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5945 int i;
5947 for_each_queue(bp, i) {
5948 struct bnx2x_fastpath *fp = &bp->fp[i];
5950 u16 bd_cons = fp->tx_bd_cons;
5951 u16 sw_prod = fp->tx_pkt_prod;
5952 u16 sw_cons = fp->tx_pkt_cons;
5954 while (sw_cons != sw_prod) {
5955 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5956 sw_cons++;
5961 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5963 int i, j;
5965 for_each_queue(bp, j) {
5966 struct bnx2x_fastpath *fp = &bp->fp[j];
5968 for (i = 0; i < NUM_RX_BD; i++) {
5969 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5970 struct sk_buff *skb = rx_buf->skb;
5972 if (skb == NULL)
5973 continue;
5975 pci_unmap_single(bp->pdev,
5976 pci_unmap_addr(rx_buf, mapping),
5977 bp->rx_buf_size,
5978 PCI_DMA_FROMDEVICE);
5980 rx_buf->skb = NULL;
5981 dev_kfree_skb(skb);
5983 if (!fp->disable_tpa)
5984 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5985 ETH_MAX_AGGREGATION_QUEUES_E1 :
5986 ETH_MAX_AGGREGATION_QUEUES_E1H);
5990 static void bnx2x_free_skbs(struct bnx2x *bp)
5992 bnx2x_free_tx_skbs(bp);
5993 bnx2x_free_rx_skbs(bp);
5996 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5998 int i, offset = 1;
6000 free_irq(bp->msix_table[0].vector, bp->dev);
6001 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6002 bp->msix_table[0].vector);
6004 for_each_queue(bp, i) {
6005 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6006 "state %x\n", i, bp->msix_table[i + offset].vector,
6007 bnx2x_fp(bp, i, state));
6009 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
6010 BNX2X_ERR("IRQ of fp #%d being freed while "
6011 "state != closed\n", i);
6013 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6017 static void bnx2x_free_irq(struct bnx2x *bp)
6019 if (bp->flags & USING_MSIX_FLAG) {
6020 bnx2x_free_msix_irqs(bp);
6021 pci_disable_msix(bp->pdev);
6022 bp->flags &= ~USING_MSIX_FLAG;
6023 } else
6025 free_irq(bp->pdev->irq, bp->dev);
6028 static int bnx2x_enable_msix(struct bnx2x *bp)
6030 int i, rc, offset = 1;
6032 bp->msix_table[0].entry = 0;
6034 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6036 for_each_queue(bp, i) {
6037 int igu_vec = offset + i + BP_L_ID(bp);
6039 bp->msix_table[i + offset].entry = igu_vec;
6040 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6041 "(fastpath #%u)\n", i + offset, igu_vec, i);
6044 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6045 bp->num_queues + offset);
6046 if (rc) {
6047 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6048 return -1;
6049 }
6050 bp->flags |= USING_MSIX_FLAG;
6052 return 0;
6055 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6057 int i, rc, offset = 1;
6059 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6060 bp->dev->name, bp->dev);
6061 if (rc) {
6062 BNX2X_ERR("request sp irq failed\n");
6063 return -EBUSY;
6064 }
6066 for_each_queue(bp, i) {
6067 rc = request_irq(bp->msix_table[i + offset].vector,
6068 bnx2x_msix_fp_int, 0,
6069 bp->dev->name, &bp->fp[i]);
6070 if (rc) {
6071 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6072 i + 1, -rc);
6073 bnx2x_free_msix_irqs(bp);
6074 return -EBUSY;
6075 }
6077 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6078 }
6080 return 0;
6083 static int bnx2x_req_irq(struct bnx2x *bp)
6085 int rc;
6087 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6088 bp->dev->name, bp->dev);
6089 if (!rc)
6090 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6092 return rc;
6095 static void bnx2x_napi_enable(struct bnx2x *bp)
6097 int i;
6099 for_each_queue(bp, i)
6100 napi_enable(&bnx2x_fp(bp, i, napi));
6103 static void bnx2x_napi_disable(struct bnx2x *bp)
6105 int i;
6107 for_each_queue(bp, i)
6108 napi_disable(&bnx2x_fp(bp, i, napi));
6111 static void bnx2x_netif_start(struct bnx2x *bp)
6113 if (atomic_dec_and_test(&bp->intr_sem)) {
6114 if (netif_running(bp->dev)) {
6115 if (bp->state == BNX2X_STATE_OPEN)
6116 netif_wake_queue(bp->dev);
6117 bnx2x_napi_enable(bp);
6118 bnx2x_int_enable(bp);
6123 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6125 bnx2x_int_disable_sync(bp, disable_hw);
6126 if (netif_running(bp->dev)) {
6127 bnx2x_napi_disable(bp);
6128 netif_tx_disable(bp->dev);
6129 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6134 * Init service functions
6137 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6139 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6140 int port = BP_PORT(bp);
6142 /* CAM allocation
6143 * unicasts 0-31:port0 32-63:port1
6144 * multicast 64-127:port0 128-191:port1
6145 */
6146 config->hdr.length_6b = 2;
6147 config->hdr.offset = port ? 32 : 0;
6148 config->hdr.client_id = BP_CL_ID(bp);
6149 config->hdr.reserved1 = 0;
6152 config->config_table[0].cam_entry.msb_mac_addr =
6153 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6154 config->config_table[0].cam_entry.middle_mac_addr =
6155 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6156 config->config_table[0].cam_entry.lsb_mac_addr =
6157 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6158 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6159 if (set)
6160 config->config_table[0].target_table_entry.flags = 0;
6161 else
6162 CAM_INVALIDATE(config->config_table[0]);
6163 config->config_table[0].target_table_entry.client_id = 0;
6164 config->config_table[0].target_table_entry.vlan_id = 0;
6166 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6167 (set ? "setting" : "clearing"),
6168 config->config_table[0].cam_entry.msb_mac_addr,
6169 config->config_table[0].cam_entry.middle_mac_addr,
6170 config->config_table[0].cam_entry.lsb_mac_addr);
6173 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6174 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6175 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6176 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6177 if (set)
6178 config->config_table[1].target_table_entry.flags =
6179 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6180 else
6181 CAM_INVALIDATE(config->config_table[1]);
6182 config->config_table[1].target_table_entry.client_id = 0;
6183 config->config_table[1].target_table_entry.vlan_id = 0;
6185 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6186 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6187 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6190 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6192 struct mac_configuration_cmd_e1h *config =
6193 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6195 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6196 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6200 /* CAM allocation for E1H
6201 * unicasts: by func number
6202 * multicast: 20+FUNC*20, 20 each
6204 config->hdr.length_6b = 1;
6205 config->hdr.offset = BP_FUNC(bp);
6206 config->hdr.client_id = BP_CL_ID(bp);
6207 config->hdr.reserved1 = 0;
6210 config->config_table[0].msb_mac_addr =
6211 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6212 config->config_table[0].middle_mac_addr =
6213 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6214 config->config_table[0].lsb_mac_addr =
6215 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6216 config->config_table[0].client_id = BP_L_ID(bp);
6217 config->config_table[0].vlan_id = 0;
6218 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6219 if (set)
6220 config->config_table[0].flags = BP_PORT(bp);
6221 else
6222 config->config_table[0].flags =
6223 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6225 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6226 (set ? "setting" : "clearing"),
6227 config->config_table[0].msb_mac_addr,
6228 config->config_table[0].middle_mac_addr,
6229 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6231 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6232 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6233 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6236 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6237 int *state_p, int poll)
6239 /* can take a while if any port is running */
6240 int cnt = 500;
6242 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6243 poll ? "polling" : "waiting", state, idx);
6245 might_sleep();
6246 while (cnt--) {
6247 if (poll) {
6248 bnx2x_rx_int(bp->fp, 10);
6249 /* if index is different from 0
6250 * the reply for some commands will
6251 * be on the non default queue
6252 */
6253 if (idx)
6254 bnx2x_rx_int(&bp->fp[idx], 10);
6255 }
6257 mb(); /* state is changed by bnx2x_sp_event() */
6258 if (*state_p == state)
6259 return 0;
6261 msleep(1);
6262 }
6264 /* timeout! */
6265 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6266 poll ? "polling" : "waiting", state, idx);
6267 #ifdef BNX2X_STOP_ON_ERROR
6268 bnx2x_panic();
6269 #endif
6271 return -EBUSY;
6274 static int bnx2x_setup_leading(struct bnx2x *bp)
6276 int rc;
6278 /* reset IGU state */
6279 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6282 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6284 /* Wait for completion */
6285 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6287 return rc;
6290 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6292 /* reset IGU state */
6293 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6296 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6297 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6299 /* Wait for completion */
6300 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6301 &(bp->fp[index].state), 0);
6304 static int bnx2x_poll(struct napi_struct *napi, int budget);
6305 static void bnx2x_set_rx_mode(struct net_device *dev);
6307 /* must be called with rtnl_lock */
6308 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6310 u32 load_code;
6311 int i, rc;
6312 #ifdef BNX2X_STOP_ON_ERROR
6313 if (unlikely(bp->panic))
6314 return -EPERM;
6315 #endif
6317 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6319 /* Send LOAD_REQUEST command to MCP
6320 Returns the type of LOAD command:
6321 if it is the first port to be initialized
6322 common blocks should be initialized, otherwise - not
6324 if (!BP_NOMCP(bp)) {
6325 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6326 if (!load_code) {
6327 BNX2X_ERR("MCP response failure, aborting\n");
6328 return -EBUSY;
6329 }
6330 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6331 return -EBUSY; /* other port in diagnostic mode */
6334 int port = BP_PORT(bp);
6336 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6337 load_count[0], load_count[1], load_count[2]);
6338 load_count[0]++;
6339 load_count[1 + port]++;
6340 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6341 load_count[0], load_count[1], load_count[2]);
6342 if (load_count[0] == 1)
6343 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6344 else if (load_count[1 + port] == 1)
6345 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6347 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
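/*
 * Worked example of the counters above: the first load anywhere makes
 * load_count { 1, 1, 0 } (here for port 0) and selects LOAD_COMMON;
 * the first load on the other port then sees { 2, 1, 1 } and selects
 * LOAD_PORT; any further load on an already-started port, e.g.
 * { 3, 2, 1 }, selects LOAD_FUNCTION.
 */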
6350 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6351 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6355 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6357 /* if we can't use MSI-X we only need one fp,
6358  * so try to enable MSI-X with the requested number of fp's
6359  * and fall back to INT#A with one fp
6360  */
6365 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6366 /* user requested number */
6367 bp->num_queues = use_multi;
6370 bp->num_queues = min_t(u32, num_online_cpus(),
6375 if (bnx2x_enable_msix(bp)) {
6376 /* failed to enable MSI-X */
6379 BNX2X_ERR("Multi requested but failed"
6380 " to enable MSI-X\n");
6384 "set number of queues to %d\n", bp->num_queues);
6386 if (bnx2x_alloc_mem(bp))
6389 for_each_queue(bp, i)
6390 bnx2x_fp(bp, i, disable_tpa) =
6391 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6393 if (bp->flags & USING_MSIX_FLAG) {
6394 rc = bnx2x_req_msix_irqs(bp);
6396 pci_disable_msix(bp->pdev);
6401 rc = bnx2x_req_irq(bp);
6403 BNX2X_ERR("IRQ request failed, aborting\n");
6408 for_each_queue(bp, i)
6409 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6413 rc = bnx2x_init_hw(bp, load_code);
6415 BNX2X_ERR("HW init failed, aborting\n");
6416 goto load_int_disable;
6419 /* Setup NIC internals and enable interrupts */
6420 bnx2x_nic_init(bp, load_code);
6422 /* Send LOAD_DONE command to MCP */
6423 if (!BP_NOMCP(bp)) {
6424 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6426 BNX2X_ERR("MCP response failure, aborting\n");
6428 goto load_rings_free;
6432 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6434 rc = bnx2x_setup_leading(bp);
6436 BNX2X_ERR("Setup leading failed!\n");
6437 goto load_netif_stop;
6440 if (CHIP_IS_E1H(bp))
6441 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6442 BNX2X_ERR("!!! mf_cfg function disabled\n");
6443 bp->state = BNX2X_STATE_DISABLED;
6446 if (bp->state == BNX2X_STATE_OPEN)
6447 for_each_nondefault_queue(bp, i) {
6448 rc = bnx2x_setup_multi(bp, i);
6450 goto load_netif_stop;
6454 bnx2x_set_mac_addr_e1(bp, 1);
6456 bnx2x_set_mac_addr_e1h(bp, 1);
6459 bnx2x_initial_phy_init(bp);
6461 /* Start fast path */
6462 switch (load_mode) {
6464 /* Tx queue should only be re-enabled */
6465 netif_wake_queue(bp->dev);
6466 bnx2x_set_rx_mode(bp->dev);
6470 netif_start_queue(bp->dev);
6471 bnx2x_set_rx_mode(bp->dev);
6472 if (bp->flags & USING_MSIX_FLAG)
6473 printk(KERN_INFO PFX "%s: using MSI-X\n",
6478 bnx2x_set_rx_mode(bp->dev);
6479 bp->state = BNX2X_STATE_DIAG;
6487 bnx2x__link_status_update(bp);
6489 /* start the timer */
6490 mod_timer(&bp->timer, jiffies + bp->current_interval);
6496 bnx2x_napi_disable(bp);
6498 /* Free SKBs, SGEs, TPA pool and driver internals */
6499 bnx2x_free_skbs(bp);
6500 for_each_queue(bp, i)
6501 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6503 bnx2x_int_disable_sync(bp, 1);
6510 /* TBD we really need to reset the chip
6511 if we want to recover from this */
6515 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6519 /* halt the connection */
6520 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6521 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6523 /* Wait for completion */
6524 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6525 &(bp->fp[index].state), 1);
6526 if (rc) /* timeout */
6529 /* delete cfc entry */
6530 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6532 /* Wait for completion */
6533 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6534 &(bp->fp[index].state), 1);
6538 static int bnx2x_stop_leading(struct bnx2x *bp)
6540 u16 dsb_sp_prod_idx;
6541 /* if the other port is handling traffic,
6542 this can take a lot of time */
6548 /* Send HALT ramrod */
6549 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6550 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6552 /* Wait for completion */
6553 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6554 &(bp->fp[0].state), 1);
6555 if (rc) /* timeout */
6558 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6560 /* Send PORT_DELETE ramrod */
6561 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6563 /* Wait for the completion to arrive on the default status block;
6564    we are going to reset the chip anyway,
6565    so there is not much to do if this times out
6566  */
6567 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6569 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6570 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6571 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6572 #ifdef BNX2X_STOP_ON_ERROR
6582 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6583 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6588 static void bnx2x_reset_func(struct bnx2x *bp)
6590 int port = BP_PORT(bp);
6591 int func = BP_FUNC(bp);
6595 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6596 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6598 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6601 base = FUNC_ILT_BASE(func);
6602 for (i = base; i < base + ILT_PER_FUNC; i++)
6603 bnx2x_ilt_wr(bp, i, 0);
6606 static void bnx2x_reset_port(struct bnx2x *bp)
6608 int port = BP_PORT(bp);
6611 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6613 /* Do not rcv packets to BRB */
6614 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6615 /* Do not direct rcv packets that are not for MCP to the BRB */
6616 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6617 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6620 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6623 /* Check for BRB port occupancy */
6624 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6626 DP(NETIF_MSG_IFDOWN,
6627 "BRB1 is not empty %d blocks are occupied\n", val);
6629 /* TODO: Close Doorbell port? */
6632 static void bnx2x_reset_common(struct bnx2x *bp)
6635 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6637 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
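/*
 * Illustrative helpers (not built with the driver; names hypothetical)
 * for the MISC reset register convention used above and in
 * bnx2x_undi_unload(): writing a mask to the _CLEAR address puts the
 * corresponding blocks into reset, while writing the same mask to the
 * _SET address takes them back out of reset.
 */
static inline void example_assert_reset_1(struct bnx2x *bp, u32 mask)
{
	/* blocks in mask enter reset */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, mask);
}

static inline void example_deassert_reset_1(struct bnx2x *bp, u32 mask)
{
	/* blocks in mask leave reset */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, mask);
}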
6640 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6642 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6643 BP_FUNC(bp), reset_code);
6645 switch (reset_code) {
6646 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6647 bnx2x_reset_port(bp);
6648 bnx2x_reset_func(bp);
6649 bnx2x_reset_common(bp);
6652 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6653 bnx2x_reset_port(bp);
6654 bnx2x_reset_func(bp);
6657 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6658 bnx2x_reset_func(bp);
6662 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6667 /* must be called with rtnl_lock */
6668 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6670 int port = BP_PORT(bp);
6674 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6676 bp->rx_mode = BNX2X_RX_MODE_NONE;
6677 bnx2x_set_storm_rx_mode(bp);
6679 bnx2x_netif_stop(bp, 1);
6680 if (!netif_running(bp->dev))
6681 bnx2x_napi_disable(bp);
6682 del_timer_sync(&bp->timer);
6683 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6684 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6685 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6690 /* Wait until tx fast path tasks complete */
6691 for_each_queue(bp, i) {
6692 struct bnx2x_fastpath *fp = &bp->fp[i];
6696 while (BNX2X_HAS_TX_WORK(fp)) {
6698 bnx2x_tx_int(fp, 1000);
6700 BNX2X_ERR("timeout waiting for queue[%d]\n",
6702 #ifdef BNX2X_STOP_ON_ERROR
6714 /* Give HW time to discard old tx messages */
6717 if (CHIP_IS_E1(bp)) {
6718 struct mac_configuration_cmd *config =
6719 bnx2x_sp(bp, mcast_config);
6721 bnx2x_set_mac_addr_e1(bp, 0);
6723 for (i = 0; i < config->hdr.length_6b; i++)
6724 CAM_INVALIDATE(config->config_table[i]);
6726 config->hdr.length_6b = i;
6727 if (CHIP_REV_IS_SLOW(bp))
6728 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6730 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6731 config->hdr.client_id = BP_CL_ID(bp);
6732 config->hdr.reserved1 = 0;
6734 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6735 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6736 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6739 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6741 bnx2x_set_mac_addr_e1h(bp, 0);
6743 for (i = 0; i < MC_HASH_SIZE; i++)
6744 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6747 if (unload_mode == UNLOAD_NORMAL)
6748 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6750 else if (bp->flags & NO_WOL_FLAG) {
6751 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6752 if (CHIP_IS_E1H(bp))
6753 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6755 } else if (bp->wol) {
6756 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6757 u8 *mac_addr = bp->dev->dev_addr;
6759 /* The mac address is written to entries 1-4 to
6760 preserve entry 0 which is used by the PMF */
6761 u8 entry = (BP_E1HVN(bp) + 1)*8;
6763 val = (mac_addr[0] << 8) | mac_addr[1];
6764 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6766 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6767 (mac_addr[4] << 8) | mac_addr[5];
6768 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
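/*
 * Worked example of the packing above: for E1HVN 0 the match entry
 * starts at byte offset (0 + 1) * 8 = 8, and a MAC of
 * 00:11:22:33:44:55 is written as 0x00000011 (first two bytes)
 * followed by 0x22334455 (remaining four bytes).
 */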
6770 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6773 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6775 /* Close multi and leading connections;
6776    completions for ramrods are collected in a synchronous way */
6777 for_each_nondefault_queue(bp, i)
6778 if (bnx2x_stop_multi(bp, i))
6781 rc = bnx2x_stop_leading(bp);
6783 BNX2X_ERR("Stop leading failed!\n");
6784 #ifdef BNX2X_STOP_ON_ERROR
6793 reset_code = bnx2x_fw_command(bp, reset_code);
6795 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6796 load_count[0], load_count[1], load_count[2]);
6797 load_count[0]--;
6798 load_count[1 + port]--;
6799 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6800 load_count[0], load_count[1], load_count[2]);
6801 if (load_count[0] == 0)
6802 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6803 else if (load_count[1 + port] == 0)
6804 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6806 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6809 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6810 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6811 bnx2x__link_reset(bp);
6813 /* Reset the chip */
6814 bnx2x_reset_chip(bp, reset_code);
6816 /* Report UNLOAD_DONE to MCP */
6818 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6821 /* Free SKBs, SGEs, TPA pool and driver internals */
6822 bnx2x_free_skbs(bp);
6823 for_each_queue(bp, i)
6824 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6827 bp->state = BNX2X_STATE_CLOSED;
6829 netif_carrier_off(bp->dev);
6834 static void bnx2x_reset_task(struct work_struct *work)
6836 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6838 #ifdef BNX2X_STOP_ON_ERROR
6839 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6840 " so reset not done to allow debug dump,\n"
6841 KERN_ERR " you will need to reboot when done\n");
6847 if (!netif_running(bp->dev))
6848 goto reset_task_exit;
6850 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6851 bnx2x_nic_load(bp, LOAD_NORMAL);
6857 /* end of nic load/unload */
6861 /****************************************************************************
6862 * Init service functions
6863 ****************************************************************************/
6865 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6869 /* Check if there is any driver already loaded */
6870 val = REG_RD(bp, MISC_REG_UNPREPARED);
6872 /* Check if it is the UNDI driver
6873  * UNDI driver initializes CID offset for normal bell to 0x7
6874  */
6875 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6876 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6878 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6880 int func = BP_FUNC(bp);
6884 /* clear the UNDI indication */
6885 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6887 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6889 /* try unload UNDI on port 0 */
6890 bp->func = 0;
6891 bp->fw_seq =
6892 	(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6893 	 DRV_MSG_SEQ_NUMBER_MASK);
6894 reset_code = bnx2x_fw_command(bp, reset_code);
6896 /* if UNDI is loaded on the other port */
6897 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6899 /* send "DONE" for previous unload */
6900 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6902 /* unload UNDI on port 1 */
6903 bp->func = 1;
6904 bp->fw_seq =
6905 	(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6906 	 DRV_MSG_SEQ_NUMBER_MASK);
6907 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6909 bnx2x_fw_command(bp, reset_code);
6912 /* now it's safe to release the lock */
6913 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6915 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6916 HC_REG_CONFIG_0), 0x1000);
6918 /* close input traffic and wait for it */
6919 /* Do not rcv packets to BRB */
6921 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6922 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6923 /* Do not direct rcv packets that are not for MCP to
6924    the BRB */
6926 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6927 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6930 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6931 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6934 /* save NIG port swap info */
6935 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6936 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6939 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6942 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6944 /* take the NIG out of reset and restore swap values */
6946 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6947 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6948 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6949 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6951 /* send unload done to the MCP */
6952 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6954 /* restore our func and fw_seq */
6955 bp->func = func;
6956 bp->fw_seq =
6957 	(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6958 	 DRV_MSG_SEQ_NUMBER_MASK);
6961 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6965 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6967 u32 val, val2, val3, val4, id;
6970 /* Get the chip revision id and number. */
6971 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6972 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6973 id = ((val & 0xffff) << 16);
6974 val = REG_RD(bp, MISC_REG_CHIP_REV);
6975 id |= ((val & 0xf) << 12);
6976 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6977 id |= ((val & 0xff) << 4);
6978 val = REG_RD(bp, MISC_REG_BOND_ID);
6979 id |= (val & 0xf);
6980 bp->common.chip_id = id;
6981 bp->link_params.chip_id = bp->common.chip_id;
6982 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
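/*
 * Worked example with hypothetical register values: chip num 0x1234,
 * rev 0x1, metal 0x05 and bond id 0x2 compose to
 * (0x1234 << 16) | (0x1 << 12) | (0x05 << 4) | 0x2 = 0x12341052.
 */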
6984 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6985 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6986 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6987 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6988 bp->common.flash_size, bp->common.flash_size);
6990 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6991 bp->link_params.shmem_base = bp->common.shmem_base;
6992 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6994 if (!bp->common.shmem_base ||
6995 (bp->common.shmem_base < 0xA0000) ||
6996 (bp->common.shmem_base >= 0xC0000)) {
6997 BNX2X_DEV_INFO("MCP not active\n");
6998 bp->flags |= NO_MCP_FLAG;
7002 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7003 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7004 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7005 BNX2X_ERR("BAD MCP validity signature\n");
7007 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7008 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7010 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
7011 bp->common.hw_config, bp->common.board);
7013 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7014 SHARED_HW_CFG_LED_MODE_MASK) >>
7015 SHARED_HW_CFG_LED_MODE_SHIFT);
7017 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7018 bp->common.bc_ver = val;
7019 BNX2X_DEV_INFO("bc_ver %X\n", val);
7020 if (val < BNX2X_BC_VER) {
7021 /* for now only warn
7022 * later we might need to enforce this */
7023 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7024 " please upgrade BC\n", BNX2X_BC_VER, val);
7027 if (BP_E1HVN(bp) == 0) {
7028 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7029 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7031 /* no WOL capability for E1HVN != 0 */
7032 bp->flags |= NO_WOL_FLAG;
7034 BNX2X_DEV_INFO("%sWoL capable\n",
7035 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7037 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7038 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7039 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7040 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7042 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7043 val, val2, val3, val4);
7046 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7049 int port = BP_PORT(bp);
7052 switch (switch_cfg) {
7054 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7057 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7058 switch (ext_phy_type) {
7059 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7060 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7063 bp->port.supported |= (SUPPORTED_10baseT_Half |
7064 SUPPORTED_10baseT_Full |
7065 SUPPORTED_100baseT_Half |
7066 SUPPORTED_100baseT_Full |
7067 SUPPORTED_1000baseT_Full |
7068 SUPPORTED_2500baseX_Full |
7073 SUPPORTED_Asym_Pause);
7076 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7077 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7080 bp->port.supported |= (SUPPORTED_10baseT_Half |
7081 SUPPORTED_10baseT_Full |
7082 SUPPORTED_100baseT_Half |
7083 SUPPORTED_100baseT_Full |
7084 SUPPORTED_1000baseT_Full |
7089 SUPPORTED_Asym_Pause);
7093 BNX2X_ERR("NVRAM config error. "
7094 "BAD SerDes ext_phy_config 0x%x\n",
7095 bp->link_params.ext_phy_config);
7099 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7101 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7104 case SWITCH_CFG_10G:
7105 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7108 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7109 switch (ext_phy_type) {
7110 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7111 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7114 bp->port.supported |= (SUPPORTED_10baseT_Half |
7115 SUPPORTED_10baseT_Full |
7116 SUPPORTED_100baseT_Half |
7117 SUPPORTED_100baseT_Full |
7118 SUPPORTED_1000baseT_Full |
7119 SUPPORTED_2500baseX_Full |
7120 SUPPORTED_10000baseT_Full |
7125 SUPPORTED_Asym_Pause);
7128 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7129 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7132 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7135 SUPPORTED_Asym_Pause);
7138 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7139 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7142 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7143 SUPPORTED_1000baseT_Full |
7146 SUPPORTED_Asym_Pause);
7149 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7150 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7153 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7154 SUPPORTED_1000baseT_Full |
7158 SUPPORTED_Asym_Pause);
7161 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7162 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7165 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7166 SUPPORTED_2500baseX_Full |
7167 SUPPORTED_1000baseT_Full |
7171 SUPPORTED_Asym_Pause);
7174 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7175 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7178 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7182 SUPPORTED_Asym_Pause);
7185 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7186 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7187 bp->link_params.ext_phy_config);
7191 BNX2X_ERR("NVRAM config error. "
7192 "BAD XGXS ext_phy_config 0x%x\n",
7193 bp->link_params.ext_phy_config);
7197 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7199 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7204 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7205 bp->port.link_config);
7208 bp->link_params.phy_addr = bp->port.phy_addr;
7210 /* mask what we support according to speed_cap_mask */
7211 if (!(bp->link_params.speed_cap_mask &
7212 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7213 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7215 if (!(bp->link_params.speed_cap_mask &
7216 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7217 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7219 if (!(bp->link_params.speed_cap_mask &
7220 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7221 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7223 if (!(bp->link_params.speed_cap_mask &
7224 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7225 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7227 if (!(bp->link_params.speed_cap_mask &
7228 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7229 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7230 SUPPORTED_1000baseT_Full);
7232 if (!(bp->link_params.speed_cap_mask &
7233 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7234 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7236 if (!(bp->link_params.speed_cap_mask &
7237 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7238 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7240 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7243 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7245 bp->link_params.req_duplex = DUPLEX_FULL;
7247 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7248 case PORT_FEATURE_LINK_SPEED_AUTO:
7249 if (bp->port.supported & SUPPORTED_Autoneg) {
7250 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7251 bp->port.advertising = bp->port.supported;
7254 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7256 if ((ext_phy_type ==
7257 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7259 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7260 /* force 10G, no AN */
7261 bp->link_params.req_line_speed = SPEED_10000;
7262 bp->port.advertising =
7263 (ADVERTISED_10000baseT_Full |
7267 BNX2X_ERR("NVRAM config error. "
7268 "Invalid link_config 0x%x"
7269 " Autoneg not supported\n",
7270 bp->port.link_config);
7275 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7276 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7277 bp->link_params.req_line_speed = SPEED_10;
7278 bp->port.advertising = (ADVERTISED_10baseT_Full |
7281 BNX2X_ERR("NVRAM config error. "
7282 "Invalid link_config 0x%x"
7283 " speed_cap_mask 0x%x\n",
7284 bp->port.link_config,
7285 bp->link_params.speed_cap_mask);
7290 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7291 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7292 bp->link_params.req_line_speed = SPEED_10;
7293 bp->link_params.req_duplex = DUPLEX_HALF;
7294 bp->port.advertising = (ADVERTISED_10baseT_Half |
7297 BNX2X_ERR("NVRAM config error. "
7298 "Invalid link_config 0x%x"
7299 " speed_cap_mask 0x%x\n",
7300 bp->port.link_config,
7301 bp->link_params.speed_cap_mask);
7306 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7307 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7308 bp->link_params.req_line_speed = SPEED_100;
7309 bp->port.advertising = (ADVERTISED_100baseT_Full |
7312 BNX2X_ERR("NVRAM config error. "
7313 "Invalid link_config 0x%x"
7314 " speed_cap_mask 0x%x\n",
7315 bp->port.link_config,
7316 bp->link_params.speed_cap_mask);
7321 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7322 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7323 bp->link_params.req_line_speed = SPEED_100;
7324 bp->link_params.req_duplex = DUPLEX_HALF;
7325 bp->port.advertising = (ADVERTISED_100baseT_Half |
7328 BNX2X_ERR("NVRAM config error. "
7329 "Invalid link_config 0x%x"
7330 " speed_cap_mask 0x%x\n",
7331 bp->port.link_config,
7332 bp->link_params.speed_cap_mask);
7337 case PORT_FEATURE_LINK_SPEED_1G:
7338 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7339 bp->link_params.req_line_speed = SPEED_1000;
7340 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7343 BNX2X_ERR("NVRAM config error. "
7344 "Invalid link_config 0x%x"
7345 " speed_cap_mask 0x%x\n",
7346 bp->port.link_config,
7347 bp->link_params.speed_cap_mask);
7352 case PORT_FEATURE_LINK_SPEED_2_5G:
7353 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7354 bp->link_params.req_line_speed = SPEED_2500;
7355 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7358 BNX2X_ERR("NVRAM config error. "
7359 "Invalid link_config 0x%x"
7360 " speed_cap_mask 0x%x\n",
7361 bp->port.link_config,
7362 bp->link_params.speed_cap_mask);
7367 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7368 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7369 case PORT_FEATURE_LINK_SPEED_10G_KR:
7370 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7371 bp->link_params.req_line_speed = SPEED_10000;
7372 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7375 BNX2X_ERR("NVRAM config error. "
7376 "Invalid link_config 0x%x"
7377 " speed_cap_mask 0x%x\n",
7378 bp->port.link_config,
7379 bp->link_params.speed_cap_mask);
7385 BNX2X_ERR("NVRAM config error. "
7386 "BAD link speed link_config 0x%x\n",
7387 bp->port.link_config);
7388 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7389 bp->port.advertising = bp->port.supported;
7393 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7394 PORT_FEATURE_FLOW_CONTROL_MASK);
7395 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7396 !(bp->port.supported & SUPPORTED_Autoneg))
7397 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7399 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7400 " advertising 0x%x\n",
7401 bp->link_params.req_line_speed,
7402 bp->link_params.req_duplex,
7403 bp->link_params.req_flow_ctrl, bp->port.advertising);
7406 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7408 int port = BP_PORT(bp);
7411 bp->link_params.bp = bp;
7412 bp->link_params.port = port;
7414 bp->link_params.serdes_config =
7415 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7416 bp->link_params.lane_config =
7417 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7418 bp->link_params.ext_phy_config =
7420 dev_info.port_hw_config[port].external_phy_config);
7421 bp->link_params.speed_cap_mask =
7423 dev_info.port_hw_config[port].speed_capability_mask);
7425 bp->port.link_config =
7426 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7428 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7429 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7430 " link_config 0x%08x\n",
7431 bp->link_params.serdes_config,
7432 bp->link_params.lane_config,
7433 bp->link_params.ext_phy_config,
7434 bp->link_params.speed_cap_mask, bp->port.link_config);
7436 bp->link_params.switch_cfg = (bp->port.link_config &
7437 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7438 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7440 bnx2x_link_settings_requested(bp);
7442 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7443 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7444 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7445 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7446 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7447 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7448 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7449 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7450 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7451 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
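/*
 * Illustrative sketch (not built with the driver; name hypothetical)
 * of the shmem MAC unpacking above: mac_upper holds the first two
 * bytes of the address in its low 16 bits and mac_lower the remaining
 * four, so upper = 0x00000011 with lower = 0x22334455 yields
 * 00:11:22:33:44:55.
 */
static inline void example_shmem_to_mac(u32 upper, u32 lower, u8 *addr)
{
	addr[0] = (u8)(upper >> 8 & 0xff);
	addr[1] = (u8)(upper & 0xff);
	addr[2] = (u8)(lower >> 24 & 0xff);
	addr[3] = (u8)(lower >> 16 & 0xff);
	addr[4] = (u8)(lower >> 8 & 0xff);
	addr[5] = (u8)(lower & 0xff);
}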
7454 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7456 int func = BP_FUNC(bp);
7460 bnx2x_get_common_hwinfo(bp);
7464 if (CHIP_IS_E1H(bp)) {
7466 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7468 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7469 FUNC_MF_CFG_E1HOV_TAG_MASK);
7470 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7472 	bp->e1hov = val;
7474 	BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7475 		       "(0x%04x)\n",
7476 		       func, bp->e1hov, bp->e1hov);
7478 BNX2X_DEV_INFO("Single function mode\n");
7480 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7481 " aborting\n", func);
7487 if (!BP_NOMCP(bp)) {
7488 bnx2x_get_port_hwinfo(bp);
7490 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7491 DRV_MSG_SEQ_NUMBER_MASK);
7492 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7496 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7497 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7498 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7499 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7500 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7501 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7502 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7503 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7504 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7505 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7506 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7508 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7516 /* only supposed to happen on emulation/FPGA */
7517 BNX2X_ERR("warning random MAC workaround active\n");
7518 random_ether_addr(bp->dev->dev_addr);
7519 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7525 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7527 int func = BP_FUNC(bp);
7530 /* Disable interrupt handling until HW is initialized */
7531 atomic_set(&bp->intr_sem, 1);
7533 mutex_init(&bp->port.phy_mutex);
7535 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7536 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7538 rc = bnx2x_get_hwinfo(bp);
7540 /* need to reset chip if undi was active */
7542 bnx2x_undi_unload(bp);
7544 if (CHIP_REV_IS_FPGA(bp))
7545 printk(KERN_ERR PFX "FPGA detected\n");
7547 if (BP_NOMCP(bp) && (func == 0))
7549 "MCP disabled, must load devices in order!\n");
7553 bp->flags &= ~TPA_ENABLE_FLAG;
7554 bp->dev->features &= ~NETIF_F_LRO;
7556 bp->flags |= TPA_ENABLE_FLAG;
7557 bp->dev->features |= NETIF_F_LRO;
7561 bp->tx_ring_size = MAX_TX_AVAIL;
7562 bp->rx_ring_size = MAX_RX_AVAIL;
7570 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7571 bp->current_interval = (poll ? poll : bp->timer_interval);
7573 init_timer(&bp->timer);
7574 bp->timer.expires = jiffies + bp->current_interval;
7575 bp->timer.data = (unsigned long) bp;
7576 bp->timer.function = bnx2x_timer;
7581 /****************************************************************************
7582 * ethtool service functions
7583 ****************************************************************************/
7585 /* All ethtool functions called with rtnl_lock */
7587 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7589 struct bnx2x *bp = netdev_priv(dev);
7591 cmd->supported = bp->port.supported;
7592 cmd->advertising = bp->port.advertising;
7594 if (netif_carrier_ok(dev)) {
7595 cmd->speed = bp->link_vars.line_speed;
7596 cmd->duplex = bp->link_vars.duplex;
7598 cmd->speed = bp->link_params.req_line_speed;
7599 cmd->duplex = bp->link_params.req_duplex;
7604 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7605 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7606 if (vn_max_rate < cmd->speed)
7607 cmd->speed = vn_max_rate;
7610 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7612 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7614 switch (ext_phy_type) {
7615 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7616 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7617 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7618 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7619 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7620 cmd->port = PORT_FIBRE;
7623 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7624 cmd->port = PORT_TP;
7627 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7628 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7629 bp->link_params.ext_phy_config);
7633 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7634 bp->link_params.ext_phy_config);
7638 cmd->port = PORT_TP;
7640 cmd->phy_address = bp->port.phy_addr;
7641 cmd->transceiver = XCVR_INTERNAL;
7643 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7644 cmd->autoneg = AUTONEG_ENABLE;
7646 cmd->autoneg = AUTONEG_DISABLE;
7651 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7652 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7653 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7654 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7655 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7656 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7657 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7662 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7664 struct bnx2x *bp = netdev_priv(dev);
7670 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7671 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7672 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7673 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7674 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7675 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7676 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7678 if (cmd->autoneg == AUTONEG_ENABLE) {
7679 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7680 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7684 /* advertise the requested speed and duplex if supported */
7685 cmd->advertising &= bp->port.supported;
7687 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7688 bp->link_params.req_duplex = DUPLEX_FULL;
7689 bp->port.advertising |= (ADVERTISED_Autoneg |
7692 } else { /* forced speed */
7693 /* advertise the requested speed and duplex if supported */
7694 switch (cmd->speed) {
7696 if (cmd->duplex == DUPLEX_FULL) {
7697 if (!(bp->port.supported &
7698 SUPPORTED_10baseT_Full)) {
7700 "10M full not supported\n");
7704 advertising = (ADVERTISED_10baseT_Full |
7707 if (!(bp->port.supported &
7708 SUPPORTED_10baseT_Half)) {
7710 "10M half not supported\n");
7714 advertising = (ADVERTISED_10baseT_Half |
7720 if (cmd->duplex == DUPLEX_FULL) {
7721 if (!(bp->port.supported &
7722 SUPPORTED_100baseT_Full)) {
7724 "100M full not supported\n");
7728 advertising = (ADVERTISED_100baseT_Full |
7731 if (!(bp->port.supported &
7732 SUPPORTED_100baseT_Half)) {
7734 "100M half not supported\n");
7738 advertising = (ADVERTISED_100baseT_Half |
7744 if (cmd->duplex != DUPLEX_FULL) {
7745 DP(NETIF_MSG_LINK, "1G half not supported\n");
7749 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7750 DP(NETIF_MSG_LINK, "1G full not supported\n");
7754 advertising = (ADVERTISED_1000baseT_Full |
7759 if (cmd->duplex != DUPLEX_FULL) {
7761 "2.5G half not supported\n");
7765 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7767 "2.5G full not supported\n");
7771 advertising = (ADVERTISED_2500baseX_Full |
7776 if (cmd->duplex != DUPLEX_FULL) {
7777 DP(NETIF_MSG_LINK, "10G half not supported\n");
7781 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7782 DP(NETIF_MSG_LINK, "10G full not supported\n");
7786 advertising = (ADVERTISED_10000baseT_Full |
7791 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7795 bp->link_params.req_line_speed = cmd->speed;
7796 bp->link_params.req_duplex = cmd->duplex;
7797 bp->port.advertising = advertising;
7800 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7801 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7802 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7803 bp->port.advertising);
7805 if (netif_running(dev)) {
7806 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7813 #define PHY_FW_VER_LEN 10
7815 static void bnx2x_get_drvinfo(struct net_device *dev,
7816 struct ethtool_drvinfo *info)
7818 struct bnx2x *bp = netdev_priv(dev);
7819 u8 phy_fw_ver[PHY_FW_VER_LEN];
7821 strcpy(info->driver, DRV_MODULE_NAME);
7822 strcpy(info->version, DRV_MODULE_VERSION);
7824 phy_fw_ver[0] = '\0';
7826 bnx2x_acquire_phy_lock(bp);
7827 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7828 (bp->state != BNX2X_STATE_CLOSED),
7829 phy_fw_ver, PHY_FW_VER_LEN);
7830 bnx2x_release_phy_lock(bp);
7833 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7834 (bp->common.bc_ver & 0xff0000) >> 16,
7835 (bp->common.bc_ver & 0xff00) >> 8,
7836 (bp->common.bc_ver & 0xff),
7837 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7838 strcpy(info->bus_info, pci_name(bp->pdev));
7839 info->n_stats = BNX2X_NUM_STATS;
7840 info->testinfo_len = BNX2X_NUM_TESTS;
7841 info->eedump_len = bp->common.flash_size;
7842 info->regdump_len = 0;
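/*
 * Worked example of the version formatting above: a bootcode version
 * of 0x040200 in bp->common.bc_ver renders as "BC:4.2.0" (one byte
 * per field, highest byte first).
 */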
7845 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7847 struct bnx2x *bp = netdev_priv(dev);
7849 if (bp->flags & NO_WOL_FLAG) {
7853 wol->supported = WAKE_MAGIC;
7855 wol->wolopts = WAKE_MAGIC;
7859 memset(&wol->sopass, 0, sizeof(wol->sopass));
7862 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7864 struct bnx2x *bp = netdev_priv(dev);
7866 if (wol->wolopts & ~WAKE_MAGIC)
7869 if (wol->wolopts & WAKE_MAGIC) {
7870 if (bp->flags & NO_WOL_FLAG)
7880 static u32 bnx2x_get_msglevel(struct net_device *dev)
7882 struct bnx2x *bp = netdev_priv(dev);
7884 return bp->msglevel;
7887 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7889 struct bnx2x *bp = netdev_priv(dev);
7891 if (capable(CAP_NET_ADMIN))
7892 bp->msglevel = level;
7895 static int bnx2x_nway_reset(struct net_device *dev)
7897 struct bnx2x *bp = netdev_priv(dev);
7902 if (netif_running(dev)) {
7903 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7910 static int bnx2x_get_eeprom_len(struct net_device *dev)
7912 struct bnx2x *bp = netdev_priv(dev);
7914 return bp->common.flash_size;
7917 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7919 int port = BP_PORT(bp);
7923 /* adjust timeout for emulation/FPGA */
7924 count = NVRAM_TIMEOUT_COUNT;
7925 if (CHIP_REV_IS_SLOW(bp))
7928 /* request access to nvram interface */
7929 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7930 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7932 for (i = 0; i < count*10; i++) {
7933 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7934 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7940 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7941 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7948 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7950 int port = BP_PORT(bp);
7954 /* adjust timeout for emulation/FPGA */
7955 count = NVRAM_TIMEOUT_COUNT;
7956 if (CHIP_REV_IS_SLOW(bp))
7959 /* relinquish nvram interface */
7960 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7961 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7963 for (i = 0; i < count*10; i++) {
7964 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7965 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7971 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7972 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7979 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7983 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7985 /* enable both bits, even on read */
7986 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7987 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7988 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7991 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7995 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7997 /* disable both bits, even after read */
7998 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7999 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8000 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8003 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8009 /* build the command word */
8010 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8012 /* need to clear DONE bit separately */
8013 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8015 /* address of the NVRAM to read from */
8016 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8017 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8019 /* issue a read command */
8020 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8022 /* adjust timeout for emulation/FPGA */
8023 count = NVRAM_TIMEOUT_COUNT;
8024 if (CHIP_REV_IS_SLOW(bp))
8027 /* wait for completion */
8030 for (i = 0; i < count; i++) {
8032 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8034 if (val & MCPR_NVM_COMMAND_DONE) {
8035 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8036 /* we read nvram data in cpu order,
8037  * but ethtool sees it as an array of bytes;
8038  * converting to big-endian will do the work */
8039 val = cpu_to_be32(val);
8049 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8056 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8058 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8063 if (offset + buf_size > bp->common.flash_size) {
8064 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8065 " buf_size (0x%x) > flash_size (0x%x)\n",
8066 offset, buf_size, bp->common.flash_size);
8070 /* request access to nvram interface */
8071 rc = bnx2x_acquire_nvram_lock(bp);
8075 /* enable access to nvram interface */
8076 bnx2x_enable_nvram_access(bp);
8078 /* read the first word(s) */
8079 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8080 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8081 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8082 memcpy(ret_buf, &val, 4);
8084 /* advance to the next dword */
8085 offset += sizeof(u32);
8086 ret_buf += sizeof(u32);
8087 buf_size -= sizeof(u32);
8092 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8093 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8094 memcpy(ret_buf, &val, 4);
8097 /* disable access to nvram interface */
8098 bnx2x_disable_nvram_access(bp);
8099 bnx2x_release_nvram_lock(bp);
8104 static int bnx2x_get_eeprom(struct net_device *dev,
8105 struct ethtool_eeprom *eeprom, u8 *eebuf)
8107 struct bnx2x *bp = netdev_priv(dev);
8110 if (!netif_running(dev))
8113 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8114 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8115 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8116 eeprom->len, eeprom->len);
8118 /* parameters already validated in ethtool_get_eeprom */
8120 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8125 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8130 /* build the command word */
8131 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8133 /* need to clear DONE bit separately */
8134 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8136 /* write the data */
8137 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8139 /* address of the NVRAM to write to */
8140 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8141 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8143 /* issue the write command */
8144 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8146 /* adjust timeout for emulation/FPGA */
8147 count = NVRAM_TIMEOUT_COUNT;
8148 if (CHIP_REV_IS_SLOW(bp))
8151 /* wait for completion */
8153 for (i = 0; i < count; i++) {
8155 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8156 if (val & MCPR_NVM_COMMAND_DONE) {
8165 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
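/*
 * Worked example of the macro above: for offset 0x102,
 * BYTE_OFFSET(0x102) = 8 * (0x102 & 0x03) = 16, so the single-byte
 * write below masks out bits 23..16 of the dword at aligned offset
 * 0x100 and ORs the new byte into that lane.
 */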
8167 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8175 if (offset + buf_size > bp->common.flash_size) {
8176 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8177 " buf_size (0x%x) > flash_size (0x%x)\n",
8178 offset, buf_size, bp->common.flash_size);
8182 /* request access to nvram interface */
8183 rc = bnx2x_acquire_nvram_lock(bp);
8187 /* enable access to nvram interface */
8188 bnx2x_enable_nvram_access(bp);
8190 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8191 align_offset = (offset & ~0x03);
8192 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8195 val &= ~(0xff << BYTE_OFFSET(offset));
8196 val |= (*data_buf << BYTE_OFFSET(offset));
8198 /* nvram data is returned as an array of bytes;
8199  * convert it back to cpu order */
8200 val = be32_to_cpu(val);
8202 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8206 /* disable access to nvram interface */
8207 bnx2x_disable_nvram_access(bp);
8208 bnx2x_release_nvram_lock(bp);
8213 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8221 if (buf_size == 1) /* ethtool */
8222 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8224 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8226 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8231 if (offset + buf_size > bp->common.flash_size) {
8232 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8233 " buf_size (0x%x) > flash_size (0x%x)\n",
8234 offset, buf_size, bp->common.flash_size);
8238 /* request access to nvram interface */
8239 rc = bnx2x_acquire_nvram_lock(bp);
8243 /* enable access to nvram interface */
8244 bnx2x_enable_nvram_access(bp);
8247 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8248 while ((written_so_far < buf_size) && (rc == 0)) {
8249 if (written_so_far == (buf_size - sizeof(u32)))
8250 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8251 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8252 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8253 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8254 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8256 memcpy(&val, data_buf, 4);
8258 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8260 /* advance to the next dword */
8261 offset += sizeof(u32);
8262 data_buf += sizeof(u32);
8263 written_so_far += sizeof(u32);
8267 /* disable access to nvram interface */
8268 bnx2x_disable_nvram_access(bp);
8269 bnx2x_release_nvram_lock(bp);
8274 static int bnx2x_set_eeprom(struct net_device *dev,
8275 struct ethtool_eeprom *eeprom, u8 *eebuf)
8277 struct bnx2x *bp = netdev_priv(dev);
8280 if (!netif_running(dev))
8283 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8284 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8285 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8286 eeprom->len, eeprom->len);
8288 /* parameters already validated in ethtool_set_eeprom */
8290 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8291 if (eeprom->magic == 0x00504859)
8294 bnx2x_acquire_phy_lock(bp);
8295 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8296 bp->link_params.ext_phy_config,
8297 (bp->state != BNX2X_STATE_CLOSED),
8298 eebuf, eeprom->len);
8299 if ((bp->state == BNX2X_STATE_OPEN) ||
8300 (bp->state == BNX2X_STATE_DISABLED)) {
8301 rc |= bnx2x_link_reset(&bp->link_params,
8303 rc |= bnx2x_phy_init(&bp->link_params,
8306 bnx2x_release_phy_lock(bp);
8308 } else /* Only the PMF can access the PHY */
8311 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8316 static int bnx2x_get_coalesce(struct net_device *dev,
8317 struct ethtool_coalesce *coal)
8319 struct bnx2x *bp = netdev_priv(dev);
8321 memset(coal, 0, sizeof(struct ethtool_coalesce));
8323 coal->rx_coalesce_usecs = bp->rx_ticks;
8324 coal->tx_coalesce_usecs = bp->tx_ticks;
8329 static int bnx2x_set_coalesce(struct net_device *dev,
8330 struct ethtool_coalesce *coal)
8332 struct bnx2x *bp = netdev_priv(dev);
8334 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8335 if (bp->rx_ticks > 3000)
8336 bp->rx_ticks = 3000;
8338 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8339 if (bp->tx_ticks > 0x3000)
8340 bp->tx_ticks = 0x3000;
8342 if (netif_running(dev))
8343 bnx2x_update_coalesce(bp);
8348 static void bnx2x_get_ringparam(struct net_device *dev,
8349 struct ethtool_ringparam *ering)
8351 struct bnx2x *bp = netdev_priv(dev);
8353 ering->rx_max_pending = MAX_RX_AVAIL;
8354 ering->rx_mini_max_pending = 0;
8355 ering->rx_jumbo_max_pending = 0;
8357 ering->rx_pending = bp->rx_ring_size;
8358 ering->rx_mini_pending = 0;
8359 ering->rx_jumbo_pending = 0;
8361 ering->tx_max_pending = MAX_TX_AVAIL;
8362 ering->tx_pending = bp->tx_ring_size;
8365 static int bnx2x_set_ringparam(struct net_device *dev,
8366 struct ethtool_ringparam *ering)
8368 struct bnx2x *bp = netdev_priv(dev);
8371 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8372 (ering->tx_pending > MAX_TX_AVAIL) ||
8373 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8376 bp->rx_ring_size = ering->rx_pending;
8377 bp->tx_ring_size = ering->tx_pending;
8379 if (netif_running(dev)) {
8380 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8381 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8387 static void bnx2x_get_pauseparam(struct net_device *dev,
8388 struct ethtool_pauseparam *epause)
8390 struct bnx2x *bp = netdev_priv(dev);
8392 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8393 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8395 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8396 BNX2X_FLOW_CTRL_RX);
8397 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8398 BNX2X_FLOW_CTRL_TX);
8400 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8401 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8402 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8405 static int bnx2x_set_pauseparam(struct net_device *dev,
8406 struct ethtool_pauseparam *epause)
8408 struct bnx2x *bp = netdev_priv(dev);
8413 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8414 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8415 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8417 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8419 if (epause->rx_pause)
8420 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8422 if (epause->tx_pause)
8423 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8425 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8426 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8428 if (epause->autoneg) {
8429 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8430 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8434 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8435 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8439 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8441 if (netif_running(dev)) {
8442 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8449 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8451 struct bnx2x *bp = netdev_priv(dev);
8455 /* TPA requires Rx CSUM offloading */
8456 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8457 if (!(dev->features & NETIF_F_LRO)) {
8458 dev->features |= NETIF_F_LRO;
8459 bp->flags |= TPA_ENABLE_FLAG;
8463 } else if (dev->features & NETIF_F_LRO) {
8464 dev->features &= ~NETIF_F_LRO;
8465 bp->flags &= ~TPA_ENABLE_FLAG;
8469 if (changed && netif_running(dev)) {
8470 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8471 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8477 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8479 struct bnx2x *bp = netdev_priv(dev);
8484 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8486 struct bnx2x *bp = netdev_priv(dev);
8491 /* Disable TPA when Rx CSUM is disabled; otherwise all
8492    TPA'ed packets will be discarded due to a wrong TCP CSUM */
8494 u32 flags = ethtool_op_get_flags(dev);
8496 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8502 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8505 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8506 dev->features |= NETIF_F_TSO6;
8508 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8509 dev->features &= ~NETIF_F_TSO6;
8515 static const struct {
8516 char string[ETH_GSTRING_LEN];
8517 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8518 { "register_test (offline)" },
8519 { "memory_test (offline)" },
8520 { "loopback_test (offline)" },
8521 { "nvram_test (online)" },
8522 { "interrupt_test (online)" },
8523 { "link_test (online)" },
8524 { "idle check (online)" },
8525 { "MC errors (online)" }
8528 static int bnx2x_self_test_count(struct net_device *dev)
8530 return BNX2X_NUM_TESTS;
8533 static int bnx2x_test_registers(struct bnx2x *bp)
8535 int idx, i, rc = -ENODEV;
8537 int port = BP_PORT(bp);
8538 static const struct {
8543 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8544 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8545 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8546 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8547 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8548 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8549 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8550 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8551 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8552 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8553 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8554 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8555 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8556 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8557 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8558 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8559 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8560 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8561 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8562 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8563 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8564 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8565 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8566 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8567 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8568 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8569 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8570 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8571 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8572 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8573 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8574 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8575 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8576 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8577 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8578 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8579 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8580 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8582 { 0xffffffff, 0, 0x00000000 }
8585 if (!netif_running(bp->dev))
8588 /* Repeat the test twice:
8589 First by writing 0x00000000, second by writing 0xffffffff */
8590 for (idx = 0; idx < 2; idx++) {
8597 wr_val = 0xffffffff;
8601 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8602 u32 offset, mask, save_val, val;
8604 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8605 mask = reg_tbl[i].mask;
8607 save_val = REG_RD(bp, offset);
8609 REG_WR(bp, offset, wr_val);
8610 val = REG_RD(bp, offset);
8612 /* Restore the original register's value */
8613 REG_WR(bp, offset, save_val);
8615 /* verify that the value is as expected */
8616 if ((val & mask) != (wr_val & mask))
8627 static int bnx2x_test_memory(struct bnx2x *bp)
8629 int i, j, rc = -ENODEV;
8631 static const struct {
8635 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8636 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8637 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8638 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8639 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8640 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8641 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8645 static const struct {
8651 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8652 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8653 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8654 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8655 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8656 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8658 { NULL, 0xffffffff, 0, 0 }
8661 if (!netif_running(bp->dev))
8664 /* Go through all the memories */
8665 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8666 for (j = 0; j < mem_tbl[i].size; j++)
8667 REG_RD(bp, mem_tbl[i].offset + j*4);
8669 /* Check the parity status */
8670 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8671 val = REG_RD(bp, prty_tbl[i].offset);
8672 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8673 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8675 "%s is 0x%x\n", prty_tbl[i].name, val);
8686 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8691 while (bnx2x_link_test(bp) && cnt--)
8695 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8697 unsigned int pkt_size, num_pkts, i;
8698 struct sk_buff *skb;
8699 unsigned char *packet;
8700 struct bnx2x_fastpath *fp = &bp->fp[0];
8701 u16 tx_start_idx, tx_idx;
8702 u16 rx_start_idx, rx_idx;
8704 struct sw_tx_bd *tx_buf;
8705 struct eth_tx_bd *tx_bd;
8707 union eth_rx_cqe *cqe;
8709 struct sw_rx_bd *rx_buf;
8713 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8714 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8715 bnx2x_acquire_phy_lock(bp);
8716 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8717 bnx2x_release_phy_lock(bp);
8719 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8720 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8721 bnx2x_acquire_phy_lock(bp);
8722 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8723 bnx2x_release_phy_lock(bp);
8724 /* wait until link state is restored */
8725 bnx2x_wait_for_link(bp, link_up);
8731 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8734 goto test_loopback_exit;
8736 packet = skb_put(skb, pkt_size);
8737 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8738 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8739 for (i = ETH_HLEN; i < pkt_size; i++)
8740 packet[i] = (unsigned char) (i & 0xff);
8743 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8744 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8746 pkt_prod = fp->tx_pkt_prod++;
8747 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8748 tx_buf->first_bd = fp->tx_bd_prod;
8751 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8752 mapping = pci_map_single(bp->pdev, skb->data,
8753 skb_headlen(skb), PCI_DMA_TODEVICE);
8754 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8755 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8756 tx_bd->nbd = cpu_to_le16(1);
8757 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8758 tx_bd->vlan = cpu_to_le16(pkt_prod);
8759 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8760 ETH_TX_BD_FLAGS_END_BD);
8761 tx_bd->general_data = ((UNICAST_ADDRESS <<
8762 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8766 fp->hw_tx_prods->bds_prod =
8767 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8768 mb(); /* FW restriction: must not reorder writing nbd and packets */
8769 fp->hw_tx_prods->packets_prod =
8770 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8771 DOORBELL(bp, FP_IDX(fp), 0);
8777 bp->dev->trans_start = jiffies;
8781 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8782 if (tx_idx != tx_start_idx + num_pkts)
8783 goto test_loopback_exit;
8785 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8786 if (rx_idx != rx_start_idx + num_pkts)
8787 goto test_loopback_exit;
8789 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8790 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8791 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8792 goto test_loopback_rx_exit;
8794 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8795 if (len != pkt_size)
8796 goto test_loopback_rx_exit;
8798 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8799 skb = rx_buf->skb;
8800 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8801 for (i = ETH_HLEN; i < pkt_size; i++)
8802 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8803 goto test_loopback_rx_exit;
8805 rc = 0;
8807 test_loopback_rx_exit:
8809 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8810 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8811 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8812 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8814 /* Update producers */
8815 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8816 fp->rx_sge_prod);
8818 test_loopback_exit:
8819 bp->link_params.loopback_mode = LOOPBACK_NONE;
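/* A minimal sketch (not built) of the self-describing payload check used
 * above: byte i of the test frame carries (i & 0xff), so the receive side
 * can verify the data without keeping a copy of the transmitted skb.
 */
#if 0
static int bnx2x_lb_pattern_ok_sketch(const u8 *data, unsigned int pkt_size)
{
	unsigned int i;

	for (i = ETH_HLEN; i < pkt_size; i++)
		if (data[i] != (unsigned char)(i & 0xff))
			return 0;	/* corrupted in flight */
	return 1;
}
#endif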
8824 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8826 int rc = 0;
8828 if (!netif_running(bp->dev))
8829 return BNX2X_LOOPBACK_FAILED;
8831 bnx2x_netif_stop(bp, 1);
8833 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8834 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8835 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8838 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8839 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8840 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8843 bnx2x_netif_start(bp);
8848 #define CRC32_RESIDUAL 0xdebb20e3
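/* Why one constant works for every block: running the CRC over a block
 * that carries its own CRC32 in its last four bytes always yields this
 * fixed residual, so bnx2x_test_nvram() below needs no per-block expected
 * value.  A minimal sketch (not built):
 */
#if 0
static int bnx2x_nvram_block_ok_sketch(const u8 *blk, u32 len_with_crc)
{
	/* ether_crc_le() over data *and* its stored CRC is a constant */
	return ether_crc_le(len_with_crc, blk) == CRC32_RESIDUAL;
}
#endif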
8850 static int bnx2x_test_nvram(struct bnx2x *bp)
8852 static const struct {
8853 int offset;
8854 int size;
8855 } nvram_tbl[] = {
8856 { 0, 0x14 }, /* bootstrap */
8857 { 0x14, 0xec }, /* dir */
8858 { 0x100, 0x350 }, /* manuf_info */
8859 { 0x450, 0xf0 }, /* feature_info */
8860 { 0x640, 0x64 }, /* upgrade_key_info */
8862 { 0x708, 0x70 }, /* manuf_key_info */
8863 { 0, 0 }
8864 };
8866 __be32 buf[0x350 / 4];
8867 u8 *data = (u8 *)buf;
8868 int i, rc;
8869 u32 magic, csum;
8871 rc = bnx2x_nvram_read(bp, 0, data, 4);
8872 if (rc) {
8873 DP(NETIF_MSG_PROBE, "failed to read the magic value (rc -%d)\n", -rc);
8874 goto test_nvram_exit;
8877 magic = be32_to_cpu(buf[0]);
8878 if (magic != 0x669955aa) {
8879 DP(NETIF_MSG_PROBE, "wrong magic value (0x%08x)\n", magic);
8880 rc = -ENODEV;
8881 goto test_nvram_exit;
8884 for (i = 0; nvram_tbl[i].size; i++) {
8886 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8887 nvram_tbl[i].size);
8888 if (rc) {
8889 DP(NETIF_MSG_PROBE,
8890 "nvram_tbl[%d] read failed (rc -%d)\n", i, -rc);
8891 goto test_nvram_exit;
8892 }
8894 csum = ether_crc_le(nvram_tbl[i].size, data);
8895 if (csum != CRC32_RESIDUAL) {
8896 DP(NETIF_MSG_PROBE,
8897 "nvram_tbl[%d] wrong csum value (0x%08x)\n", i, csum);
8898 rc = -ENODEV;
8899 goto test_nvram_exit;
8907 static int bnx2x_test_intr(struct bnx2x *bp)
8909 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8910 int i, rc;
8912 if (!netif_running(bp->dev))
8913 return -ENODEV;
8915 config->hdr.length_6b = 0;
8916 if (CHIP_IS_E1(bp))
8917 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
8918 else
8919 config->hdr.offset = BP_FUNC(bp);
8920 config->hdr.client_id = BP_CL_ID(bp);
8921 config->hdr.reserved1 = 0;
8923 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8924 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8925 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8926 if (rc == 0) {
8927 bp->set_mac_pending++;
8928 for (i = 0; i < 10; i++) {
8929 if (!bp->set_mac_pending)
8930 break;
8931 msleep_interruptible(10);
8932 }
8940 static void bnx2x_self_test(struct net_device *dev,
8941 struct ethtool_test *etest, u64 *buf)
8943 struct bnx2x *bp = netdev_priv(dev);
8945 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8947 if (!netif_running(dev))
8948 return;
8950 /* offline tests are not supported in MF mode */
8951 if (IS_E1HMF(bp))
8952 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8954 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8955 u8 link_up;
8957 link_up = bp->link_vars.link_up;
8958 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8959 bnx2x_nic_load(bp, LOAD_DIAG);
8960 /* wait until link state is restored */
8961 bnx2x_wait_for_link(bp, link_up);
8963 if (bnx2x_test_registers(bp) != 0) {
8965 etest->flags |= ETH_TEST_FL_FAILED;
8967 if (bnx2x_test_memory(bp) != 0) {
8969 etest->flags |= ETH_TEST_FL_FAILED;
8971 buf[2] = bnx2x_test_loopback(bp, link_up);
8972 if (buf[2] != 0)
8973 etest->flags |= ETH_TEST_FL_FAILED;
8975 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8976 bnx2x_nic_load(bp, LOAD_NORMAL);
8977 /* wait until link state is restored */
8978 bnx2x_wait_for_link(bp, link_up);
8980 if (bnx2x_test_nvram(bp) != 0) {
8982 etest->flags |= ETH_TEST_FL_FAILED;
8984 if (bnx2x_test_intr(bp) != 0) {
8986 etest->flags |= ETH_TEST_FL_FAILED;
8989 if (bnx2x_link_test(bp) != 0) {
8991 etest->flags |= ETH_TEST_FL_FAILED;
8993 buf[7] = bnx2x_mc_assert(bp);
8994 if (buf[7] != 0)
8995 etest->flags |= ETH_TEST_FL_FAILED;
8997 #ifdef BNX2X_EXTRA_DEBUG
8998 bnx2x_panic_dump(bp);
8999 #endif
9002 static const struct {
9003 long offset;
9004 int size;
9005 u32 flags;
9006 #define STATS_FLAGS_PORT 1
9007 #define STATS_FLAGS_FUNC 2
9008 u8 string[ETH_GSTRING_LEN];
9009 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9010 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9011 8, STATS_FLAGS_FUNC, "rx_bytes" },
9012 { STATS_OFFSET32(error_bytes_received_hi),
9013 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9014 { STATS_OFFSET32(total_bytes_transmitted_hi),
9015 8, STATS_FLAGS_FUNC, "tx_bytes" },
9016 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9017 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9018 { STATS_OFFSET32(total_unicast_packets_received_hi),
9019 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
9020 { STATS_OFFSET32(total_multicast_packets_received_hi),
9021 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
9022 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9023 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9024 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9025 8, STATS_FLAGS_FUNC, "tx_packets" },
9026 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9027 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9028 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9029 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9030 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9031 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9032 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9033 8, STATS_FLAGS_PORT, "rx_align_errors" },
9034 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9035 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9036 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9037 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9038 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9039 8, STATS_FLAGS_PORT, "tx_deferred" },
9040 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9041 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9042 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9043 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9044 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9045 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9046 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9047 8, STATS_FLAGS_PORT, "rx_fragments" },
9048 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9049 8, STATS_FLAGS_PORT, "rx_jabbers" },
9050 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9051 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9052 { STATS_OFFSET32(jabber_packets_received),
9053 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9054 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9055 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9056 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9057 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9058 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9059 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9060 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9061 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9062 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9063 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9064 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9065 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9066 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9067 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9068 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9069 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9070 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9071 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9072 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9073 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9074 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9075 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9076 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9077 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9078 { STATS_OFFSET32(mac_filter_discard),
9079 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9080 { STATS_OFFSET32(no_buff_discard),
9081 4, STATS_FLAGS_FUNC, "rx_discards" },
9082 { STATS_OFFSET32(xxoverflow_discard),
9083 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9084 { STATS_OFFSET32(brb_drop_hi),
9085 8, STATS_FLAGS_PORT, "brb_discard" },
9086 { STATS_OFFSET32(brb_truncate_hi),
9087 8, STATS_FLAGS_PORT, "brb_truncate" },
9088 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9089 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9090 { STATS_OFFSET32(rx_skb_alloc_failed),
9091 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9092 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9093 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9096 #define IS_NOT_E1HMF_STAT(bp, i) \
9097 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
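/* Note: in E1H multi-function mode the MAC and its counters are shared
 * between functions, so port-wide (STATS_FLAGS_PORT) statistics are not
 * meaningful per function; the string, count and value getters below all
 * filter them out with this predicate.
 */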
9099 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9101 struct bnx2x *bp = netdev_priv(dev);
9102 int i, j;
9104 switch (stringset) {
9105 case ETH_SS_STATS:
9106 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9107 if (IS_NOT_E1HMF_STAT(bp, i))
9108 continue;
9109 strcpy(buf + j*ETH_GSTRING_LEN,
9110 bnx2x_stats_arr[i].string);
9111 j++;
9112 }
9113 break;
9115 case ETH_SS_TEST:
9116 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9117 break;
9121 static int bnx2x_get_stats_count(struct net_device *dev)
9123 struct bnx2x *bp = netdev_priv(dev);
9124 int i, num_stats = 0;
9126 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9127 if (IS_NOT_E1HMF_STAT(bp, i))
9128 continue;
9129 num_stats++;
9130 }
9132 return num_stats;
9134 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9135 struct ethtool_stats *stats, u64 *buf)
9137 struct bnx2x *bp = netdev_priv(dev);
9138 u32 *hw_stats = (u32 *)&bp->eth_stats;
9139 int i, j;
9141 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9142 if (IS_NOT_E1HMF_STAT(bp, i))
9143 continue;
9145 if (bnx2x_stats_arr[i].size == 0) {
9146 /* skip this counter */
9151 if (bnx2x_stats_arr[i].size == 4) {
9152 /* 4-byte counter */
9153 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9157 /* 8-byte counter */
9158 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9159 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
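/* A minimal sketch (not built) of what HILO_U64 does above: a wide
 * counter is stored as two consecutive u32s, most-significant word at
 * the _hi offset and least-significant word in the next slot.
 */
#if 0
static u64 bnx2x_stat64_sketch(const u32 *hw_stats, int off32)
{
	return ((u64)hw_stats[off32] << 32) | hw_stats[off32 + 1];
}
#endif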
9164 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9166 struct bnx2x *bp = netdev_priv(dev);
9167 int port = BP_PORT(bp);
9168 int i;
9170 if (!netif_running(dev))
9171 return 0;
9173 if (!bp->port.pmf)
9174 return 0;
9176 if (data == 0)
9177 data = 2;
9179 for (i = 0; i < (data * 2); i++) {
9180 if ((i % 2) == 0)
9181 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9182 bp->link_params.hw_led_mode,
9183 bp->link_params.chip_id);
9184 else
9185 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9186 bp->link_params.hw_led_mode,
9187 bp->link_params.chip_id);
9189 msleep_interruptible(500);
9190 if (signal_pending(current))
9191 break;
9192 }
9194 if (bp->link_vars.link_up)
9195 bnx2x_set_led(bp, port, LED_MODE_OPER,
9196 bp->link_vars.line_speed,
9197 bp->link_params.hw_led_mode,
9198 bp->link_params.chip_id);
9203 static struct ethtool_ops bnx2x_ethtool_ops = {
9204 .get_settings = bnx2x_get_settings,
9205 .set_settings = bnx2x_set_settings,
9206 .get_drvinfo = bnx2x_get_drvinfo,
9207 .get_wol = bnx2x_get_wol,
9208 .set_wol = bnx2x_set_wol,
9209 .get_msglevel = bnx2x_get_msglevel,
9210 .set_msglevel = bnx2x_set_msglevel,
9211 .nway_reset = bnx2x_nway_reset,
9212 .get_link = ethtool_op_get_link,
9213 .get_eeprom_len = bnx2x_get_eeprom_len,
9214 .get_eeprom = bnx2x_get_eeprom,
9215 .set_eeprom = bnx2x_set_eeprom,
9216 .get_coalesce = bnx2x_get_coalesce,
9217 .set_coalesce = bnx2x_set_coalesce,
9218 .get_ringparam = bnx2x_get_ringparam,
9219 .set_ringparam = bnx2x_set_ringparam,
9220 .get_pauseparam = bnx2x_get_pauseparam,
9221 .set_pauseparam = bnx2x_set_pauseparam,
9222 .get_rx_csum = bnx2x_get_rx_csum,
9223 .set_rx_csum = bnx2x_set_rx_csum,
9224 .get_tx_csum = ethtool_op_get_tx_csum,
9225 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9226 .set_flags = bnx2x_set_flags,
9227 .get_flags = ethtool_op_get_flags,
9228 .get_sg = ethtool_op_get_sg,
9229 .set_sg = ethtool_op_set_sg,
9230 .get_tso = ethtool_op_get_tso,
9231 .set_tso = bnx2x_set_tso,
9232 .self_test_count = bnx2x_self_test_count,
9233 .self_test = bnx2x_self_test,
9234 .get_strings = bnx2x_get_strings,
9235 .phys_id = bnx2x_phys_id,
9236 .get_stats_count = bnx2x_get_stats_count,
9237 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9240 /* end of ethtool_ops */
9242 /****************************************************************************
9243 * General service functions
9244 ****************************************************************************/
9246 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9250 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9252 switch (state) {
9253 case PCI_D0:
9254 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9255 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9256 PCI_PM_CTRL_PME_STATUS));
9258 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9259 /* delay required during transition out of D3hot */
9260 msleep(20);
9261 break;
9263 case PCI_D3hot:
9264 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9265 pmcsr |= 3;
9267 if (bp->wol)
9268 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9270 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9271 pmcsr);
9273 /* No more memory access after this point until
9274 * device is brought back to D0.
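/* A minimal sketch (not built) of reading the current power state back
 * from PMCSR; the low two bits of PCI_PM_CTRL encode D0..D3hot.
 */
#if 0
static pci_power_t bnx2x_pm_state_sketch(struct bnx2x *bp)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
	return (pci_power_t)(pmcsr & PCI_PM_CTRL_STATE_MASK);
}
#endif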
9285 * net_device service functions
9288 static int bnx2x_poll(struct napi_struct *napi, int budget)
9290 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9291 napi);
9292 struct bnx2x *bp = fp->bp;
9293 int work_done = 0;
9294 u16 rx_cons_sb;
9296 #ifdef BNX2X_STOP_ON_ERROR
9297 if (unlikely(bp->panic))
9298 goto poll_panic;
9299 #endif
9301 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9302 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9303 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9305 bnx2x_update_fpsb_idx(fp);
9307 if (BNX2X_HAS_TX_WORK(fp))
9308 bnx2x_tx_int(fp, budget);
9310 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9311 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9312 rx_cons_sb++;
9313 if (BNX2X_HAS_RX_WORK(fp))
9314 work_done = bnx2x_rx_int(fp, budget);
9316 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9317 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9318 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9319 rx_cons_sb++;
9321 /* must not complete if we consumed full budget */
9322 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9324 #ifdef BNX2X_STOP_ON_ERROR
9325 poll_panic:
9326 #endif
9327 netif_rx_complete(napi);
9329 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9330 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9331 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9332 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9338 /* We split the first BD into header and data BDs
9339 * to ease the pain of our fellow microcode engineers;
9340 * we use one mapping for both BDs.
9341 * So far this has only been observed to happen
9342 * in Other Operating Systems(TM).
9343 */
9344 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9345 struct bnx2x_fastpath *fp,
9346 struct eth_tx_bd **tx_bd, u16 hlen,
9347 u16 bd_prod, int nbd)
9349 struct eth_tx_bd *h_tx_bd = *tx_bd;
9350 struct eth_tx_bd *d_tx_bd;
9351 dma_addr_t mapping;
9352 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9354 /* first fix first BD */
9355 h_tx_bd->nbd = cpu_to_le16(nbd);
9356 h_tx_bd->nbytes = cpu_to_le16(hlen);
9358 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9359 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9360 h_tx_bd->addr_lo, h_tx_bd->nbd);
9362 /* now get a new data BD
9363 * (after the pbd) and fill it */
9364 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9365 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9367 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9368 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9370 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9371 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9372 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9374 /* this marks the BD as one that has no individual mapping
9375 * the FW ignores this flag in a BD not marked start
9377 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9378 DP(NETIF_MSG_TX_QUEUED,
9379 "TSO split data size is %d (%x:%x)\n",
9380 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9382 /* update tx_bd for marking the last BD flag */
9388 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9389 {
9390 if (fix > 0)
9391 csum = (u16) ~csum_fold(csum_sub(csum,
9392 csum_partial(t_header - fix, fix, 0)));
9394 else if (fix < 0)
9395 csum = (u16) ~csum_fold(csum_add(csum,
9396 csum_partial(t_header, -fix, 0)));
9398 return swab16(csum);
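/* A minimal sketch (not built) of the identity bnx2x_csum_fix() relies
 * on: one's-complement sums compose, so a checksum that covers "fix"
 * extra bytes in front of the transport header can be corrected by
 * subtracting the partial sum of just those bytes (csum_add() handles
 * the opposite sign).  swab16() matches the parsing BD's byte order.
 */
#if 0
static u16 bnx2x_csum_drop_prefix_sketch(unsigned char *t_header,
					 __wsum whole, int fix)
{
	return swab16((u16)~csum_fold(csum_sub(whole,
				csum_partial(t_header - fix, fix, 0))));
}
#endif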
9401 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9403 u32 rc = XMIT_PLAIN;
9405 if (skb->ip_summed != CHECKSUM_PARTIAL)
9406 rc = XMIT_PLAIN;
9408 else {
9409 if (skb->protocol == htons(ETH_P_IPV6)) {
9410 rc = XMIT_CSUM_V6;
9411 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9412 rc |= XMIT_CSUM_TCP;
9414 } else {
9415 rc = XMIT_CSUM_V4;
9416 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9417 rc |= XMIT_CSUM_TCP;
9418 }
9419 }
9421 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9422 rc |= XMIT_GSO_V4;
9424 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9425 rc |= XMIT_GSO_V6;
9427 return rc;
9430 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9431 /* check if packet requires linearization (packet is too fragmented) */
9432 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9433 u32 xmit_type)
9435 int to_copy = 0;
9436 int hlen = 0;
9437 int first_bd_sz = 0;
9439 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9440 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9442 if (xmit_type & XMIT_GSO) {
9443 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9444 /* Check if LSO packet needs to be copied:
9445 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9446 int wnd_size = MAX_FETCH_BD - 3;
9447 /* Number of windows to check */
9448 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9449 int wnd_idx = 0;
9450 int frag_idx = 0;
9451 u32 wnd_sum = 0;
9453 /* Headers length */
9454 hlen = (int)(skb_transport_header(skb) - skb->data) +
9455 tcp_hdrlen(skb);
9457 /* Amount of data (w/o headers) on linear part of SKB */
9458 first_bd_sz = skb_headlen(skb) - hlen;
9460 wnd_sum = first_bd_sz;
9462 /* Calculate the first sum - it's special */
9463 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9464 wnd_sum +=
9465 skb_shinfo(skb)->frags[frag_idx].size;
9467 /* If the linear part carried data - check it */
9468 if (first_bd_sz > 0) {
9469 if (unlikely(wnd_sum < lso_mss)) {
9470 to_copy = 1;
9471 goto exit_lbl;
9472 }
9474 wnd_sum -= first_bd_sz;
9477 /* Others are easier: run through the frag list and
9478 check all windows */
9479 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9480 wnd_sum +=
9481 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9483 if (unlikely(wnd_sum < lso_mss)) {
9484 to_copy = 1;
9485 break;
9486 }
9487 wnd_sum -=
9488 skb_shinfo(skb)->frags[wnd_idx].size;
9491 } else {
9492 /* a non-LSO packet this fragmented must always
9493 be linearized */
9494 to_copy = 1;
9495 }
9496 }
9498 exit_lbl:
9499 if (unlikely(to_copy))
9500 DP(NETIF_MSG_TX_QUEUED,
9501 "Linearization IS REQUIRED for %s packet. "
9502 "num_frags %d hlen %d first_bd_sz %d\n",
9503 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9504 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
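/* A minimal sketch (not built) of the sliding-window rule enforced
 * above: with bd_sz[] holding the data BD sizes in order, every run of
 * "wnd" consecutive BDs must carry at least one MSS worth of bytes, or
 * the FW would need more than MAX_FETCH_BD descriptors for one segment.
 */
#if 0
static int bnx2x_window_check_sketch(const u32 *bd_sz, int n, int wnd,
				     u32 mss)
{
	u32 sum = 0;
	int i;

	for (i = 0; i < n; i++) {
		sum += bd_sz[i];
		if (i >= wnd)
			sum -= bd_sz[i - wnd];	/* slide the window */
		if (i >= wnd - 1 && sum < mss)
			return 1;		/* too fragmented */
	}
	return 0;
}
#endif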
9510 /* called with netif_tx_lock
9511 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9512 * netif_wake_queue()
9514 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9516 struct bnx2x *bp = netdev_priv(dev);
9517 struct bnx2x_fastpath *fp;
9518 struct sw_tx_bd *tx_buf;
9519 struct eth_tx_bd *tx_bd;
9520 struct eth_tx_parse_bd *pbd = NULL;
9521 u16 pkt_prod, bd_prod;
9522 int nbd, fp_index;
9523 dma_addr_t mapping;
9524 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9525 int vlan_off = (bp->e1hov ? 4 : 0);
9526 int i;
9527 u8 hlen = 0;
9529 #ifdef BNX2X_STOP_ON_ERROR
9530 if (unlikely(bp->panic))
9531 return NETDEV_TX_BUSY;
9532 #endif
9534 fp_index = (smp_processor_id() % bp->num_queues);
9535 fp = &bp->fp[fp_index];
9537 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9538 bp->eth_stats.driver_xoff++;
9539 netif_stop_queue(dev);
9540 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9541 return NETDEV_TX_BUSY;
9544 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9545 " gso type %x xmit_type %x\n",
9546 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9547 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9549 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9550 /* First, check if we need to linearize the skb
9551 (due to FW restrictions) */
9552 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9553 /* Statistics of linearization */
9554 bp->lin_cnt++;
9555 if (skb_linearize(skb) != 0) {
9556 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9557 "silently dropping this SKB\n");
9558 dev_kfree_skb_any(skb);
9559 return NETDEV_TX_OK;
9564 /*
9565 Please read carefully. First we use one BD which we mark as start,
9566 then for TSO or xsum we have a parsing info BD,
9567 and only then we have the rest of the TSO BDs.
9568 (don't forget to mark the last one as last,
9569 and to unmap only AFTER you write to the BD ...)
9570 And above all, all pbd sizes are in words - NOT DWORDS!
9571 */
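/*
 * So a worst-case TSO packet flows through the ring like this (sketch):
 *
 *   BD n+0  start BD    - headers only, if the linear part was split
 *   BD n+1  parsing BD  - offsets (in words), mss, pseudo checksum
 *   BD n+2  data BD     - remainder of the linear part (bnx2x_tx_split)
 *   BD n+3+ frag BDs    - one per page fragment, the last marked END_BD
 */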
9573 pkt_prod = fp->tx_pkt_prod++;
9574 bd_prod = TX_BD(fp->tx_bd_prod);
9576 /* get a tx_buf and first BD */
9577 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9578 tx_bd = &fp->tx_desc_ring[bd_prod];
9580 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9581 tx_bd->general_data = (UNICAST_ADDRESS <<
9582 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9584 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9586 /* remember the first BD of the packet */
9587 tx_buf->first_bd = fp->tx_bd_prod;
9590 DP(NETIF_MSG_TX_QUEUED,
9591 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9592 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9594 #ifdef BCM_VLAN
9595 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9596 (bp->flags & HW_VLAN_TX_FLAG)) {
9597 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9598 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9599 } else
9601 #endif
9602 tx_bd->vlan = cpu_to_le16(pkt_prod);
9605 /* turn on parsing and get a BD */
9606 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9607 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9609 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9612 if (xmit_type & XMIT_CSUM) {
9613 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9615 /* for now NS flag is not used in Linux */
9616 pbd->global_data = (hlen |
9617 ((skb->protocol == htons(ETH_P_8021Q)) <<
9618 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9620 pbd->ip_hlen = (skb_transport_header(skb) -
9621 skb_network_header(skb)) / 2;
9623 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9625 pbd->total_hlen = cpu_to_le16(hlen);
9626 hlen = hlen*2 - vlan_off;
9628 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9630 if (xmit_type & XMIT_CSUM_V4)
9631 tx_bd->bd_flags.as_bitfield |=
9632 ETH_TX_BD_FLAGS_IP_CSUM;
9633 else
9634 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9636 if (xmit_type & XMIT_CSUM_TCP) {
9637 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9639 } else {
9640 s8 fix = SKB_CS_OFF(skb); /* signed! */
9642 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9643 pbd->cs_offset = fix / 2;
9645 DP(NETIF_MSG_TX_QUEUED,
9646 "hlen %d offset %d fix %d csum before fix %x\n",
9647 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9648 SKB_CS(skb));
9650 /* HW bug: fixup the CSUM */
9651 pbd->tcp_pseudo_csum =
9652 bnx2x_csum_fix(skb_transport_header(skb),
9653 SKB_CS(skb), fix);
9655 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9656 pbd->tcp_pseudo_csum);
9660 mapping = pci_map_single(bp->pdev, skb->data,
9661 skb_headlen(skb), PCI_DMA_TODEVICE);
9663 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9664 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9665 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9666 tx_bd->nbd = cpu_to_le16(nbd);
9667 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9669 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9670 " nbytes %d flags %x vlan %x\n",
9671 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9672 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9673 le16_to_cpu(tx_bd->vlan));
9675 if (xmit_type & XMIT_GSO) {
9677 DP(NETIF_MSG_TX_QUEUED,
9678 "TSO packet len %d hlen %d total len %d tso size %d\n",
9679 skb->len, hlen, skb_headlen(skb),
9680 skb_shinfo(skb)->gso_size);
9682 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9684 if (unlikely(skb_headlen(skb) > hlen))
9685 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9686 bd_prod, ++nbd);
9688 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9689 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9690 pbd->tcp_flags = pbd_tcp_flags(skb);
9692 if (xmit_type & XMIT_GSO_V4) {
9693 pbd->ip_id = swab16(ip_hdr(skb)->id);
9694 pbd->tcp_pseudo_csum =
9695 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9696 ip_hdr(skb)->daddr,
9697 0, IPPROTO_TCP, 0));
9699 } else
9700 pbd->tcp_pseudo_csum =
9701 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9702 &ipv6_hdr(skb)->daddr,
9703 0, IPPROTO_TCP, 0));
9705 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9708 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9709 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9711 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9712 tx_bd = &fp->tx_desc_ring[bd_prod];
9714 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9715 frag->size, PCI_DMA_TODEVICE);
9717 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9718 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9719 tx_bd->nbytes = cpu_to_le16(frag->size);
9720 tx_bd->vlan = cpu_to_le16(pkt_prod);
9721 tx_bd->bd_flags.as_bitfield = 0;
9723 DP(NETIF_MSG_TX_QUEUED,
9724 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9725 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9726 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9729 /* now at last mark the BD as the last BD */
9730 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9732 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9733 tx_bd, tx_bd->bd_flags.as_bitfield);
9735 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9737 /* now send a tx doorbell, counting the next BD
9738 * if the packet contains or ends with it
9740 if (TX_BD_POFF(bd_prod) < nbd)
9741 nbd++;
9743 if (pbd)
9744 DP(NETIF_MSG_TX_QUEUED,
9745 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9746 " tcp_flags %x xsum %x seq %u hlen %u\n",
9747 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9748 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9749 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9751 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9754 * Make sure that the BD data is updated before updating the producer
9755 * since FW might read the BD right after the producer is updated.
9756 * This is only applicable for weak-ordered memory model archs such
9757 * as IA-64. The following barrier is also mandatory since the FW
9758 * assumes packets must have BDs.
9759 */
9761 wmb();
9762 fp->hw_tx_prods->bds_prod =
9763 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9764 mb(); /* FW restriction: must not reorder writing nbd and packets */
9765 fp->hw_tx_prods->packets_prod =
9766 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9767 DOORBELL(bp, FP_IDX(fp), 0);
9771 fp->tx_bd_prod += nbd;
9772 dev->trans_start = jiffies;
9774 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9775 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9776 if we put Tx into XOFF state. */
9777 smp_mb();
9778 netif_stop_queue(dev);
9779 bp->eth_stats.driver_xoff++;
9780 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9781 netif_wake_queue(dev);
9785 return NETDEV_TX_OK;
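/* A minimal sketch (not built) of the XOFF/XON handshake used above:
 * availability is re-checked after stopping the queue, so a completion
 * that freed BDs between the check and the stop wakes the queue again
 * instead of leaving it stopped forever.
 */
#if 0
static void bnx2x_tx_maybe_stop_sketch(struct net_device *dev,
				       struct bnx2x_fastpath *fp)
{
	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		smp_mb();	/* see the updated producers before stopping */
		netif_stop_queue(dev);
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_wake_queue(dev);
	}
}
#endif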
9788 /* called with rtnl_lock */
9789 static int bnx2x_open(struct net_device *dev)
9791 struct bnx2x *bp = netdev_priv(dev);
9793 bnx2x_set_power_state(bp, PCI_D0);
9795 return bnx2x_nic_load(bp, LOAD_OPEN);
9798 /* called with rtnl_lock */
9799 static int bnx2x_close(struct net_device *dev)
9801 struct bnx2x *bp = netdev_priv(dev);
9803 /* Unload the driver, release IRQs */
9804 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9805 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9806 if (!CHIP_REV_IS_SLOW(bp))
9807 bnx2x_set_power_state(bp, PCI_D3hot);
9812 /* called with netif_tx_lock from set_multicast */
9813 static void bnx2x_set_rx_mode(struct net_device *dev)
9815 struct bnx2x *bp = netdev_priv(dev);
9816 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9817 int port = BP_PORT(bp);
9819 if (bp->state != BNX2X_STATE_OPEN) {
9820 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9821 return;
9822 }
9824 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9826 if (dev->flags & IFF_PROMISC)
9827 rx_mode = BNX2X_RX_MODE_PROMISC;
9829 else if ((dev->flags & IFF_ALLMULTI) ||
9830 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9831 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9833 else { /* some multicasts */
9834 if (CHIP_IS_E1(bp)) {
9835 int i, old;
9836 struct dev_mc_list *mclist;
9837 struct mac_configuration_cmd *config =
9838 bnx2x_sp(bp, mcast_config);
9840 for (i = 0, mclist = dev->mc_list;
9841 mclist && (i < dev->mc_count);
9842 i++, mclist = mclist->next) {
9844 config->config_table[i].
9845 cam_entry.msb_mac_addr =
9846 swab16(*(u16 *)&mclist->dmi_addr[0]);
9847 config->config_table[i].
9848 cam_entry.middle_mac_addr =
9849 swab16(*(u16 *)&mclist->dmi_addr[2]);
9850 config->config_table[i].
9851 cam_entry.lsb_mac_addr =
9852 swab16(*(u16 *)&mclist->dmi_addr[4]);
9853 config->config_table[i].cam_entry.flags =
9854 cpu_to_le16(port);
9855 config->config_table[i].
9856 target_table_entry.flags = 0;
9857 config->config_table[i].
9858 target_table_entry.client_id = 0;
9859 config->config_table[i].
9860 target_table_entry.vlan_id = 0;
9862 DP(NETIF_MSG_IFUP,
9863 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9864 config->config_table[i].
9865 cam_entry.msb_mac_addr,
9866 config->config_table[i].
9867 cam_entry.middle_mac_addr,
9868 config->config_table[i].
9869 cam_entry.lsb_mac_addr);
9871 old = config->hdr.length_6b;
9873 for (; i < old; i++) {
9874 if (CAM_IS_INVALID(config->
9875 config_table[i])) {
9876 /* already invalidated */
9877 break;
9878 }
9879 /* invalidate */
9880 CAM_INVALIDATE(config->
9881 config_table[i]);
9885 if (CHIP_REV_IS_SLOW(bp))
9886 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9887 else
9888 offset = BNX2X_MAX_MULTICAST*(1 + port);
9890 config->hdr.length_6b = i;
9891 config->hdr.offset = offset;
9892 config->hdr.client_id = BP_CL_ID(bp);
9893 config->hdr.reserved1 = 0;
9895 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9896 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9897 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9898 0);
9899 } else {
9900 /* Accept one or more multicasts */
9901 struct dev_mc_list *mclist;
9902 u32 mc_filter[MC_HASH_SIZE];
9903 u32 crc, bit, regidx;
9904 int i;
9906 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9908 for (i = 0, mclist = dev->mc_list;
9909 mclist && (i < dev->mc_count);
9910 i++, mclist = mclist->next) {
9912 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9913 mclist->dmi_addr);
9915 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9916 bit = (crc >> 24) & 0xff;
9917 regidx = bit >> 5;
9918 bit &= 0x1f;
9919 mc_filter[regidx] |= (1 << bit);
9922 for (i = 0; i < MC_HASH_SIZE; i++)
9923 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9924 mc_filter[i]);
9928 bp->rx_mode = rx_mode;
9929 bnx2x_set_storm_rx_mode(bp);
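/* A minimal sketch (not built) of the whole per-address computation in
 * the E1H branch above: the top byte of the little-endian CRC32c picks
 * one of 256 bins spread across the MC_HASH_SIZE u32 registers.
 */
#if 0
static void bnx2x_mc_hash_set_sketch(u32 *mc_filter, const u8 *mac)
{
	u32 crc = crc32c_le(0, mac, ETH_ALEN);
	u32 bin = (crc >> 24) & 0xff;

	mc_filter[bin >> 5] |= 1 << (bin & 0x1f);
}
#endif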
9932 /* called with rtnl_lock */
9933 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9935 struct sockaddr *addr = p;
9936 struct bnx2x *bp = netdev_priv(dev);
9938 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9939 return -EINVAL;
9941 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9942 if (netif_running(dev)) {
9943 if (CHIP_IS_E1(bp))
9944 bnx2x_set_mac_addr_e1(bp, 1);
9945 else
9946 bnx2x_set_mac_addr_e1h(bp, 1);
9947 }
9949 return 0;
9952 /* called with rtnl_lock */
9953 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9955 struct mii_ioctl_data *data = if_mii(ifr);
9956 struct bnx2x *bp = netdev_priv(dev);
9957 int port = BP_PORT(bp);
9958 int err;
9960 switch (cmd) {
9961 case SIOCGMIIPHY:
9962 data->phy_id = bp->port.phy_addr;
9964 /* fallthrough */
9966 case SIOCGMIIREG: {
9967 u16 mii_regval;
9969 if (!netif_running(dev))
9970 return -EAGAIN;
9972 mutex_lock(&bp->port.phy_mutex);
9973 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9974 DEFAULT_PHY_DEV_ADDR,
9975 (data->reg_num & 0x1f), &mii_regval);
9976 data->val_out = mii_regval;
9977 mutex_unlock(&bp->port.phy_mutex);
9978 return err;
9979 }
9981 case SIOCSMIIREG:
9982 if (!capable(CAP_NET_ADMIN))
9983 return -EPERM;
9985 if (!netif_running(dev))
9988 mutex_lock(&bp->port.phy_mutex);
9989 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9990 DEFAULT_PHY_DEV_ADDR,
9991 (data->reg_num & 0x1f), data->val_in);
9992 mutex_unlock(&bp->port.phy_mutex);
9993 return err;
9995 default:
9996 /* do nothing */
9997 break;
9998 }
10000 return -EOPNOTSUPP;
10003 /* called with rtnl_lock */
10004 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10006 struct bnx2x *bp = netdev_priv(dev);
10009 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10010 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10011 return -EINVAL;
10013 /* This does not race with packet allocation
10014 * because the actual alloc size is
10015 * only updated as part of load
10017 dev->mtu = new_mtu;
10019 if (netif_running(dev)) {
10020 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10021 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10027 static void bnx2x_tx_timeout(struct net_device *dev)
10029 struct bnx2x *bp = netdev_priv(dev);
10031 #ifdef BNX2X_STOP_ON_ERROR
10032 if (!bp->panic)
10033 bnx2x_panic();
10034 #endif
10035 /* This allows the netif to be shut down gracefully before resetting */
10036 schedule_work(&bp->reset_task);
10040 /* called with rtnl_lock */
10041 static void bnx2x_vlan_rx_register(struct net_device *dev,
10042 struct vlan_group *vlgrp)
10044 struct bnx2x *bp = netdev_priv(dev);
10046 bp->vlgrp = vlgrp;
10048 /* Set flags according to the required capabilities */
10049 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10051 if (dev->features & NETIF_F_HW_VLAN_TX)
10052 bp->flags |= HW_VLAN_TX_FLAG;
10054 if (dev->features & NETIF_F_HW_VLAN_RX)
10055 bp->flags |= HW_VLAN_RX_FLAG;
10057 if (netif_running(dev))
10058 bnx2x_set_client_config(bp);
10063 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10064 static void poll_bnx2x(struct net_device *dev)
10066 struct bnx2x *bp = netdev_priv(dev);
10068 disable_irq(bp->pdev->irq);
10069 bnx2x_interrupt(bp->pdev->irq, dev);
10070 enable_irq(bp->pdev->irq);
10074 static const struct net_device_ops bnx2x_netdev_ops = {
10075 .ndo_open = bnx2x_open,
10076 .ndo_stop = bnx2x_close,
10077 .ndo_start_xmit = bnx2x_start_xmit,
10078 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10079 .ndo_set_mac_address = bnx2x_change_mac_addr,
10080 .ndo_validate_addr = eth_validate_addr,
10081 .ndo_do_ioctl = bnx2x_ioctl,
10082 .ndo_change_mtu = bnx2x_change_mtu,
10083 .ndo_tx_timeout = bnx2x_tx_timeout,
10084 #ifdef BCM_VLAN
10085 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10086 #endif
10087 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10088 .ndo_poll_controller = poll_bnx2x,
10093 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10094 struct net_device *dev)
10096 struct bnx2x *bp;
10097 int rc;
10099 SET_NETDEV_DEV(dev, &pdev->dev);
10100 bp = netdev_priv(dev);
10102 bp->dev = dev;
10103 bp->pdev = pdev;
10104 bp->flags = 0;
10105 bp->func = PCI_FUNC(pdev->devfn);
10107 rc = pci_enable_device(pdev);
10108 if (rc) {
10109 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10110 goto err_out;
10111 }
10113 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10114 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10115 " aborting\n");
10116 rc = -ENODEV;
10117 goto err_out_disable;
10120 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10121 printk(KERN_ERR PFX "Cannot find second PCI device"
10122 " base address, aborting\n");
10123 rc = -ENODEV;
10124 goto err_out_disable;
10127 if (atomic_read(&pdev->enable_cnt) == 1) {
10128 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10129 if (rc) {
10130 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10131 " aborting\n");
10132 goto err_out_disable;
10133 }
10135 pci_set_master(pdev);
10136 pci_save_state(pdev);
10139 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10140 if (bp->pm_cap == 0) {
10141 printk(KERN_ERR PFX "Cannot find power management"
10142 " capability, aborting\n");
10143 rc = -EIO;
10144 goto err_out_release;
10147 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10148 if (bp->pcie_cap == 0) {
10149 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10150 " aborting\n");
10151 rc = -EIO;
10152 goto err_out_release;
10155 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10156 bp->flags |= USING_DAC_FLAG;
10157 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10158 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10159 " failed, aborting\n");
10160 rc = -EIO;
10161 goto err_out_release;
10164 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10165 printk(KERN_ERR PFX "System does not support DMA,"
10166 " aborting\n");
10167 rc = -EIO;
10168 goto err_out_release;
10171 dev->mem_start = pci_resource_start(pdev, 0);
10172 dev->base_addr = dev->mem_start;
10173 dev->mem_end = pci_resource_end(pdev, 0);
10175 dev->irq = pdev->irq;
10177 bp->regview = pci_ioremap_bar(pdev, 0);
10178 if (!bp->regview) {
10179 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10181 goto err_out_release;
10184 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10185 min_t(u64, BNX2X_DB_SIZE,
10186 pci_resource_len(pdev, 2)));
10187 if (!bp->doorbells) {
10188 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10190 goto err_out_unmap;
10193 bnx2x_set_power_state(bp, PCI_D0);
10195 /* clean indirect addresses */
10196 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10197 PCICFG_VENDOR_ID_OFFSET);
10198 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10199 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10200 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10201 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10203 dev->watchdog_timeo = TX_TIMEOUT;
10205 dev->netdev_ops = &bnx2x_netdev_ops;
10206 dev->ethtool_ops = &bnx2x_ethtool_ops;
10207 dev->features |= NETIF_F_SG;
10208 dev->features |= NETIF_F_HW_CSUM;
10209 if (bp->flags & USING_DAC_FLAG)
10210 dev->features |= NETIF_F_HIGHDMA;
10211 #ifdef BCM_VLAN
10212 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10213 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10214 #endif
10215 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10216 dev->features |= NETIF_F_TSO6;
10218 return 0;
10220 err_out_unmap:
10221 if (bp->regview) {
10222 iounmap(bp->regview);
10223 bp->regview = NULL;
10225 if (bp->doorbells) {
10226 iounmap(bp->doorbells);
10227 bp->doorbells = NULL;
10230 err_out_release:
10231 if (atomic_read(&pdev->enable_cnt) == 1)
10232 pci_release_regions(pdev);
10234 err_out_disable:
10235 pci_disable_device(pdev);
10236 pci_set_drvdata(pdev, NULL);
10238 err_out:
10239 return rc;
10242 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10244 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10246 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10250 /* return value of 1=2.5GHz 2=5GHz */
10251 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10253 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10255 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10259 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10260 const struct pci_device_id *ent)
10262 static int version_printed;
10263 struct net_device *dev = NULL;
10267 if (version_printed++ == 0)
10268 printk(KERN_INFO "%s", version);
10270 /* dev zeroed in init_etherdev */
10271 dev = alloc_etherdev(sizeof(*bp));
10273 printk(KERN_ERR PFX "Cannot allocate net device\n");
10277 bp = netdev_priv(dev);
10278 bp->msglevel = debug;
10280 rc = bnx2x_init_dev(pdev, dev);
10286 pci_set_drvdata(pdev, dev);
10288 rc = bnx2x_init_bp(bp);
10290 goto init_one_exit;
10292 rc = register_netdev(dev);
10294 dev_err(&pdev->dev, "Cannot register net device\n");
10295 goto init_one_exit;
10298 netif_carrier_off(dev);
10300 bp->common.name = board_info[ent->driver_data].name;
10301 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10302 " IRQ %d, ", dev->name, bp->common.name,
10303 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10304 bnx2x_get_pcie_width(bp),
10305 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10306 dev->base_addr, bp->pdev->irq);
10307 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10308 return 0;
10310 init_one_exit:
10311 if (bp->regview)
10312 iounmap(bp->regview);
10314 if (bp->doorbells)
10315 iounmap(bp->doorbells);
10317 free_netdev(dev);
10319 if (atomic_read(&pdev->enable_cnt) == 1)
10320 pci_release_regions(pdev);
10322 pci_disable_device(pdev);
10323 pci_set_drvdata(pdev, NULL);
10325 return rc;
10328 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10330 struct net_device *dev = pci_get_drvdata(pdev);
10331 struct bnx2x *bp;
10333 if (!dev) {
10334 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10335 return;
10336 }
10337 bp = netdev_priv(dev);
10339 unregister_netdev(dev);
10341 if (bp->regview)
10342 iounmap(bp->regview);
10344 if (bp->doorbells)
10345 iounmap(bp->doorbells);
10347 free_netdev(dev);
10349 if (atomic_read(&pdev->enable_cnt) == 1)
10350 pci_release_regions(pdev);
10352 pci_disable_device(pdev);
10353 pci_set_drvdata(pdev, NULL);
10356 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10358 struct net_device *dev = pci_get_drvdata(pdev);
10360 struct bnx2x *bp;
10361 if (!dev) {
10362 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10363 return -ENODEV;
10364 }
10365 bp = netdev_priv(dev);
10367 rtnl_lock();
10369 pci_save_state(pdev);
10371 if (!netif_running(dev)) {
10372 rtnl_unlock();
10373 return 0;
10374 }
10376 netif_device_detach(dev);
10378 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10380 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10382 rtnl_unlock();
10384 return 0;
10387 static int bnx2x_resume(struct pci_dev *pdev)
10389 struct net_device *dev = pci_get_drvdata(pdev);
10390 struct bnx2x *bp;
10391 int rc;
10393 if (!dev) {
10394 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10395 return -ENODEV;
10396 }
10397 bp = netdev_priv(dev);
10399 rtnl_lock();
10401 pci_restore_state(pdev);
10403 if (!netif_running(dev)) {
10404 rtnl_unlock();
10405 return 0;
10406 }
10408 bnx2x_set_power_state(bp, PCI_D0);
10409 netif_device_attach(dev);
10411 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10413 rtnl_unlock();
10415 return rc;
10418 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10420 int i;
10422 bp->state = BNX2X_STATE_ERROR;
10424 bp->rx_mode = BNX2X_RX_MODE_NONE;
10426 bnx2x_netif_stop(bp, 0);
10428 del_timer_sync(&bp->timer);
10429 bp->stats_state = STATS_STATE_DISABLED;
10430 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10433 bnx2x_free_irq(bp);
10435 if (CHIP_IS_E1(bp)) {
10436 struct mac_configuration_cmd *config =
10437 bnx2x_sp(bp, mcast_config);
10439 for (i = 0; i < config->hdr.length_6b; i++)
10440 CAM_INVALIDATE(config->config_table[i]);
10443 /* Free SKBs, SGEs, TPA pool and driver internals */
10444 bnx2x_free_skbs(bp);
10445 for_each_queue(bp, i)
10446 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10447 bnx2x_free_mem(bp);
10449 bp->state = BNX2X_STATE_CLOSED;
10451 netif_carrier_off(bp->dev);
10456 static void bnx2x_eeh_recover(struct bnx2x *bp)
10458 u32 val;
10460 mutex_init(&bp->port.phy_mutex);
10462 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10463 bp->link_params.shmem_base = bp->common.shmem_base;
10464 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10466 if (!bp->common.shmem_base ||
10467 (bp->common.shmem_base < 0xA0000) ||
10468 (bp->common.shmem_base >= 0xC0000)) {
10469 BNX2X_DEV_INFO("MCP not active\n");
10470 bp->flags |= NO_MCP_FLAG;
10471 return;
10472 }
10474 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10475 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10476 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10477 BNX2X_ERR("BAD MCP validity signature\n");
10479 if (!BP_NOMCP(bp)) {
10480 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10481 & DRV_MSG_SEQ_NUMBER_MASK);
10482 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10487 * bnx2x_io_error_detected - called when PCI error is detected
10488 * @pdev: Pointer to PCI device
10489 * @state: The current pci connection state
10491 * This function is called after a PCI bus error affecting
10492 * this device has been detected.
10494 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10495 pci_channel_state_t state)
10497 struct net_device *dev = pci_get_drvdata(pdev);
10498 struct bnx2x *bp = netdev_priv(dev);
10500 rtnl_lock();
10502 netif_device_detach(dev);
10504 if (netif_running(dev))
10505 bnx2x_eeh_nic_unload(bp);
10507 pci_disable_device(pdev);
10509 rtnl_unlock();
10511 /* Request a slot reset */
10512 return PCI_ERS_RESULT_NEED_RESET;
10516 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10517 * @pdev: Pointer to PCI device
10519 * Restart the card from scratch, as if from a cold-boot.
10521 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10523 struct net_device *dev = pci_get_drvdata(pdev);
10524 struct bnx2x *bp = netdev_priv(dev);
10526 rtnl_lock();
10528 if (pci_enable_device(pdev)) {
10529 dev_err(&pdev->dev,
10530 "Cannot re-enable PCI device after reset\n");
10531 rtnl_unlock();
10532 return PCI_ERS_RESULT_DISCONNECT;
10535 pci_set_master(pdev);
10536 pci_restore_state(pdev);
10538 if (netif_running(dev))
10539 bnx2x_set_power_state(bp, PCI_D0);
10541 rtnl_unlock();
10543 return PCI_ERS_RESULT_RECOVERED;
10547 * bnx2x_io_resume - called when traffic can start flowing again
10548 * @pdev: Pointer to PCI device
10550 * This callback is called when the error recovery driver tells us that
10551 * it's OK to resume normal operation.
10553 static void bnx2x_io_resume(struct pci_dev *pdev)
10555 struct net_device *dev = pci_get_drvdata(pdev);
10556 struct bnx2x *bp = netdev_priv(dev);
10558 rtnl_lock();
10560 bnx2x_eeh_recover(bp);
10562 if (netif_running(dev))
10563 bnx2x_nic_load(bp, LOAD_NORMAL);
10565 netif_device_attach(dev);
10567 rtnl_unlock();
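/*
 * Taken together, the handlers wired up below implement the PCI error
 * (EEH) recovery sequence: .error_detected detaches the netdev and
 * unloads the NIC, the PCI core resets the slot, .slot_reset re-enables
 * the device and restores config space, and .resume re-reads the shmem
 * state and reloads the NIC before reattaching it.
 */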
10570 static struct pci_error_handlers bnx2x_err_handler = {
10571 .error_detected = bnx2x_io_error_detected,
10572 .slot_reset = bnx2x_io_slot_reset,
10573 .resume = bnx2x_io_resume,
10576 static struct pci_driver bnx2x_pci_driver = {
10577 .name = DRV_MODULE_NAME,
10578 .id_table = bnx2x_pci_tbl,
10579 .probe = bnx2x_init_one,
10580 .remove = __devexit_p(bnx2x_remove_one),
10581 .suspend = bnx2x_suspend,
10582 .resume = bnx2x_resume,
10583 .err_handler = &bnx2x_err_handler,
10586 static int __init bnx2x_init(void)
10588 bnx2x_wq = create_singlethread_workqueue("bnx2x");
10589 if (bnx2x_wq == NULL) {
10590 printk(KERN_ERR PFX "Cannot create workqueue\n");
10591 return -ENOMEM;
10592 }
10594 return pci_register_driver(&bnx2x_pci_driver);
10597 static void __exit bnx2x_cleanup(void)
10599 pci_unregister_driver(&bnx2x_pci_driver);
10601 destroy_workqueue(bnx2x_wq);
10604 module_init(bnx2x_init);
10605 module_exit(bnx2x_cleanup);