1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
53 #include "bnx2x_reg.h"
54 #include "bnx2x_fw_defs.h"
55 #include "bnx2x_hsi.h"
56 #include "bnx2x_link.h"
58 #include "bnx2x_init.h"
/* Driver identification strings and module-wide tunables.
 * NOTE(review): several lines are elided in this view — the declarations of
 * int_mode, poll and debug referenced by module_param() below are not
 * visible here; presumably they are static ints defined nearby. Verify
 * against the full file.
 */
60 #define DRV_MODULE_VERSION "1.45.26"
61 #define DRV_MODULE_RELDATE "2009/01/26"
62 #define BNX2X_BC_VER 0x040200
64 /* Time in jiffies before concluding the transmitter is hung */
65 #define TX_TIMEOUT (5*HZ)
67 static char version[] __devinitdata =
68 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
69 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
71 MODULE_AUTHOR("Eliezer Tamir");
72 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
73 MODULE_LICENSE("GPL");
74 MODULE_VERSION(DRV_MODULE_VERSION);
/* multi_mode: enables multi-queue operation (default on) */
76 static int multi_mode = 1;
77 module_param(multi_mode, int, 0);
/* disable_tpa: turns off TPA (HW LRO aggregation); 0 = TPA enabled */
79 static int disable_tpa;
/* Reference counts for shared HW init: [0] common, [1] port0, [2] port1 */
82 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
84 module_param(disable_tpa, int, 0);
87 module_param(int_mode, int, 0);
88 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
90 module_param(poll, int, 0);
91 module_param(debug, int, 0);
92 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
93 MODULE_PARM_DESC(poll, "use polling (for debug)");
94 MODULE_PARM_DESC(debug, "default debug msglevel");
/* Single-threaded workqueue used for the slow-path task (see sp_task use
 * in bnx2x_int_disable_sync) */
96 static struct workqueue_struct *bnx2x_wq;
/* Supported board variants. NOTE(review): the enum body (BCM57710,
 * BCM57711, BCM57711E, presumably) and the struct declaration that
 * board_info[] instantiates are elided from this view — confirm against
 * the full file. board_info[] is indexed by enum bnx2x_board_type and
 * carries the human-readable board name used at probe time.
 */
98 enum bnx2x_board_type {
104 /* indexed by board_type, above */
107 } board_info[] __devinitdata = {
108 { "Broadcom NetXtreme II BCM57710 XGb" },
109 { "Broadcom NetXtreme II BCM57711 XGb" },
110 { "Broadcom NetXtreme II BCM57711E XGb" }
/* PCI device ID table: matches any vendor/subsystem combination of the
 * three supported NetXtreme II chips; driver_data carries the
 * bnx2x_board_type index into board_info[]. */
114 static const struct pci_device_id bnx2x_pci_tbl[] = {
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
/* Exposes the table for module autoloading (modinfo/udev) */
124 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
126 /****************************************************************************
127 * General service functions
128 ****************************************************************************/
/* Indirect GRC register write via PCI config space: program the target
 * GRC address into PCICFG_GRC_ADDRESS, write the value through
 * PCICFG_GRC_DATA, then park the address register back at the harmless
 * vendor-ID offset so a stray config access cannot hit device registers.
 */
131 * locking is done by mcp
133 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
135 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
136 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
138 PCICFG_VENDOR_ID_OFFSET);
/* Indirect GRC register read — mirror image of bnx2x_reg_wr_ind():
 * select the address, read the data dword, restore the address register
 * to the vendor-ID offset. NOTE(review): the local 'val' declaration and
 * the return statement are elided in this view. */
141 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
145 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
146 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
148 PCICFG_VENDOR_ID_OFFSET);
/* "GO" doorbell register for each of the 16 DMAE command channels;
 * indexed by DMAE command slot in bnx2x_post_dmae(). */
153 static const u32 dmae_reg_go_c[] = {
154 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
155 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
156 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
157 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
160 /* copy command into DMAE command memory and set DMAE command go */
/* Writes the dmae_command structure word-by-word into the device's
 * command memory slot 'idx', then rings that slot's GO register
 * (dmae_reg_go_c[idx]) to start the transfer. */
161 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
/* Each command slot is sizeof(struct dmae_command) bytes apart */
167 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
168 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
169 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
171 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
172 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
174 REG_WR(bp, dmae_reg_go_c[idx], 1);
/* DMA 'len32' dwords from host memory (dma_addr) to device GRC space
 * (dst_addr) using the DMAE engine. Falls back to slow indirect register
 * writes while the engine is not yet initialized. Serialized by
 * bp->dmae_mutex; completion is detected by polling the wb_comp word in
 * the slowpath area, which the engine writes with DMAE_COMP_VAL.
 * NOTE(review): several lines (loop/brace/timeout bookkeeping, endianity
 * #if branches) are elided in this view. */
177 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
180 struct dmae_command *dmae = &bp->init_dmae;
181 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
/* Engine not ready (e.g. during early init): do it the slow way */
184 if (!bp->dmae_ready) {
185 u32 *data = bnx2x_sp(bp, wb_data[0]);
187 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
188 " using indirect\n", dst_addr, len32);
189 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
193 mutex_lock(&bp->dmae_mutex);
195 memset(dmae, 0, sizeof(struct dmae_command));
/* PCI -> GRC copy; completion written back to PCI (wb_comp). The
 * ENDIANITY flag choice is compile-time (#if branches elided here). */
197 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
198 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
199 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
201 DMAE_CMD_ENDIANITY_B_DW_SWAP |
203 DMAE_CMD_ENDIANITY_DW_SWAP |
205 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
206 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
207 dmae->src_addr_lo = U64_LO(dma_addr);
208 dmae->src_addr_hi = U64_HI(dma_addr);
/* GRC destination is expressed in dwords, hence >> 2 */
209 dmae->dst_addr_lo = dst_addr >> 2;
210 dmae->dst_addr_hi = 0;
212 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
213 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
214 dmae->comp_val = DMAE_COMP_VAL;
216 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
217 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
218 "dst_addr [%x:%08x (%08x)]\n"
219 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
220 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
221 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
222 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
223 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
224 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
225 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
229 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
/* Busy-poll for engine completion write-back (timeout path elided) */
233 while (*wb_comp != DMAE_COMP_VAL) {
234 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
237 BNX2X_ERR("dmae timeout!\n");
241 /* adjust delay for emulation/FPGA */
242 if (CHIP_REV_IS_SLOW(bp))
248 mutex_unlock(&bp->dmae_mutex);
/* DMA 'len32' dwords from device GRC space (src_addr) into the slowpath
 * wb_data buffer using the DMAE engine — the read-direction counterpart
 * of bnx2x_write_dmae(). Falls back to indirect register reads while the
 * engine is not ready; serialized by bp->dmae_mutex; completion polled
 * via wb_comp. NOTE(review): braces/timeout bookkeeping and endianity
 * #if branches are elided in this view. */
251 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
253 struct dmae_command *dmae = &bp->init_dmae;
254 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
/* Slow path for early init: one indirect read per dword */
257 if (!bp->dmae_ready) {
258 u32 *data = bnx2x_sp(bp, wb_data[0]);
261 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
262 " using indirect\n", src_addr, len32);
263 for (i = 0; i < len32; i++)
264 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
268 mutex_lock(&bp->dmae_mutex);
/* Clear the landing buffer so stale data is never mistaken for a
 * completed transfer */
270 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
271 memset(dmae, 0, sizeof(struct dmae_command));
/* GRC -> PCI copy; completion also written back to PCI */
273 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
274 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
275 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
277 DMAE_CMD_ENDIANITY_B_DW_SWAP |
279 DMAE_CMD_ENDIANITY_DW_SWAP |
281 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
282 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
/* GRC source address is in dwords */
283 dmae->src_addr_lo = src_addr >> 2;
284 dmae->src_addr_hi = 0;
285 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
286 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
288 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
289 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
290 dmae->comp_val = DMAE_COMP_VAL;
292 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
293 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
294 "dst_addr [%x:%08x (%08x)]\n"
295 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
296 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
297 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
298 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
302 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
/* Busy-poll for completion (timeout path elided in this view) */
306 while (*wb_comp != DMAE_COMP_VAL) {
309 BNX2X_ERR("dmae timeout!\n");
313 /* adjust delay for emulation/FPGA */
314 if (CHIP_REV_IS_SLOW(bp))
319 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
320 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
321 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
323 mutex_unlock(&bp->dmae_mutex);
326 /* used only for slowpath so not inlined */
/* Write a 64-bit wide-bus register as a {hi, lo} dword pair via DMAE.
 * NOTE(review): the wb_write[2] local declaration is elided here. */
327 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
331 wb_write[0] = val_hi;
332 wb_write[1] = val_lo;
333 REG_WR_DMAE(bp, reg, wb_write, 2);
/* Read a 64-bit wide-bus register via DMAE and combine the two dwords
 * ({hi, lo}) into a u64. Slowpath-only counterpart of bnx2x_wb_wr(). */
337 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
341 REG_RD_DMAE(bp, reg, wb_data, 2);
343 return HILO_U64(wb_data[0], wb_data[1]);
/* Scan the assert lists of all four storm processors (X/T/C/U STORM)
 * in internal memory and print every valid assert entry (four dwords
 * per entry, newest-first in the log). An entry is valid while its
 * first dword differs from COMMON_ASM_INVALID_ASSERT_OPCODE.
 * NOTE(review): the per-storm loop-exit/rc accounting and the final
 * return are elided in this view; presumably rc counts printed asserts.
 */
347 static int bnx2x_mc_assert(struct bnx2x *bp)
351 u32 row0, row1, row2, row3;
/* XSTORM */
354 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
355 XSTORM_ASSERT_LIST_INDEX_OFFSET);
357 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
359 /* print the asserts */
360 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
362 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
363 XSTORM_ASSERT_LIST_OFFSET(i));
364 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
366 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
368 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
371 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
372 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
373 " 0x%08x 0x%08x 0x%08x\n",
374 i, row3, row2, row1, row0);
/* TSTORM — same walk over the TSTORM assert list */
382 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
383 TSTORM_ASSERT_LIST_INDEX_OFFSET);
385 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
387 /* print the asserts */
388 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
390 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
391 TSTORM_ASSERT_LIST_OFFSET(i));
392 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
394 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
396 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
399 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
400 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
401 " 0x%08x 0x%08x 0x%08x\n",
402 i, row3, row2, row1, row0);
/* CSTORM — same walk over the CSTORM assert list */
410 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
411 CSTORM_ASSERT_LIST_INDEX_OFFSET);
413 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
415 /* print the asserts */
416 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
418 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
419 CSTORM_ASSERT_LIST_OFFSET(i));
420 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
422 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
424 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
427 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
428 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
429 " 0x%08x 0x%08x 0x%08x\n",
430 i, row3, row2, row1, row0);
/* USTORM — same walk over the USTORM assert list */
438 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
439 USTORM_ASSERT_LIST_INDEX_OFFSET);
441 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
443 /* print the asserts */
444 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
446 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
447 USTORM_ASSERT_LIST_OFFSET(i));
448 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
449 USTORM_ASSERT_LIST_OFFSET(i) + 4);
450 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 USTORM_ASSERT_LIST_OFFSET(i) + 8);
452 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i) + 12);
455 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
456 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
457 " 0x%08x 0x%08x 0x%08x\n",
458 i, row3, row2, row1, row0);
/* Dump the management firmware's circular trace buffer from MCP scratch
 * memory to the kernel log. The 'mark' word at scratch+0xf104 is the
 * current write position; the buffer is printed in two halves (from the
 * mark to the end, then from the start to the mark) so the output reads
 * oldest-to-newest. Words are byte-swapped with htonl() because the
 * trace is stored as text in big-endian dwords. */
468 static void bnx2x_fw_dump(struct bnx2x *bp)
474 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
/* Round the mark up to a dword boundary */
475 mark = ((mark + 0x3) & ~0x3);
476 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
478 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
479 for (word = 0; word < 8; word++)
480 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
483 printk(KERN_CONT "%s", (char *)data);
485 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
486 for (word = 0; word < 8; word++)
487 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
490 printk(KERN_CONT "%s", (char *)data);
492 printk("\n" KERN_ERR PFX "end of fw dump\n");
/* Emergency state dump: on a fatal driver error, freeze statistics and
 * print per-queue producer/consumer indices plus a window of TX/RX/SGE/
 * CQE ring entries around the current consumers, followed by the global
 * slow-path indices. Debug aid only — no recovery is attempted here. */
495 static void bnx2x_panic_dump(struct bnx2x *bp)
/* Stop the stats state machine so the dump is self-consistent */
500 bp->stats_state = STATS_STATE_DISABLED;
501 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
503 BNX2X_ERR("begin crash dump -----------------\n");
505 for_each_queue(bp, i) {
506 struct bnx2x_fastpath *fp = &bp->fp[i];
507 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
/* Queue index snapshot: SW producers/consumers vs. HW status block */
509 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
510 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
511 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
512 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
513 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
514 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
515 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
516 fp->rx_bd_prod, fp->rx_bd_cons,
517 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
518 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
519 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
520 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
521 " *sb_u_idx(%x) bd data(%x,%x)\n",
522 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
523 fp->status_blk->c_status_block.status_block_index,
525 fp->status_blk->u_status_block.status_block_index,
526 hw_prods->packets_prod, hw_prods->bds_prod);
/* TX packet ring: 10 entries behind the HW consumer through the
 * window ahead of it */
528 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
529 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
530 for (j = start; j < end; j++) {
531 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
533 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
534 sw_bd->skb, sw_bd->first_bd);
/* TX descriptor ring around the SW BD consumer */
537 start = TX_BD(fp->tx_bd_cons - 10);
538 end = TX_BD(fp->tx_bd_cons + 254);
539 for (j = start; j < end; j++) {
540 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
542 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
543 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
/* RX descriptor ring around the HW consumer */
546 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
547 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
548 for (j = start; j < end; j++) {
549 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
550 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
552 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
553 j, rx_bd[1], rx_bd[0], sw_bd->skb);
/* SGE (TPA scatter) ring between producer and last-seen max index */
556 start = RX_SGE(fp->rx_sge_prod);
557 end = RX_SGE(fp->last_max_sge);
558 for (j = start; j < end; j++) {
559 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
560 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
562 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
563 j, rx_sge[1], rx_sge[0], sw_page->page);
/* RX completion queue around the SW completion consumer */
566 start = RCQ_BD(fp->rx_comp_cons - 10);
567 end = RCQ_BD(fp->rx_comp_cons + 503);
568 for (j = start; j < end; j++) {
569 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
571 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
572 j, cqe[0], cqe[1], cqe[2], cqe[3]);
/* Global slow-path / attention state */
576 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
577 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
578 " spq_prod_idx(%u)\n",
579 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
580 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
584 BNX2X_ERR("end crash dump -----------------\n");
/* Enable host-coalescing interrupts for this port's HC block, selecting
 * the delivery mode (MSI-X / MSI / INTx) from bp->flags. The three
 * val-masking arms below are alternative branches of an if/else chain
 * whose condition lines are elided in this view — presumably ordered
 * MSI-X, then MSI, then INTx. On E1H the leading/trailing attention
 * edge registers are also programmed. */
587 static void bnx2x_int_enable(struct bnx2x *bp)
589 int port = BP_PORT(bp);
590 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
591 u32 val = REG_RD(bp, addr);
592 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
593 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
/* MSI-X branch: multiple ISRs, no INTx line */
596 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
597 HC_CONFIG_0_REG_INT_LINE_EN_0);
598 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
599 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
/* MSI branch: single ISR, message-signalled */
601 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
602 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
603 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
604 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
/* INTx branch: legacy line interrupt */
606 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
607 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
608 HC_CONFIG_0_REG_INT_LINE_EN_0 |
609 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
611 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
614 REG_WR(bp, addr, val);
616 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
619 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
620 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
622 REG_WR(bp, addr, val);
624 if (CHIP_IS_E1H(bp)) {
625 /* init leading/trailing edge */
/* Per-VN bit added so this function's VN sees its own attentions */
627 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
629 /* enable nig and gpio3 attention */
634 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
635 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
/* Disable all interrupt sources on this port's HC block (MSI/MSI-X,
 * INTx and attention bits), then read the register back — both to flush
 * posted writes and to verify the write actually landed. */
639 static void bnx2x_int_disable(struct bnx2x *bp)
641 int port = BP_PORT(bp);
642 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
643 u32 val = REG_RD(bp, addr);
645 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
646 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
647 HC_CONFIG_0_REG_INT_LINE_EN_0 |
648 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
650 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
653 /* flush all outstanding writes */
656 REG_WR(bp, addr, val);
/* Read-back verification: catches a wedged IGU */
657 if (REG_RD(bp, addr) != val)
658 BNX2X_ERR("BUG! proper val not read from IGU!\n");
/* Quiesce interrupt processing: raise intr_sem so handlers become
 * no-ops, optionally mask the HW (disable_hw path — condition elided in
 * this view), wait for in-flight ISRs on every vector, and drain the
 * slow-path work item. After return no interrupt work is running. */
661 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
663 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
666 /* disable interrupt handling */
667 atomic_inc(&bp->intr_sem);
669 /* prevent the HW from sending interrupts */
670 bnx2x_int_disable(bp);
672 /* make sure all ISRs are done */
/* MSI-X: sync the slow-path vector, then each queue vector */
674 synchronize_irq(bp->msix_table[0].vector);
676 for_each_queue(bp, i)
677 synchronize_irq(bp->msix_table[i + offset].vector);
/* INTx/MSI: single shared vector */
679 synchronize_irq(bp->pdev->irq);
681 /* make sure sp_task is not running */
682 cancel_delayed_work(&bp->sp_task);
683 flush_workqueue(bnx2x_wq);
689 * General service functions
/* Acknowledge a status block to the IGU: builds an igu_ack_register
 * (status block id, storm id, new index, interrupt mode op, update flag)
 * and writes it as one dword to this port's INT_ACK command register. */
692 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
693 u8 storm, u16 index, u8 op, u8 update)
695 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
696 COMMAND_REG_INT_ACK);
697 struct igu_ack_register igu_ack;
699 igu_ack.status_block_index = index;
700 igu_ack.sb_id_and_flags =
701 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
702 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
703 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
704 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
706 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
707 (*(u32 *)&igu_ack), hc_addr);
/* The whole struct is exactly one dword; written as a single u32 */
708 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
/* Refresh the cached fastpath status-block indices (c and u storm) from
 * the chip-written status block. NOTE(review): the return value and the
 * rc accumulation lines are elided in this view — presumably a bitmask
 * of which indices changed. */
711 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
713 struct host_status_block *fpsb = fp->status_blk;
716 barrier(); /* status block is written to by the chip */
717 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
718 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
721 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
722 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
/* Read (and thereby acknowledge) the interrupt status from this port's
 * SIMD mask command register. NOTE(review): the return of 'result' is
 * elided in this view. */
728 static u16 bnx2x_ack_int(struct bnx2x *bp)
730 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
731 COMMAND_REG_SIMD_MASK);
732 u32 result = REG_RD(bp, hc_addr);
734 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
742 * fast path service functions
/* Returns non-zero when the HW TX consumer (status block) has advanced
 * past the SW packet consumer, i.e. there are completed TX packets to
 * reclaim. */
745 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
749 /* Tell compiler that status block fields can change */
751 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
752 return (fp->tx_pkt_cons != tx_cons_sb);
/* Unload-time variant: compares SW producer vs. SW consumer directly
 * (non-zero while packets are still in flight on the TX ring). */
755 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
757 /* Tell compiler that consumer and producer can change */
759 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
763 /* free skb in the packet ring at pos idx
764 * return idx of last bd freed
/* Reclaims one completed TX packet: unmaps the first (header) BD,
 * skips the parse BD and any TSO split-header BD (they carry no DMA
 * mapping), unmaps each fragment BD, then releases the skb and clears
 * the ring slot. Returns the new BD consumer index.
 * NOTE(review): the skb free and final return lines are elided here. */
766 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
769 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
770 struct eth_tx_bd *tx_bd;
771 struct sk_buff *skb = tx_buf->skb;
772 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
775 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
/* unmap first bd */
779 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
780 tx_bd = &fp->tx_desc_ring[bd_idx];
781 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
782 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
/* nbd counts all BDs of the packet; -1 excludes the one just freed */
784 nbd = le16_to_cpu(tx_bd->nbd) - 1;
785 new_cons = nbd + tx_buf->first_bd;
786 #ifdef BNX2X_STOP_ON_ERROR
787 if (nbd > (MAX_SKB_FRAGS + 2)) {
788 BNX2X_ERR("BAD nbd!\n");
793 /* Skip a parse bd and the TSO split header bd
794 since they have no mapping */
796 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
798 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
799 ETH_TX_BD_FLAGS_TCP_CSUM |
800 ETH_TX_BD_FLAGS_SW_LSO)) {
802 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
803 tx_bd = &fp->tx_desc_ring[bd_idx];
804 /* is this a TSO split header bd? */
805 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
807 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
/* now free frags — one pci_unmap_page() per fragment BD */
814 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
815 tx_bd = &fp->tx_desc_ring[bd_idx];
816 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
817 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
819 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
/* release skb and clear the ring slot */
825 tx_buf->first_bd = 0;
/* Number of TX BDs still available on the ring. The NUM_TX_RINGS
 * "next-page" link entries are counted as used so the result is a safe
 * lower bound. Signed 16-bit subtraction handles index wrap-around. */
831 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
837 barrier(); /* Tell compiler that prod and cons can change */
838 prod = fp->tx_bd_prod;
839 cons = fp->tx_bd_cons;
841 /* NUM_TX_RINGS = number of "next-page" entries
842 It will be used as a threshold */
843 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
845 #ifdef BNX2X_STOP_ON_ERROR
847 WARN_ON(used > fp->bp->tx_ring_size);
848 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
851 return (s16)(fp->bp->tx_ring_size) - used;
/* TX completion processing: walk the packet consumer from the SW index
 * up to the HW index in the status block, freeing each completed packet
 * via bnx2x_free_tx_pkt(), then publish the new consumers and wake the
 * netdev TX queue if it was stopped and enough BDs are now free. */
854 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
856 struct bnx2x *bp = fp->bp;
857 struct netdev_queue *txq;
858 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
861 #ifdef BNX2X_STOP_ON_ERROR
862 if (unlikely(bp->panic))
866 txq = netdev_get_tx_queue(bp->dev, fp->index);
867 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
868 sw_cons = fp->tx_pkt_cons;
870 while (sw_cons != hw_cons) {
873 pkt_cons = TX_BD(sw_cons);
875 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
877 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
878 hw_cons, sw_cons, pkt_cons);
880 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
882 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
885 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
/* publish updated consumers for start_xmit() to observe */
893 fp->tx_pkt_cons = sw_cons;
894 fp->tx_bd_cons = bd_cons;
896 /* Need to make the tx_bd_cons update visible to start_xmit()
897 * before checking for netif_tx_queue_stopped(). Without the
898 * memory barrier, there is a small possibility that start_xmit()
899 * will miss it and cause the queue to be stopped forever.
903 /* TBD need a thresh? */
904 if (unlikely(netif_tx_queue_stopped(txq))) {
/* Re-check under the TX lock to avoid racing a concurrent stop */
906 __netif_tx_lock(txq, smp_processor_id());
908 if ((netif_tx_queue_stopped(txq)) &&
909 (bp->state == BNX2X_STATE_OPEN) &&
910 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
911 netif_tx_wake_queue(txq);
913 __netif_tx_unlock(txq);
/* Slow-path (ramrod) completion handler, called from the RX CQE path.
 * Decodes the connection id and command from the ramrod CQE and
 * advances the matching per-queue (fp->state) or global (bp->state)
 * state machine. The mb() calls make the transition visible to
 * bnx2x_wait_ramrod() polling on another CPU. NOTE(review): the branch
 * that selects between the MULTI (per-fp) switch and the global switch
 * is elided in this view. */
918 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
919 union eth_rx_cqe *rr_cqe)
921 struct bnx2x *bp = fp->bp;
922 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
923 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
926 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
927 FP_IDX(fp), cid, command, bp->state,
928 rr_cqe->ramrod_cqe.ramrod_type);
/* per-queue (MULTI) ramrod completions */
933 switch (command | fp->state) {
934 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
935 BNX2X_FP_STATE_OPENING):
936 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
938 fp->state = BNX2X_FP_STATE_OPEN;
941 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
942 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
944 fp->state = BNX2X_FP_STATE_HALTED;
948 BNX2X_ERR("unexpected MC reply (%d) "
949 "fp->state is %x\n", command, fp->state);
952 mb(); /* force bnx2x_wait_ramrod() to see the change */
/* global (leading-queue) ramrod completions */
956 switch (command | bp->state) {
957 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
958 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
959 bp->state = BNX2X_STATE_OPEN;
962 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
963 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
964 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
965 fp->state = BNX2X_FP_STATE_HALTED;
968 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
969 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
970 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
974 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
975 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
976 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
977 bp->set_mac_pending = 0;
980 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
981 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
985 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
989 mb(); /* force bnx2x_wait_ramrod() to see the change */
/* Release one RX SGE entry: unmap its compound page, free it, and
 * clear the SW ring slot. No-op for empty slots (the page==NULL check
 * is among the elided lines in this view). */
992 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
993 struct bnx2x_fastpath *fp, u16 index)
995 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
996 struct page *page = sw_buf->page;
997 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
999 /* Skip "next page" elements */
1003 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1004 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1005 __free_pages(page, PAGES_PER_SGE_SHIFT);
1007 sw_buf->page = NULL;
/* Free SGE ring entries [0, last) — used on teardown/error paths. */
1012 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1013 struct bnx2x_fastpath *fp, int last)
1017 for (i = 0; i < last; i++)
1018 bnx2x_free_rx_sge(bp, fp, i);
/* Allocate and DMA-map a compound page for one RX SGE slot, recording
 * the mapping in the SW ring and the bus address in the HW descriptor.
 * GFP_ATOMIC because this runs in the RX softirq path. Returns 0 on
 * success (error-return lines elided in this view). */
1021 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1022 struct bnx2x_fastpath *fp, u16 index)
1024 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1025 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1026 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1029 if (unlikely(page == NULL))
1032 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1033 PCI_DMA_FROMDEVICE);
/* Mapping failed: give the page back and report the error */
1034 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1035 __free_pages(page, PAGES_PER_SGE_SHIFT);
1039 sw_buf->page = page;
1040 pci_unmap_addr_set(sw_buf, mapping, mapping);
1042 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1043 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
/* Allocate and DMA-map a fresh skb for one RX BD slot; store the
 * mapping in the SW ring entry and the bus address in the HW BD.
 * Returns 0 on success (error-return lines elided in this view). */
1048 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1049 struct bnx2x_fastpath *fp, u16 index)
1051 struct sk_buff *skb;
1052 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1053 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1056 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1057 if (unlikely(skb == NULL))
1060 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1061 PCI_DMA_FROMDEVICE);
/* Mapping failed: the skb free is among the elided lines here */
1062 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1068 pci_unmap_addr_set(rx_buf, mapping, mapping);
1070 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1071 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1076 /* note that we are not allocating a new skb,
1077 * we are just moving one from cons to prod
1078 * we are not creating a new mapping,
1079 * so there is no need to check for dma_mapping_error().
/* Recycle an RX buffer in place: after syncing the small inspected
 * region back to the device, move the skb pointer, its DMA mapping and
 * the HW descriptor from the consumer slot to the producer slot. */
1081 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1082 struct sk_buff *skb, u16 cons, u16 prod)
1084 struct bnx2x *bp = fp->bp;
1085 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1086 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1087 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1088 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
/* Only the header region (RX_COPY_THRESH) was touched by the CPU */
1090 pci_dma_sync_single_for_device(bp->pdev,
1091 pci_unmap_addr(cons_rx_buf, mapping),
1092 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1094 prod_rx_buf->skb = cons_rx_buf->skb;
1095 pci_unmap_addr_set(prod_rx_buf, mapping,
1096 pci_unmap_addr(cons_rx_buf, mapping));
1097 *prod_bd = *cons_bd;
/* Track the highest SGE index seen so far; signed 16-bit compare
 * handles index wrap-around. */
1100 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1103 u16 last_max = fp->last_max_sge;
1105 if (SUB_S16(idx, last_max) > 0)
1106 fp->last_max_sge = idx;
/* Clear the mask bits of the last two entries of every SGE ring page —
 * those slots hold "next page" link elements, never real buffers, so
 * they must never be counted as pending by the producer-update logic. */
1109 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1113 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1114 int idx = RX_SGE_CNT * i - 1;
1116 for (j = 0; j < 2; j++) {
1117 SGE_MASK_CLEAR_BIT(fp, idx);
/* After a TPA completion, mark the SGE entries consumed by this CQE as
 * used (clear their mask bits), then advance the SGE producer over every
 * fully-consumed mask element, re-arming those elements for reuse. The
 * number of SGEs is derived from pkt_len minus len_on_bd, page-aligned. */
1123 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1124 struct eth_fast_path_rx_cqe *fp_cqe)
1126 struct bnx2x *bp = fp->bp;
1127 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1128 le16_to_cpu(fp_cqe->len_on_bd)) >>
1130 u16 last_max, last_elem, first_elem;
1137 /* First mark all used pages */
1138 for (i = 0; i < sge_len; i++)
1139 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1141 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1142 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1144 /* Here we assume that the last SGE index is the biggest */
1145 prefetch((void *)(fp->sge_mask));
1146 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1148 last_max = RX_SGE(fp->last_max_sge);
1149 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1150 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1152 /* If ring is not full */
1153 if (last_elem + 1 != first_elem)
1156 /* Now update the prod */
1157 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
/* Stop at the first element that still has unconsumed entries */
1158 if (likely(fp->sge_mask[i]))
/* Element fully consumed: reset its mask to all-ones and count it */
1161 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1162 delta += RX_SGE_MASK_ELEM_SZ;
1166 fp->rx_sge_prod += delta;
1167 /* clear page-end entries */
1168 bnx2x_clear_sge_mask_next_elems(fp);
1171 DP(NETIF_MSG_RX_STATUS,
1172 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1173 fp->last_max_sge, fp->rx_sge_prod);
/* Initialize the SGE availability bitmask: all bits set ("available"),
 * except the per-page "next" link entries which are cleared. */
1176 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1178 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1179 memset(fp->sge_mask, 0xff,
1180 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1182 /* Clear the two last indices in the page to 1:
1183 these are the indices that correspond to the "next" element,
1184 hence will never be indicated and should be removed from
1185 the calculations. */
1186 bnx2x_clear_sge_mask_next_elems(fp);
/* Begin a TPA (HW LRO) aggregation on 'queue': swap the pre-allocated
 * empty skb from the TPA pool into the RX producer slot (mapping it for
 * DMA), park the partially-received consumer buffer in the pool without
 * unmapping it, and mark the bin BNX2X_TPA_START. */
1189 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1190 struct sk_buff *skb, u16 cons, u16 prod)
1192 struct bnx2x *bp = fp->bp;
1193 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1194 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1195 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1198 /* move empty skb from pool to prod and map it */
1199 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
/* NOTE(review): no dma_mapping_error() check visible on this map —
 * may be among the elided lines; verify against the full file */
1200 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1201 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1202 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1204 /* move partial skb from cons to pool (don't unmap yet) */
1205 fp->tpa_pool[queue] = *cons_rx_buf;
1207 /* mark bin state as start - print error if current state != stop */
1208 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1209 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1211 fp->tpa_state[queue] = BNX2X_TPA_START;
1213 /* point prod_bd to new skb */
1214 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1215 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1217 #ifdef BNX2X_STOP_ON_ERROR
1218 fp->tpa_queue_used |= (1 << queue);
/* format differs per arch: tpa_queue_used is printed %lx vs %llx */
1219 #ifdef __powerpc64__
1220 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1222 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1224 fp->tpa_queue_used);
/* Attach the SGE pages of a completed TPA aggregation to 'skb' as page
 * fragments. For each SGL entry: first allocate a replacement page for
 * the ring slot (dropping the whole packet if that fails), then unmap
 * the old page and hand it to the skb via skb_fill_page_desc(),
 * updating len/data_len/truesize. gso_size is set so the stack can
 * re-segment (forwarding support). Returns 0 on success (final return
 * elided in this view). */
1228 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1229 struct sk_buff *skb,
1230 struct eth_fast_path_rx_cqe *fp_cqe,
1233 struct sw_rx_page *rx_pg, old_rx_pg;
1234 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1235 u32 i, frag_len, frag_size, pages;
/* bytes carried by SGEs = total packet length minus what is on the BD */
1239 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1240 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1242 /* This is needed in order to enable forwarding support */
1244 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1245 max(frag_size, (u32)len_on_bd));
1247 #ifdef BNX2X_STOP_ON_ERROR
1249 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1250 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1252 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1253 fp_cqe->pkt_len, len_on_bd);
1259 /* Run through the SGL and compose the fragmented skb */
1260 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1261 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1263 /* FW gives the indices of the SGE as if the ring is an array
1264 (meaning that "next" element will consume 2 indices) */
1265 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1266 rx_pg = &fp->rx_page_ring[sge_idx];
1269 /* If we fail to allocate a substitute page, we simply stop
1270 where we are and drop the whole packet */
1271 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1272 if (unlikely(err)) {
1273 fp->eth_q_stats.rx_skb_alloc_failed++;
1277 /* Unmap the page as we r going to pass it to the stack */
1278 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1279 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1281 /* Add one frag and update the appropriate fields in the skb */
1282 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1284 skb->data_len += frag_len;
1285 skb->truesize += frag_len;
1286 skb->len += frag_len;
1288 frag_size -= frag_len;
/* Terminate TPA aggregation on bin @queue: unmap the pooled skb, fix up
 * its IP checksum (HW leaves it stale for aggregated frames), attach the
 * SGL pages via bnx2x_fill_frag_skb() and hand the result to the stack
 * (VLAN-accelerated when applicable). A replacement skb is allocated up
 * front; if that fails the packet is dropped but the bin still returns
 * to BNX2X_TPA_STOP so the ring keeps running. */
1294 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1295 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1298 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1299 struct sk_buff *skb = rx_buf->skb;
1301 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1303 /* Unmap skb in the pool anyway, as we are going to change
1304 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1306 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1307 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1309 if (likely(new_skb)) {
1310 /* fix ip xsum and give it to the stack */
1311 /* (no need to map the new skb) */
1314 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1315 PARSING_FLAGS_VLAN);
1316 int is_not_hwaccel_vlan_cqe =
1317 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1321 prefetch(((char *)(skb)) + 128);
1323 #ifdef BNX2X_STOP_ON_ERROR
1324 if (pad + len > bp->rx_buf_size) {
1325 BNX2X_ERR("skb_put is about to fail... "
1326 "pad %d len %d rx_buf_size %d\n",
1327 pad, len, bp->rx_buf_size);
1333 skb_reserve(skb, pad);
1336 skb->protocol = eth_type_trans(skb, bp->dev);
1337 skb->ip_summed = CHECKSUM_UNNECESSARY;
1342 iph = (struct iphdr *)skb->data;
1344 /* If there is no Rx VLAN offloading -
1345 take VLAN tag into an account */
1346 if (unlikely(is_not_hwaccel_vlan_cqe))
1347 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1350 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1353 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1354 &cqe->fast_path_cqe, cqe_idx)) {
1356 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1357 (!is_not_hwaccel_vlan_cqe))
1358 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1359 le16_to_cpu(cqe->fast_path_cqe.
1363 netif_receive_skb(skb);
1365 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1366 " - dropping packet!\n");
1371 /* put new skb in bin */
1372 fp->tpa_pool[queue].skb = new_skb;
1375 /* else drop the packet and keep the buffer in the bin */
1376 DP(NETIF_MSG_RX_STATUS,
1377 "Failed to allocate new skb - dropping packet!\n");
1378 fp->eth_q_stats.rx_skb_alloc_failed++;
1381 fp->tpa_state[queue] = BNX2X_TPA_STOP;
/* Publish the new BD/CQE/SGE producer indices to the USTORM mailbox so
 * FW can post more completions. The barrier (on an elided line, per the
 * comment below) orders descriptor writes before the producer update. */
1384 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1385 struct bnx2x_fastpath *fp,
1386 u16 bd_prod, u16 rx_comp_prod,
1389 struct ustorm_eth_rx_producers rx_prods = {0};
1392 /* Update producers */
1393 rx_prods.bd_prod = bd_prod;
1394 rx_prods.cqe_prod = rx_comp_prod;
1395 rx_prods.sge_prod = rx_sge_prod;
1398 * Make sure that the BD and SGE data is updated before updating the
1399 * producers since FW might read the BD/SGE right after the producer
1401 * This is only applicable for weak-ordered memory model archs such
1402 * as IA-64. The following barrier is also mandatory since FW will
1403 * assumes BDs must have buffers.
1407 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1408 REG_WR(bp, BAR_USTRORM_INTMEM +
1409 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1410 ((u32 *)&rx_prods)[i]);
1412 mmiowb(); /* keep prod updates ordered */
1414 DP(NETIF_MSG_RX_STATUS,
1415 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1416 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
/* NAPI RX poll for one fastpath queue: drain up to @budget completions
 * from the RCQ. Handles slowpath events, TPA start/stop aggregation,
 * error discards, small-packet copy-break (when MTU > 1500), checksum
 * offload marking and VLAN acceleration, then republishes the producers.
 * Returns the number of packets processed (the return statement itself
 * is on an elided line). */
1419 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1421 struct bnx2x *bp = fp->bp;
1422 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1423 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1426 #ifdef BNX2X_STOP_ON_ERROR
1427 if (unlikely(bp->panic))
1431 /* CQ "next element" is of the size of the regular element,
1432 that's why it's ok here */
1433 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1434 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1437 bd_cons = fp->rx_bd_cons;
1438 bd_prod = fp->rx_bd_prod;
1439 bd_prod_fw = bd_prod;
1440 sw_comp_cons = fp->rx_comp_cons;
1441 sw_comp_prod = fp->rx_comp_prod;
1443 /* Memory barrier necessary as speculative reads of the rx
1444 * buffer can be ahead of the index in the status block
1448 DP(NETIF_MSG_RX_STATUS,
1449 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1450 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1452 while (sw_comp_cons != hw_comp_cons) {
1453 struct sw_rx_bd *rx_buf = NULL;
1454 struct sk_buff *skb;
1455 union eth_rx_cqe *cqe;
1459 comp_ring_cons = RCQ_BD(sw_comp_cons);
1460 bd_prod = RX_BD(bd_prod);
1461 bd_cons = RX_BD(bd_cons);
1463 cqe = &fp->rx_comp_ring[comp_ring_cons];
1464 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1466 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1467 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1468 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1469 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1470 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1471 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1473 /* is this a slowpath msg? */
1474 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1475 bnx2x_sp_event(fp, cqe);
1478 /* this is an rx packet */
1480 rx_buf = &fp->rx_buf_ring[bd_cons];
1482 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1483 pad = cqe->fast_path_cqe.placement_offset;
1485 /* If CQE is marked both TPA_START and TPA_END
1486 it is a non-TPA CQE */
1487 if ((!fp->disable_tpa) &&
1488 (TPA_TYPE(cqe_fp_flags) !=
1489 (TPA_TYPE_START | TPA_TYPE_END))) {
1490 u16 queue = cqe->fast_path_cqe.queue_index;
1492 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1493 DP(NETIF_MSG_RX_STATUS,
1494 "calling tpa_start on queue %d\n",
1497 bnx2x_tpa_start(fp, queue, skb,
1502 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1503 DP(NETIF_MSG_RX_STATUS,
1504 "calling tpa_stop on queue %d\n",
1507 if (!BNX2X_RX_SUM_FIX(cqe))
1508 BNX2X_ERR("STOP on none TCP "
1511 /* This is a size of the linear data
1513 len = le16_to_cpu(cqe->fast_path_cqe.
1515 bnx2x_tpa_stop(bp, fp, queue, pad,
1516 len, cqe, comp_ring_cons);
1517 #ifdef BNX2X_STOP_ON_ERROR
1522 bnx2x_update_sge_prod(fp,
1523 &cqe->fast_path_cqe);
1528 pci_dma_sync_single_for_device(bp->pdev,
1529 pci_unmap_addr(rx_buf, mapping),
1530 pad + RX_COPY_THRESH,
1531 PCI_DMA_FROMDEVICE);
1533 prefetch(((char *)(skb)) + 128);
1535 /* is this an error packet? */
1536 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1537 DP(NETIF_MSG_RX_ERR,
1538 "ERROR flags %x rx packet %u\n",
1539 cqe_fp_flags, sw_comp_cons);
1540 fp->eth_q_stats.rx_err_discard_pkt++;
1544 /* Since we don't have a jumbo ring
1545 * copy small packets if mtu > 1500
1547 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1548 (len <= RX_COPY_THRESH)) {
1549 struct sk_buff *new_skb;
1551 new_skb = netdev_alloc_skb(bp->dev,
1553 if (new_skb == NULL) {
1554 DP(NETIF_MSG_RX_ERR,
1555 "ERROR packet dropped "
1556 "because of alloc failure\n");
1557 fp->eth_q_stats.rx_skb_alloc_failed++;
1562 skb_copy_from_linear_data_offset(skb, pad,
1563 new_skb->data + pad, len);
1564 skb_reserve(new_skb, pad);
1565 skb_put(new_skb, len);
1567 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1571 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1572 pci_unmap_single(bp->pdev,
1573 pci_unmap_addr(rx_buf, mapping),
1575 PCI_DMA_FROMDEVICE);
1576 skb_reserve(skb, pad);
1580 DP(NETIF_MSG_RX_ERR,
1581 "ERROR packet dropped because "
1582 "of alloc failure\n");
1583 fp->eth_q_stats.rx_skb_alloc_failed++;
1585 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1589 skb->protocol = eth_type_trans(skb, bp->dev);
1591 skb->ip_summed = CHECKSUM_NONE;
1593 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1594 skb->ip_summed = CHECKSUM_UNNECESSARY;
1596 fp->eth_q_stats.hw_csum_err++;
1600 skb_record_rx_queue(skb, fp->index);
1602 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1603 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1604 PARSING_FLAGS_VLAN))
1605 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1606 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1609 netif_receive_skb(skb);
1615 bd_cons = NEXT_RX_IDX(bd_cons);
1616 bd_prod = NEXT_RX_IDX(bd_prod);
1617 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1620 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1621 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1623 if (rx_pkt == budget)
1627 fp->rx_bd_cons = bd_cons;
1628 fp->rx_bd_prod = bd_prod_fw;
1629 fp->rx_comp_cons = sw_comp_cons;
1630 fp->rx_comp_prod = sw_comp_prod;
1632 /* Update producers */
1633 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1636 fp->rx_pkt += rx_pkt;
/* Per-queue MSI-X handler: bail if interrupts are gated via intr_sem,
 * ack the status block with interrupts disabled, prefetch the hot
 * consumer/status data, and schedule NAPI for this fastpath. */
1642 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1644 struct bnx2x_fastpath *fp = fp_cookie;
1645 struct bnx2x *bp = fp->bp;
1646 int index = FP_IDX(fp);
1648 /* Return here if interrupt is disabled */
1649 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1650 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1654 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1655 index, FP_SB_ID(fp));
1656 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1658 #ifdef BNX2X_STOP_ON_ERROR
1659 if (unlikely(bp->panic))
1663 prefetch(fp->rx_cons_sb);
1664 prefetch(fp->tx_cons_sb);
1665 prefetch(&fp->status_blk->c_status_block.status_block_index);
1666 prefetch(&fp->status_blk->u_status_block.status_block_index);
1668 napi_schedule(&bnx2x_fp(bp, index, napi));
/* Legacy INTx/MSI handler (shared-line capable): ack/read the aggregated
 * status, schedule NAPI for queue 0 if its status bit is set, and kick
 * the slowpath workqueue for the slowpath bit (0x1). */
1673 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1675 struct bnx2x *bp = netdev_priv(dev_instance);
1676 u16 status = bnx2x_ack_int(bp);
1679 /* Return here if interrupt is shared and it's not for us */
1680 if (unlikely(status == 0)) {
1681 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1684 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
1686 /* Return here if interrupt is disabled */
1687 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1688 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1692 #ifdef BNX2X_STOP_ON_ERROR
1693 if (unlikely(bp->panic))
1697 mask = 0x2 << bp->fp[0].sb_id;
1698 if (status & mask) {
1699 struct bnx2x_fastpath *fp = &bp->fp[0];
1701 prefetch(fp->rx_cons_sb);
1702 prefetch(fp->tx_cons_sb);
1703 prefetch(&fp->status_blk->c_status_block.status_block_index);
1704 prefetch(&fp->status_blk->u_status_block.status_block_index);
1706 napi_schedule(&bnx2x_fp(bp, 0, napi));
1712 if (unlikely(status & 0x1)) {
1713 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1721 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1727 /* end of fast path */
1729 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1734 * General service functions
/* Acquire a HW resource lock bit via the per-function MISC driver-control
 * register, polling every 5 ms for up to ~5 s. Returns 0 on success; the
 * error returns (invalid resource, already taken, timeout) are on elided
 * lines — likely -EINVAL/-EEXIST/-EAGAIN style codes; confirm in full file. */
1737 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1740 u32 resource_bit = (1 << resource);
1741 int func = BP_FUNC(bp);
1742 u32 hw_lock_control_reg;
1745 /* Validating that the resource is within range */
1746 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1748 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1749 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1754 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1756 hw_lock_control_reg =
1757 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1760 /* Validating that the resource is not already taken */
1761 lock_status = REG_RD(bp, hw_lock_control_reg);
1762 if (lock_status & resource_bit) {
1763 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1764 lock_status, resource_bit);
1768 /* Try for 5 second every 5ms */
1769 for (cnt = 0; cnt < 1000; cnt++) {
1770 /* Try to acquire the lock */
1771 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1772 lock_status = REG_RD(bp, hw_lock_control_reg);
1773 if (lock_status & resource_bit)
1778 DP(NETIF_MSG_HW, "Timeout\n");
/* Release a HW resource lock bit previously taken with
 * bnx2x_acquire_hw_lock(); validates range and that the bit is actually
 * held before writing the release. Error returns are on elided lines. */
1782 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1785 u32 resource_bit = (1 << resource);
1786 int func = BP_FUNC(bp);
1787 u32 hw_lock_control_reg;
1789 /* Validating that the resource is within range */
1790 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1792 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1793 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1798 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1800 hw_lock_control_reg =
1801 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1804 /* Validating that the resource is currently taken */
1805 lock_status = REG_RD(bp, hw_lock_control_reg);
1806 if (!(lock_status & resource_bit)) {
1807 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1808 lock_status, resource_bit);
1812 REG_WR(bp, hw_lock_control_reg, resource_bit);
1816 /* HW Lock for shared dual port PHYs */
/* Take the software phy_mutex, and for 8072/8073 external PHYs (whose
 * MDIO bus is shared between ports) also take the HW MDIO lock.
 * Paired with bnx2x_release_phy_lock() below — lock order: mutex first. */
1817 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1819 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1821 mutex_lock(&bp->port.phy_mutex);
1823 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1824 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1825 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
/* Release in reverse order of bnx2x_acquire_phy_lock(): HW MDIO lock
 * first (8072/8073 only), then the phy_mutex. */
1828 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1830 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1832 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1833 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1834 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1836 mutex_unlock(&bp->port.phy_mutex);
/* Read the current value of GPIO pin @gpio_num for @port, honoring the
 * NIG port-swap straps. Returns the pin value (assignment of `value` is
 * on elided lines) or an error for an out-of-range GPIO. */
1839 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1841 /* The GPIO should be swapped if swap register is set and active */
1842 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1843 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1844 int gpio_shift = gpio_num +
1845 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1846 u32 gpio_mask = (1 << gpio_shift);
1850 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1851 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1855 /* read GPIO value */
1856 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1858 /* get the requested pin value */
1859 if ((gpio_reg & gpio_mask) == gpio_mask)
1864 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
/* Drive GPIO pin @gpio_num to @mode (output-low, output-high, or
 * high-Z input), honoring port swap. Read-modify-write of MISC_REG_GPIO
 * is serialized under the GPIO HW lock. */
1869 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1871 /* The GPIO should be swapped if swap register is set and active */
1872 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1873 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1874 int gpio_shift = gpio_num +
1875 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1876 u32 gpio_mask = (1 << gpio_shift);
1879 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1880 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1884 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1885 /* read GPIO and mask except the float bits */
1886 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1889 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1890 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1891 gpio_num, gpio_shift);
1892 /* clear FLOAT and set CLR */
1893 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1894 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1897 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1898 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1899 gpio_num, gpio_shift);
1900 /* clear FLOAT and set SET */
1901 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1902 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1905 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1906 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1907 gpio_num, gpio_shift);
1909 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1916 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1917 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
/* Program the GPIO interrupt SET/CLR latch for pin @gpio_num (port-swap
 * aware), under the GPIO HW lock. Mirrors bnx2x_set_gpio() but operates
 * on MISC_REG_GPIO_INT instead of the pin-drive register. */
1922 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1924 /* The GPIO should be swapped if swap register is set and active */
1925 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1926 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1927 int gpio_shift = gpio_num +
1928 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1929 u32 gpio_mask = (1 << gpio_shift);
1932 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1933 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1937 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1939 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1942 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1943 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1944 "output low\n", gpio_num, gpio_shift);
1945 /* clear SET and set CLR */
1946 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1947 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1950 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1951 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1952 "output high\n", gpio_num, gpio_shift);
1953 /* clear CLR and set SET */
1954 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1955 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1962 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1963 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
/* Drive shared SPIO pin @spio_num (valid range SPIO_4..SPIO_7) to @mode,
 * under the SPIO HW lock. Same SET/CLR/FLOAT scheme as bnx2x_set_gpio()
 * but SPIOs are chip-global, so there is no port swap handling. */
1968 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1970 u32 spio_mask = (1 << spio_num);
1973 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1974 (spio_num > MISC_REGISTERS_SPIO_7)) {
1975 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1979 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1980 /* read SPIO and mask except the float bits */
1981 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1984 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1985 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1986 /* clear FLOAT and set CLR */
1987 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1988 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1991 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1992 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1993 /* clear FLOAT and set SET */
1994 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1995 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1998 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1999 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2001 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2008 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2009 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
/* Translate the negotiated IEEE pause bits in link_vars into the
 * ethtool ADVERTISED_*Pause flags kept in bp->port.advertising. */
2014 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2016 switch (bp->link_vars.ieee_fc &
2017 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2018 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2019 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2022 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2023 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2026 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2027 bp->port.advertising |= ADVERTISED_Asym_Pause;
2030 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
/* Log link state to the console and toggle the netdev carrier: speed,
 * duplex and RX/TX flow-control on link-up; carrier-off on link-down. */
2038 static void bnx2x_link_report(struct bnx2x *bp)
2039 if (bp->link_vars.link_up) {
2040 if (bp->state == BNX2X_STATE_OPEN)
2041 netif_carrier_on(bp->dev);
2042 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2043 printk("%d Mbps ", bp->link_vars.line_speed);
2045 if (bp->link_vars.duplex == DUPLEX_FULL)
2046 printk("full duplex");
2048 printk("half duplex");
2050 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2051 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2052 printk(", receive ");
2053 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2054 printk("& transmit ");
2056 printk(", transmit ");
2058 printk("flow control ON");
2062 } else { /* link_down */
2063 netif_carrier_off(bp->dev);
2064 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
/* First-time PHY/link bring-up (MCP present only): choose the requested
 * auto-advertised flow control from the MTU (RX FC off for jumbo frames
 * per the note below — the first branch condition is on an elided line),
 * run bnx2x_phy_init() under the PHY lock, then derive the ethtool
 * pause advertisement and report link if already up. */
2068 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
2070 if (!BP_NOMCP(bp)) {
2073 /* Initialize link parameters structure variables */
2074 /* It is recommended to turn off RX FC for jumbo frames
2075 for better performance */
2077 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2078 else if (bp->dev->mtu > 5000)
2079 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2081 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2083 bnx2x_acquire_phy_lock(bp);
2084 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2085 bnx2x_release_phy_lock(bp);
2087 bnx2x_calc_fc_adv(bp);
2089 if (bp->link_vars.link_up)
2090 bnx2x_link_report(bp);
2095 BNX2X_ERR("Bootcode is missing -not initializing link\n");
/* Re-run PHY init with current link_params (e.g. after an ethtool
 * settings change) and refresh the pause advertisement; no-op with an
 * error message when bootcode (MCP) is absent. */
2099 static void bnx2x_link_set(struct bnx2x *bp)
2101 if (!BP_NOMCP(bp)) {
2102 bnx2x_acquire_phy_lock(bp);
2103 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2104 bnx2x_release_phy_lock(bp);
2106 bnx2x_calc_fc_adv(bp);
2108 BNX2X_ERR("Bootcode is missing -not setting link\n");
/* Bring the link down via bnx2x_link_reset() under the PHY lock
 * (the trailing 1 requests a full reset per the link code's contract —
 * confirm against bnx2x_link.h); no-op when bootcode is absent. */
2111 static void bnx2x__link_reset(struct bnx2x *bp)
2113 if (!BP_NOMCP(bp)) {
2114 bnx2x_acquire_phy_lock(bp);
2115 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2116 bnx2x_release_phy_lock(bp);
2118 BNX2X_ERR("Bootcode is missing -not resetting link\n");
/* Run the link self-test (used by ethtool) under the PHY lock and
 * return its result code. */
2121 static u8 bnx2x_link_test(struct bnx2x *bp)
2125 bnx2x_acquire_phy_lock(bp);
2126 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2127 bnx2x_release_phy_lock(bp);
/* Initialize the per-port rate-shaping and fairness (min/max bandwidth)
 * parameters in bp->cmng from the current line speed. r_param is the
 * line rate in bytes/usec (speed in Mbps / 8); SDM timer ticks are 4 us. */
2132 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2134 u32 r_param = bp->link_vars.line_speed / 8;
2135 u32 fair_periodic_timeout_usec;
2138 memset(&(bp->cmng.rs_vars), 0,
2139 sizeof(struct rate_shaping_vars_per_port));
2140 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2142 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2143 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2145 /* this is the threshold below which no timer arming will occur
2146 1.25 coefficient is for the threshold to be a little bigger
2147 than the real time, to compensate for timer in-accuracy */
2148 bp->cmng.rs_vars.rs_threshold =
2149 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2151 /* resolution of fairness timer */
2152 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2153 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2154 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2156 /* this is the threshold below which we won't arm the timer anymore */
2157 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2159 /* we multiply by 1e3/8 to get bytes/msec.
2160 We don't want the credits to pass a credit
2161 of the t_fair*FAIR_MEM (algorithm resolution) */
2162 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2163 /* since each tick is 4 usec */
2164 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
/* Compute and program per-VN (virtual NIC / multi-function) rate-shaping
 * and fairness parameters for @func from the shared-memory MF config:
 * min/max rate in Mbps (hidden functions get zeroes), a per-period byte
 * quota, and a fairness credit delta; the structs are then written
 * word-by-word into XSTORM internal memory. */
2167 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2169 struct rate_shaping_vars_per_vn m_rs_vn;
2170 struct fairness_vars_per_vn m_fair_vn;
2171 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2172 u16 vn_min_rate, vn_max_rate;
2175 /* If function is hidden - set min and max to zeroes */
2176 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2181 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2182 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2183 /* If fairness is enabled (not all min rates are zeroes) and
2184 if current min rate is zero - set it to 1.
2185 This is a requirement of the algorithm. */
2186 if (bp->vn_weight_sum && (vn_min_rate == 0))
2187 vn_min_rate = DEF_MIN_RATE;
2188 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2189 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2193 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2194 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2196 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2197 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2199 /* global vn counter - maximal Mbps for this vn */
2200 m_rs_vn.vn_counter.rate = vn_max_rate;
2202 /* quota - number of bytes transmitted in this period */
2203 m_rs_vn.vn_counter.quota =
2204 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2206 if (bp->vn_weight_sum) {
2207 /* credit for each period of the fairness algorithm:
2208 number of bytes in T_FAIR (the vn share the port rate).
2209 vn_weight_sum should not be larger than 10000, thus
2210 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2212 m_fair_vn.vn_credit_delta =
2213 max((u32)(vn_min_rate * (T_FAIR_COEF /
2214 (8 * bp->vn_weight_sum))),
2215 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2216 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2217 m_fair_vn.vn_credit_delta);
2220 /* Store it to internal memory */
2221 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2222 REG_WR(bp, BAR_XSTRORM_INTMEM +
2223 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2224 ((u32 *)(&m_rs_vn))[i]);
2226 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2227 REG_WR(bp, BAR_XSTRORM_INTMEM +
2228 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2229 ((u32 *)(&m_fair_vn))[i]);
2233 /* This function is called upon link interrupt */
/* Handle a link-state attention: resync statistics, query the new link
 * state, program E1H dropless-FC pause enable and reset BMAC stats on
 * link-up, report the new state, notify sibling functions via general
 * attention bits, and (re)program port min/max rate shaping. Several
 * enclosing conditions (e.g. the E1H-only multi-function section) are
 * on elided lines. */
2234 static void bnx2x_link_attn(struct bnx2x *bp)
2236 /* Make sure that we are synced with the current statistics */
2237 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2239 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2241 if (bp->link_vars.link_up) {
2243 /* dropless flow control */
2244 if (CHIP_IS_E1H(bp)) {
2245 int port = BP_PORT(bp);
2246 u32 pause_enabled = 0;
2248 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2251 REG_WR(bp, BAR_USTRORM_INTMEM +
2252 USTORM_PAUSE_ENABLED_OFFSET(port),
2256 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2257 struct host_port_stats *pstats;
2259 pstats = bnx2x_sp(bp, port_stats);
2260 /* reset old bmac stats */
2261 memset(&(pstats->mac_stx[0]), 0,
2262 sizeof(struct mac_stx));
2264 if ((bp->state == BNX2X_STATE_OPEN) ||
2265 (bp->state == BNX2X_STATE_DISABLED))
2266 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2269 /* indicate link status */
2270 bnx2x_link_report(bp);
2273 int port = BP_PORT(bp);
2277 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2278 if (vn == BP_E1HVN(bp))
2281 func = ((vn << 1) | port);
2283 /* Set the attention towards other drivers
2285 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2286 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2289 if (bp->link_vars.link_up) {
2292 /* Init rate shaping and fairness contexts */
2293 bnx2x_init_port_minmax(bp);
2295 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2296 bnx2x_init_vn_minmax(bp, 2*vn + port);
2298 /* Store it to internal memory */
2300 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2301 REG_WR(bp, BAR_XSTRORM_INTMEM +
2302 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2303 ((u32 *)(&bp->cmng))[i]);
/* Poll-path link refresh (only while the device is OPEN): re-read link
 * state, kick the statistics state machine accordingly, and report. */
2308 static void bnx2x__link_status_update(struct bnx2x *bp)
2310 if (bp->state != BNX2X_STATE_OPEN)
2313 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2315 if (bp->link_vars.link_up)
2316 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2318 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2320 /* indicate link status */
2321 bnx2x_link_report(bp);
/* Called when this function becomes the Port Management Function (PMF):
 * enable the NIG attention for our E1H VN on both HC edge registers and
 * notify the statistics state machine. */
2324 static void bnx2x_pmf_update(struct bnx2x *bp)
2326 int port = BP_PORT(bp);
2330 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2332 /* enable nig attention */
2333 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2334 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2335 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2337 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2345 * General service functions
2348 /* the slow path queue is odd since completions arrive on the fastpath ring */
/* Post one slowpath (SPQ) entry: build the BD under spq_lock, wrap the
 * producer at the ring end, and ring the XSTORM doorbell with the new
 * producer index. Returns 0 on success; the SPQ-full path unlocks and
 * returns an error (return statement on an elided line).
 * @common marks ramrods not tied to a single connection. */
2349 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2350 u32 data_hi, u32 data_lo, int common)
2352 int func = BP_FUNC(bp);
2354 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2355 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2356 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2357 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2358 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2360 #ifdef BNX2X_STOP_ON_ERROR
2361 if (unlikely(bp->panic))
2365 spin_lock_bh(&bp->spq_lock);
2367 if (!bp->spq_left) {
2368 BNX2X_ERR("BUG! SPQ ring full!\n");
2369 spin_unlock_bh(&bp->spq_lock);
2374 /* CID needs port number to be encoded int it */
2375 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2376 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2378 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2380 bp->spq_prod_bd->hdr.type |=
2381 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2383 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2384 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2388 if (bp->spq_prod_bd == bp->spq_last_bd) {
2389 bp->spq_prod_bd = bp->spq;
2390 bp->spq_prod_idx = 0;
2391 DP(NETIF_MSG_TIMER, "end of spq\n");
2398 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2401 spin_unlock_bh(&bp->spq_lock);
2405 /* acquire split MCP access lock register */
2406 static int bnx2x_acquire_alr(struct bnx2x *bp)
2413 for (j = 0; j < i*10; j++) {
2415 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2416 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2417 if (val & (1L << 31))
2422 if (!(val & (1L << 31))) {
2423 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2430 /* release split MCP access lock register */
2431 static void bnx2x_release_alr(struct bnx2x *bp)
2435 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
/* Snapshot the default status block indices (attention + per-storm) into
 * bp and return a bitmask of which ones changed (the bit-set statements
 * and the return are on elided lines). The barrier orders our reads
 * against the chip's DMA writes into the status block. */
2438 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2440 struct host_def_status_block *def_sb = bp->def_status_blk;
2443 barrier(); /* status block is written to by the chip */
2444 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2445 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2448 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2449 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2452 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2453 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2456 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2457 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2460 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2461 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2468 * slow path service functions
/* Handle newly-asserted attention bits: mask them in the AEU (under the
 * per-port attention HW lock), record them in bp->attn_state, service
 * the hard-wired sources (NIG/link — with NIG interrupts masked around
 * bnx2x_link_attn() — timers, GPIOs, general attentions 1-6), then ack
 * the bits at the HC and restore the NIG mask. */
2471 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2473 int port = BP_PORT(bp);
2474 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2475 COMMAND_REG_ATTN_BITS_SET);
2476 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2477 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2478 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2479 NIG_REG_MASK_INTERRUPT_PORT0;
2483 if (bp->attn_state & asserted)
2484 BNX2X_ERR("IGU ERROR\n");
2486 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2487 aeu_mask = REG_RD(bp, aeu_addr);
2489 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2490 aeu_mask, asserted);
2491 aeu_mask &= ~(asserted & 0xff);
2492 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2494 REG_WR(bp, aeu_addr, aeu_mask);
2495 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2497 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2498 bp->attn_state |= asserted;
2499 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2501 if (asserted & ATTN_HARD_WIRED_MASK) {
2502 if (asserted & ATTN_NIG_FOR_FUNC) {
2504 bnx2x_acquire_phy_lock(bp);
2506 /* save nig interrupt mask */
2507 nig_mask = REG_RD(bp, nig_int_mask_addr);
2508 REG_WR(bp, nig_int_mask_addr, 0);
2510 bnx2x_link_attn(bp);
2512 /* handle unicore attn? */
2514 if (asserted & ATTN_SW_TIMER_4_FUNC)
2515 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2517 if (asserted & GPIO_2_FUNC)
2518 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2520 if (asserted & GPIO_3_FUNC)
2521 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2523 if (asserted & GPIO_4_FUNC)
2524 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2527 if (asserted & ATTN_GENERAL_ATTN_1) {
2528 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2529 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2531 if (asserted & ATTN_GENERAL_ATTN_2) {
2532 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2533 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2535 if (asserted & ATTN_GENERAL_ATTN_3) {
2536 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2537 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2540 if (asserted & ATTN_GENERAL_ATTN_4) {
2541 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2542 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2544 if (asserted & ATTN_GENERAL_ATTN_5) {
2545 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2546 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2548 if (asserted & ATTN_GENERAL_ATTN_6) {
2549 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2550 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2554 } /* if hardwired */
2556 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2558 REG_WR(bp, hc_addr, asserted);
2560 /* now set back the mask */
2561 if (asserted & ATTN_NIG_FOR_FUNC) {
2562 REG_WR(bp, nig_int_mask_addr, nig_mask);
2563 bnx2x_release_phy_lock(bp);
/* Service deasserted attentions from AEU group 0: SPIO5 (fan failure on
 * SFX7101-based boards), GPIO3 module-detect events, and fatal HW-block
 * attentions in HW_INTERRUT_ASSERT_SET_0.  For each, the corresponding
 * AEU enable bit is cleared so the attention cannot re-fire. */
2567 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2569 int port = BP_PORT(bp);
2573 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2574 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2576 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
/* disable the SPIO5 attention in the AEU before handling it */
2578 val = REG_RD(bp, reg_offset);
2579 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2580 REG_WR(bp, reg_offset, val);
2582 BNX2X_ERR("SPIO5 hw attention\n");
2584 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2585 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2586 /* Fan failure attention */
2588 /* The PHY reset is controlled by GPIO 1 */
2589 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2590 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2591 /* Low power mode is controlled by GPIO 2 */
2592 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2593 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2594 /* mark the failure */
/* replace the ext PHY type with FAILURE so the link code stops
 * touching the dead PHY */
2595 bp->link_params.ext_phy_config &=
2596 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2597 bp->link_params.ext_phy_config |=
2598 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2600 dev_info.port_hw_config[port].
2601 external_phy_config,
2602 bp->link_params.ext_phy_config);
2603 /* log the failure */
2604 printk(KERN_ERR PFX "Fan Failure on Network"
2605 " Controller %s has caused the driver to"
2606 " shutdown the card to prevent permanent"
2607 " damage. Please contact Dell Support for"
2608 " assistance\n", bp->dev->name);
/* optical-module detect interrupt (GPIO3 on either function); needs
 * the PHY lock because it touches MDIO */
2616 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2617 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2618 bnx2x_acquire_phy_lock(bp);
2619 bnx2x_handle_module_detect_int(&bp->link_params);
2620 bnx2x_release_phy_lock(bp);
/* fatal HW block attentions: mask them in the AEU and report */
2623 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2625 val = REG_RD(bp, reg_offset);
2626 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2627 REG_WR(bp, reg_offset, val);
2629 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2630 (attn & HW_INTERRUT_ASSERT_SET_0));
/* Service deasserted attentions from AEU group 1: doorbell-queue (DORQ)
 * errors and fatal HW-block attentions in HW_INTERRUT_ASSERT_SET_1. */
2635 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2639 if (attn & BNX2X_DOORQ_ASSERT) {
/* reading the clear register also acknowledges the DORQ interrupt */
2641 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2642 BNX2X_ERR("DB hw attention 0x%x\n", val);
2643 /* DORQ discard attention */
2645 BNX2X_ERR("FATAL error from DORQ\n");
2648 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2650 int port = BP_PORT(bp);
2653 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2654 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
/* mask the fatal attentions in the AEU so they cannot re-fire */
2656 val = REG_RD(bp, reg_offset);
2657 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2658 REG_WR(bp, reg_offset, val);
2660 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2661 (attn & HW_INTERRUT_ASSERT_SET_1));
/* Service deasserted attentions from AEU group 2: CFC and PXP block
 * errors, and fatal HW-block attentions in HW_INTERRUT_ASSERT_SET_2. */
2666 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2670 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
/* reading the clear register acknowledges the CFC interrupt */
2672 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2673 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2674 /* CFC error attention */
2676 BNX2X_ERR("FATAL error from CFC\n");
2679 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
/* reading the clear register acknowledges the PXP interrupt */
2681 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2682 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2683 /* RQ_USDMDP_FIFO_OVERFLOW */
2685 BNX2X_ERR("FATAL error from PXP\n");
2688 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2690 int port = BP_PORT(bp);
2693 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2694 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
/* mask the fatal attentions in the AEU so they cannot re-fire */
2696 val = REG_RD(bp, reg_offset);
2697 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2698 REG_WR(bp, reg_offset, val);
2700 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2701 (attn & HW_INTERRUT_ASSERT_SET_2));
/* Service deasserted attentions from AEU group 3: general attentions
 * (PMF link event, microcode/MCP asserts) and latched attentions
 * (GRC timeout / GRC reserved).  Each source is acknowledged by
 * clearing its general-attention register; latched sources are cleared
 * via MISC_REG_AEU_CLR_LATCH_SIGNAL at the end. */
2706 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2710 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2712 if (attn & BNX2X_PMF_LINK_ASSERT) {
/* PMF signalled a link/status change for this function */
2713 int func = BP_FUNC(bp);
2715 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2716 bnx2x__link_status_update(bp);
/* the MCP may also be asking us to become the new PMF */
2717 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2719 bnx2x_pmf_update(bp);
2721 } else if (attn & BNX2X_MC_ASSERT_BITS) {
/* on-chip microcode assert: ack all four storm attn registers */
2723 BNX2X_ERR("MC assert!\n");
2724 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2725 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2726 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2727 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2730 } else if (attn & BNX2X_MCP_ASSERT) {
2732 BNX2X_ERR("MCP assert!\n");
2733 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2737 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2740 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2741 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
/* GRC diagnostics registers only exist on E1H; read 0 on E1 */
2742 if (attn & BNX2X_GRC_TIMEOUT) {
2743 val = CHIP_IS_E1H(bp) ?
2744 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2745 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2747 if (attn & BNX2X_GRC_RSV) {
2748 val = CHIP_IS_E1H(bp) ?
2749 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2750 BNX2X_ERR("GRC reserved 0x%08x\n", val);
/* clear all latched attention signals */
2752 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
/* Handle newly *deasserted* attention bits: read the after-invert AEU
 * signal registers under the ALR (shared with MCP / other port), run the
 * per-group deasserted handlers for every group whose bit dropped, check
 * for parity attentions, ack via the HC "attn bits clear" register,
 * re-enable the bits in the AEU mask, and clear them from bp->attn_state.
 * NOTE(review): locals such as 'index', 'reg_addr', 'val' and 'aeu_mask'
 * are declared on lines not visible in this extract. */
2756 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2758 struct attn_route attn;
2759 struct attn_route group_mask;
2760 int port = BP_PORT(bp);
2766 /* need to take HW lock because MCP or other port might also
2767 try to handle this event */
2768 bnx2x_acquire_alr(bp);
/* snapshot all four after-invert attention signal words for this port */
2770 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2771 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2772 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2773 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2774 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2775 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2777 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2778 if (deasserted & (1 << index)) {
/* restrict handling to the signals routed to this group */
2779 group_mask = bp->attn_group[index];
2781 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2782 index, group_mask.sig[0], group_mask.sig[1],
2783 group_mask.sig[2], group_mask.sig[3]);
2785 bnx2x_attn_int_deasserted3(bp,
2786 attn.sig[3] & group_mask.sig[3]);
2787 bnx2x_attn_int_deasserted1(bp,
2788 attn.sig[1] & group_mask.sig[1]);
2789 bnx2x_attn_int_deasserted2(bp,
2790 attn.sig[2] & group_mask.sig[2]);
2791 bnx2x_attn_int_deasserted0(bp,
2792 attn.sig[0] & group_mask.sig[0]);
/* parity errors in any HW block are fatal */
2794 if ((attn.sig[0] & group_mask.sig[0] &
2795 HW_PRTY_ASSERT_SET_0) ||
2796 (attn.sig[1] & group_mask.sig[1] &
2797 HW_PRTY_ASSERT_SET_1) ||
2798 (attn.sig[2] & group_mask.sig[2] &
2799 HW_PRTY_ASSERT_SET_2))
2800 BNX2X_ERR("FATAL HW block parity attention\n");
2804 bnx2x_release_alr(bp);
/* acknowledge the deasserted bits to the host coalescing block */
2806 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2809 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2811 REG_WR(bp, reg_addr, val);
/* deasserting a bit we never recorded as asserted is an IGU bug */
2813 if (~bp->attn_state & deasserted)
2814 BNX2X_ERR("IGU ERROR\n");
2816 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2817 MISC_REG_AEU_MASK_ATTN_FUNC_0;
/* unmask the handled bits in the AEU (register shared with the MCP) */
2819 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2820 aeu_mask = REG_RD(bp, reg_addr);
2822 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2823 aeu_mask, deasserted);
2824 aeu_mask |= (deasserted & 0xff);
2825 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2827 REG_WR(bp, reg_addr, aeu_mask);
2828 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2830 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2831 bp->attn_state &= ~deasserted;
2832 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
/* Top-level attention dispatcher: compare the chip's attention bits with
 * the acknowledged bits and the driver's cached state, derive which bits
 * were newly asserted / deasserted, and route each set to its handler. */
2835 static void bnx2x_attn_int(struct bnx2x *bp)
2837 /* read local copy of bits */
2838 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2840 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2842 u32 attn_state = bp->attn_state;
2844 /* look for changed bits */
/* asserted: raised by chip, not yet acked, not yet in our state */
2845 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
/* deasserted: dropped by chip, still acked and still in our state */
2846 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2849 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2850 attn_bits, attn_ack, asserted, deasserted);
/* a bit where bits==ack but differs from our state means we lost sync */
2852 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2853 BNX2X_ERR("BAD attention state\n");
2855 /* handle bits that were raised */
2857 bnx2x_attn_int_asserted(bp, asserted);
2860 bnx2x_attn_int_deasserted(bp, deasserted);
/* Slowpath work item (scheduled from the slowpath ISR): refresh the
 * default status block indices and acknowledge each storm's index back
 * to the IGU, re-enabling interrupts.
 * NOTE(review): the attention/cstorm handling between the status read
 * and the acks (e.g. the call into bnx2x_attn_int) falls on lines not
 * visible in this extract. */
2863 static void bnx2x_sp_task(struct work_struct *work)
2865 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2869 /* Return here if interrupt is disabled */
2870 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2871 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
/* bitmask of which default-SB indices changed */
2875 status = bnx2x_update_dsb_idx(bp);
2876 /* if (status == 0) */
2877 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2879 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
/* ack each storm's consumer index so the IGU can raise new events */
2885 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2887 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2889 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2891 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2893 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
/* MSI-X slowpath interrupt handler: disable further IGU slowpath
 * interrupts and defer the real work to bnx2x_sp_task via the driver's
 * workqueue (register access in the work item may sleep). */
2898 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2900 struct net_device *dev = dev_instance;
2901 struct bnx2x *bp = netdev_priv(dev);
2903 /* Return here if interrupt is disabled */
2904 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2905 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
/* ack with IGU_INT_DISABLE: silence the slowpath line until the
 * work item re-enables it */
2909 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2911 #ifdef BNX2X_STOP_ON_ERROR
2912 if (unlikely(bp->panic))
/* run the slowpath handler in process context, immediately */
2916 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2921 /* end of slow path */
2925 /****************************************************************************
2927 ****************************************************************************/
2929 /* sum[hi:lo] += add[hi:lo] */
/* 64-bit add split across two u32 halves: carry into the high word when
 * the low-word add wrapped.  NOTE(review): the do { } while (0) wrappers
 * of these macros fall on lines not visible in this extract. */
2930 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2933 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2936 /* difference = minuend - subtrahend */
/* 64-bit subtract split across two u32 halves, borrowing 1 from the high
 * word when the low-word subtract would underflow; clamps to 0 (in lines
 * not visible here) when the subtrahend exceeds the minuend. */
2937 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2939 if (m_lo < s_lo) { \
2941 d_hi = m_hi - s_hi; \
2943 /* we can 'loan' 1 */ \
2945 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2947 /* m_hi <= s_hi */ \
2952 /* m_lo >= s_lo */ \
2953 if (m_hi < s_hi) { \
2957 /* m_hi >= s_hi */ \
2958 d_hi = m_hi - s_hi; \
2959 d_lo = m_lo - s_lo; \
/* MAC stat update: mac_stx[0] holds the raw HW snapshot, mac_stx[1]
 * accumulates deltas across HW counter resets. */
2964 #define UPDATE_STAT64(s, t) \
2966 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2967 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2968 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2969 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2970 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2971 pstats->mac_stx[1].t##_lo, diff.lo); \
/* NIG stat update: accumulate new-vs-old delta straight into estats */
2974 #define UPDATE_STAT64_NIG(s, t) \
2976 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2977 diff.lo, new->s##_lo, old->s##_lo); \
2978 ADD_64(estats->t##_hi, diff.hi, \
2979 estats->t##_lo, diff.lo); \
2982 /* sum[hi:lo] += add */
2983 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2986 s_hi += (s_lo < a) ? 1 : 0; \
/* extend a 32-bit EMAC counter into the 64-bit mac_stx[1] accumulator */
2989 #define UPDATE_EXTEND_STAT(s) \
2991 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2992 pstats->mac_stx[1].s##_lo, \
/* TSTORM per-client counter: delta against cached old value (u32 wrap
 * is handled by the unsigned subtraction), then extend to 64 bits */
2996 #define UPDATE_EXTEND_TSTAT(s, t) \
2998 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2999 old_tclient->s = le32_to_cpu(tclient->s); \
3000 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
/* same for USTORM; note old_uclient keeps the raw little-endian value */
3003 #define UPDATE_EXTEND_USTAT(s, t) \
3005 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3006 old_uclient->s = uclient->s; \
3007 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
/* same for XSTORM */
3010 #define UPDATE_EXTEND_XSTAT(s, t) \
3012 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3013 old_xclient->s = le32_to_cpu(xclient->s); \
3014 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3017 /* minuend -= subtrahend */
3018 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3020 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3023 /* minuend[hi:lo] -= subtrahend */
3024 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3026 SUB_64(m_hi, 0, m_lo, s); \
/* subtract a USTORM per-client delta from a 64-bit queue stat */
3029 #define SUB_EXTEND_USTAT(s, t) \
3031 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3032 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3036 * General service functions
/* Combine a {hi, lo} u32 pair (stored hi-first in memory) into a long.
 * NOTE(review): the 'hi' load and the 32-bit (#else) fallback that
 * returns only 'lo' fall on lines not visible in this extract. */
3039 static inline long bnx2x_hilo(u32 *hiref)
/* low word is stored immediately after the high word */
3041 u32 lo = *(hiref + 1);
3042 #if (BITS_PER_LONG == 64)
3045 return HILO_U64(hi, lo);
3052 * Init service functions
/* Post a STAT_QUERY ramrod asking the storms to dump per-client stats.
 * At most one query is in flight: stats_pending gates re-posting until
 * the previous ramrod completes. */
3055 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3057 if (!bp->stats_pending) {
3058 struct eth_query_ramrod_data ramrod_data = {0};
/* drv_counter lets us match the completion to this request */
3061 ramrod_data.drv_counter = bp->stats_counter++;
3062 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
/* request stats for every client (queue) of this function */
3063 for_each_queue(bp, i)
3064 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
/* ramrod data is passed as two u32s: high word first */
3066 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3067 ((u32 *)&ramrod_data)[1],
3068 ((u32 *)&ramrod_data)[0], 0);
3070 /* stats ramrod has it's own slot on the spq */
3072 bp->stats_pending = 1;
/* Reset all statistics state at (re)initialization: shared-memory stats
 * locations, baseline NIG counters, per-queue storm snapshots, and the
 * netdev/driver counters.  Leaves the stats machine DISABLED, kicking a
 * PMF event only for the multi-function PMF with a valid port_stx. */
3077 static void bnx2x_stats_init(struct bnx2x *bp)
3079 int port = BP_PORT(bp);
3082 bp->stats_pending = 0;
3083 bp->executer_idx = 0;
3084 bp->stats_counter = 0;
/* location of the port stats block in shared memory (0 if none) */
3088 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3090 bp->port.port_stx = 0;
3091 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
/* baseline the NIG counters so later updates report deltas only */
3093 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3094 bp->port.old_nig_stats.brb_discard =
3095 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3096 bp->port.old_nig_stats.brb_truncate =
3097 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3098 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3099 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3100 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3101 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3103 /* function stats */
3104 for_each_queue(bp, i) {
3105 struct bnx2x_fastpath *fp = &bp->fp[i];
/* clear the cached storm snapshots used for delta computation */
3107 memset(&fp->old_tclient, 0,
3108 sizeof(struct tstorm_per_client_stats));
3109 memset(&fp->old_uclient, 0,
3110 sizeof(struct ustorm_per_client_stats));
3111 memset(&fp->old_xclient, 0,
3112 sizeof(struct xstorm_per_client_stats));
3113 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3116 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3117 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3119 bp->stats_state = STATS_STATE_DISABLED;
3120 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3121 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
/* Kick off the DMAE transfers prepared by the *_stats_init functions.
 * If a chain of commands was built (executer_idx > 0), a "loader" DMAE
 * command is written that copies the first real command into the DMAE
 * command memory and triggers it; otherwise (function stats only) the
 * single prepared command is posted directly. */
3124 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3126 struct dmae_command *dmae = &bp->stats_dmae;
3127 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
/* sentinel overwritten by the DMAE completion write */
3129 *stats_comp = DMAE_COMP_VAL;
3130 if (CHIP_REV_IS_SLOW(bp))
3134 if (bp->executer_idx) {
3135 int loader_idx = PMF_DMAE_C(bp);
3137 memset(dmae, 0, sizeof(struct dmae_command));
/* loader command: PCI -> GRC copy of the first chained command */
3139 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3140 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3141 DMAE_CMD_DST_RESET |
3143 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3145 DMAE_CMD_ENDIANITY_DW_SWAP |
3147 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3149 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3150 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3151 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
/* destination: slot loader_idx+1 in the DMAE command memory */
3152 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3153 sizeof(struct dmae_command) *
3154 (loader_idx + 1)) >> 2;
3155 dmae->dst_addr_hi = 0;
3156 dmae->len = sizeof(struct dmae_command) >> 2;
/* completion: write to the "go" register of the next channel,
 * which launches the copied command */
3159 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3160 dmae->comp_addr_hi = 0;
3164 bnx2x_post_dmae(bp, dmae, loader_idx);
3166 } else if (bp->func_stx) {
3168 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
/* Busy-wait until the DMAE stats completion sentinel is overwritten,
 * logging a timeout if it never arrives.  NOTE(review): the delay,
 * loop-counter and return lines are not visible in this extract. */
3172 static int bnx2x_stats_comp(struct bnx2x *bp)
3174 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3178 while (*stats_comp != DMAE_COMP_VAL) {
3180 BNX2X_ERR("timeout waiting for stats finished\n");
3190 * Statistics service functions
/* On becoming PMF: read the current port statistics back from shared
 * memory into host memory via two chained DMAE reads (the block is
 * larger than one DMAE transfer), then post and wait for completion.
 * Only valid for the multi-function PMF with a configured port_stx. */
3193 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3195 struct dmae_command *dmae;
3197 int loader_idx = PMF_DMAE_C(bp);
3198 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3201 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3202 BNX2X_ERR("BUG!\n");
3206 bp->executer_idx = 0;
/* common opcode: GRC (shared mem) -> PCI (host) read */
3208 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3210 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3212 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3214 DMAE_CMD_ENDIANITY_DW_SWAP |
3216 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3217 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
/* first chunk: up to DMAE_LEN32_RD_MAX dwords, completion chains to
 * the next DMAE channel */
3219 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3220 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3221 dmae->src_addr_lo = bp->port.port_stx >> 2;
3222 dmae->src_addr_hi = 0;
3223 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3224 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3225 dmae->len = DMAE_LEN32_RD_MAX;
3226 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3227 dmae->comp_addr_hi = 0;
/* second chunk: the remainder, completion writes the sentinel to
 * host memory so bnx2x_stats_comp() can observe it */
3230 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3231 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3232 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3233 dmae->src_addr_hi = 0;
3234 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3235 DMAE_LEN32_RD_MAX * 4);
3236 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3237 DMAE_LEN32_RD_MAX * 4);
3238 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3239 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3240 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3241 dmae->comp_val = DMAE_COMP_VAL;
3244 bnx2x_hw_stats_post(bp);
3245 bnx2x_stats_comp(bp);
/* Build the full per-port statistics DMAE chain (PMF only, link up):
 *  - write host port/function stats out to shared memory,
 *  - read MAC counters (BMAC or EMAC, depending on the active MAC) into
 *    host memory,
 *  - read NIG counters into host memory,
 * with the final command completing to the stats_comp sentinel.  The
 * chain is executed later by bnx2x_hw_stats_post(). */
3248 static void bnx2x_port_stats_init(struct bnx2x *bp)
3250 struct dmae_command *dmae;
3251 int port = BP_PORT(bp);
3252 int vn = BP_E1HVN(bp);
3254 int loader_idx = PMF_DMAE_C(bp);
3256 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3259 if (!bp->link_vars.link_up || !bp->port.pmf) {
3260 BNX2X_ERR("BUG!\n");
3264 bp->executer_idx = 0;
/* --- outbound: host -> shared memory (PCI -> GRC writes) --- */
3267 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3268 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3269 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3271 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3273 DMAE_CMD_ENDIANITY_DW_SWAP |
3275 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3276 (vn << DMAE_CMD_E1HVN_SHIFT));
3278 if (bp->port.port_stx) {
/* copy host_port_stats out to the shared-memory port mailbox */
3280 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3281 dmae->opcode = opcode;
3282 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3283 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3284 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3285 dmae->dst_addr_hi = 0;
3286 dmae->len = sizeof(struct host_port_stats) >> 2;
3287 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3288 dmae->comp_addr_hi = 0;
/* copy host_func_stats out to the shared-memory function mailbox
 * (guarded by bp->func_stx on a line not visible here) */
3294 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3295 dmae->opcode = opcode;
3296 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3297 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3298 dmae->dst_addr_lo = bp->func_stx >> 2;
3299 dmae->dst_addr_hi = 0;
3300 dmae->len = sizeof(struct host_func_stats) >> 2;
3301 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3302 dmae->comp_addr_hi = 0;
/* --- inbound: MAC/NIG counters -> host (GRC -> PCI reads) --- */
3307 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3308 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3309 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3311 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3313 DMAE_CMD_ENDIANITY_DW_SWAP |
3315 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3316 (vn << DMAE_CMD_E1HVN_SHIFT));
3318 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3320 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3321 NIG_REG_INGRESS_BMAC0_MEM);
3323 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3324 BIGMAC_REGISTER_TX_STAT_GTBYT */
3325 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3326 dmae->opcode = opcode;
3327 dmae->src_addr_lo = (mac_addr +
3328 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3329 dmae->src_addr_hi = 0;
3330 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3331 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
/* +8: include the last (64-bit) counter of the range */
3332 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3333 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3334 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3335 dmae->comp_addr_hi = 0;
3338 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3339 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3340 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3341 dmae->opcode = opcode;
3342 dmae->src_addr_lo = (mac_addr +
3343 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3344 dmae->src_addr_hi = 0;
3345 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3346 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3347 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3348 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3349 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3350 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3351 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3352 dmae->comp_addr_hi = 0;
3355 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3357 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3359 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3360 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3361 dmae->opcode = opcode;
3362 dmae->src_addr_lo = (mac_addr +
3363 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3364 dmae->src_addr_hi = 0;
3365 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3366 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3367 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3368 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3369 dmae->comp_addr_hi = 0;
3372 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3373 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3374 dmae->opcode = opcode;
3375 dmae->src_addr_lo = (mac_addr +
3376 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3377 dmae->src_addr_hi = 0;
3378 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3379 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3380 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3381 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3383 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3384 dmae->comp_addr_hi = 0;
3387 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3388 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3389 dmae->opcode = opcode;
3390 dmae->src_addr_lo = (mac_addr +
3391 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3392 dmae->src_addr_hi = 0;
3393 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3394 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3395 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3396 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3397 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3398 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3399 dmae->comp_addr_hi = 0;
/* --- NIG counters: everything except the two egress packet pairs --- */
3404 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3405 dmae->opcode = opcode;
3406 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3407 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3408 dmae->src_addr_hi = 0;
3409 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3410 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3411 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3412 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3413 dmae->comp_addr_hi = 0;
/* NIG egress_mac_pkt0 (64-bit counter, two dwords) */
3416 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3417 dmae->opcode = opcode;
3418 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3419 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3420 dmae->src_addr_hi = 0;
3421 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3422 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3423 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3424 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3425 dmae->len = (2*sizeof(u32)) >> 2;
3426 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3427 dmae->comp_addr_hi = 0;
/* NIG egress_mac_pkt1: final command in the chain, so its completion
 * targets the host stats_comp sentinel instead of the next channel */
3430 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3431 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3432 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3433 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3435 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3437 DMAE_CMD_ENDIANITY_DW_SWAP |
3439 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3440 (vn << DMAE_CMD_E1HVN_SHIFT));
3441 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3442 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3443 dmae->src_addr_hi = 0;
3444 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3445 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3446 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3447 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3448 dmae->len = (2*sizeof(u32)) >> 2;
3449 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3450 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3451 dmae->comp_val = DMAE_COMP_VAL;
/* Prepare a single DMAE command (non-PMF path) that writes this
 * function's host_func_stats block out to shared memory; the command is
 * posted later by bnx2x_hw_stats_post().  Requires a valid func_stx. */
3456 static void bnx2x_func_stats_init(struct bnx2x *bp)
3458 struct dmae_command *dmae = &bp->stats_dmae;
3459 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3462 if (!bp->func_stx) {
3463 BNX2X_ERR("BUG!\n");
/* single command, not a chain: executer_idx stays 0 */
3467 bp->executer_idx = 0;
3468 memset(dmae, 0, sizeof(struct dmae_command));
3470 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3471 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3472 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3474 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3476 DMAE_CMD_ENDIANITY_DW_SWAP |
3478 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3479 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3480 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3481 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3482 dmae->dst_addr_lo = bp->func_stx >> 2;
3483 dmae->dst_addr_hi = 0;
3484 dmae->len = sizeof(struct host_func_stats) >> 2;
/* completion writes the sentinel for bnx2x_stats_comp() to poll */
3485 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3486 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3487 dmae->comp_val = DMAE_COMP_VAL;
/* Start a stats cycle: build the DMAE program (full port chain for the
 * PMF, single function command otherwise), then post both the HW DMAE
 * transfers and the storm stats ramrod.
 * NOTE(review): the PMF condition guarding bnx2x_port_stats_init falls
 * on a line not visible in this extract. */
3492 static void bnx2x_stats_start(struct bnx2x *bp)
3495 bnx2x_port_stats_init(bp);
3497 else if (bp->func_stx)
3498 bnx2x_func_stats_init(bp);
3500 bnx2x_hw_stats_post(bp);
3501 bnx2x_storm_stats_post(bp);
/* PMF-change event: wait out any in-flight DMAE, re-read the port stats
 * from shared memory, then (re)start the stats cycle. */
3504 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3506 bnx2x_stats_comp(bp);
3507 bnx2x_stats_pmf_update(bp);
3508 bnx2x_stats_start(bp);
/* Restart the stats cycle: wait for any in-flight DMAE completion, then
 * start a fresh cycle. */
3511 static void bnx2x_stats_restart(struct bnx2x *bp)
3513 bnx2x_stats_comp(bp);
3514 bnx2x_stats_start(bp);
/* Fold the freshly DMAE'd BMAC counters into the accumulated port stats
 * (via UPDATE_STAT64: mac_stx[0] = raw snapshot, mac_stx[1] += delta)
 * and derive the pause-frame totals for ethtool. */
3517 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3519 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3520 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3521 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3522 struct regpair diff;
3524 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3525 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3526 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3527 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3528 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3529 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3530 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
/* grxpf feeds both the xoff-entered and the pause-frame counters */
3531 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3532 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3533 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3534 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3535 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3536 UPDATE_STAT64(tx_stat_gt127,
3537 tx_stat_etherstatspkts65octetsto127octets);
3538 UPDATE_STAT64(tx_stat_gt255,
3539 tx_stat_etherstatspkts128octetsto255octets);
3540 UPDATE_STAT64(tx_stat_gt511,
3541 tx_stat_etherstatspkts256octetsto511octets);
3542 UPDATE_STAT64(tx_stat_gt1023,
3543 tx_stat_etherstatspkts512octetsto1023octets);
3544 UPDATE_STAT64(tx_stat_gt1518,
3545 tx_stat_etherstatspkts1024octetsto1522octets);
3546 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3547 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3548 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3549 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3550 UPDATE_STAT64(tx_stat_gterr,
3551 tx_stat_dot3statsinternalmactransmiterrors);
3552 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
/* publish accumulated pause counters to the ethtool-visible stats */
3554 estats->pause_frames_received_hi =
3555 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3556 estats->pause_frames_received_lo =
3557 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3559 estats->pause_frames_sent_hi =
3560 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3561 estats->pause_frames_sent_lo =
3562 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
/* Fold the freshly DMAE'd EMAC counters into the accumulated port stats
 * (via UPDATE_EXTEND_STAT: each 32-bit HW counter extends a 64-bit
 * accumulator in mac_stx[1]) and derive pause-frame totals (xon + xoff)
 * for ethtool. */
3565 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3567 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3568 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3569 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3571 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3572 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3573 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3574 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3575 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3576 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3577 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3578 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3579 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3580 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3581 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3582 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3583 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3584 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3585 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3586 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3587 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3588 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3589 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3590 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3591 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3592 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3593 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3594 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3595 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3596 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3597 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3598 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3599 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3600 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3601 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
/* pause frames received = xon received + xoff received */
3603 estats->pause_frames_received_hi =
3604 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3605 estats->pause_frames_received_lo =
3606 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3607 ADD_64(estats->pause_frames_received_hi,
3608 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3609 estats->pause_frames_received_lo,
3610 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
/* pause frames sent = xon sent + xoff sent */
3612 estats->pause_frames_sent_hi =
3613 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3614 estats->pause_frames_sent_lo =
3615 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3616 ADD_64(estats->pause_frames_sent_hi,
3617 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3618 estats->pause_frames_sent_lo,
3619 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3622 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3624 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3625 struct nig_stats *old = &(bp->port.old_nig_stats);
3626 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3627 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3628 struct regpair diff;
3631 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3632 bnx2x_bmac_stats_update(bp);
3634 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3635 bnx2x_emac_stats_update(bp);
3637 else { /* unreached */
3638 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3642 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3643 new->brb_discard - old->brb_discard);
3644 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3645 new->brb_truncate - old->brb_truncate);
3647 UPDATE_STAT64_NIG(egress_mac_pkt0,
3648 etherstatspkts1024octetsto1522octets);
3649 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3651 memcpy(old, new, sizeof(struct nig_stats));
3653 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3654 sizeof(struct mac_stx));
3655 estats->brb_drop_hi = pstats->brb_drop_hi;
3656 estats->brb_drop_lo = pstats->brb_drop_lo;
3658 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3660 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3661 if (nig_timer_max != estats->nig_timer_max) {
3662 estats->nig_timer_max = nig_timer_max;
3663 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3669 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3671 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3672 struct tstorm_per_port_stats *tport =
3673 &stats->tstorm_common.port_statistics;
3674 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3675 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3678 memset(&(fstats->total_bytes_received_hi), 0,
3679 sizeof(struct host_func_stats) - 2*sizeof(u32));
3680 estats->error_bytes_received_hi = 0;
3681 estats->error_bytes_received_lo = 0;
3682 estats->etherstatsoverrsizepkts_hi = 0;
3683 estats->etherstatsoverrsizepkts_lo = 0;
3684 estats->no_buff_discard_hi = 0;
3685 estats->no_buff_discard_lo = 0;
3687 for_each_queue(bp, i) {
3688 struct bnx2x_fastpath *fp = &bp->fp[i];
3689 int cl_id = fp->cl_id;
3690 struct tstorm_per_client_stats *tclient =
3691 &stats->tstorm_common.client_statistics[cl_id];
3692 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3693 struct ustorm_per_client_stats *uclient =
3694 &stats->ustorm_common.client_statistics[cl_id];
3695 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3696 struct xstorm_per_client_stats *xclient =
3697 &stats->xstorm_common.client_statistics[cl_id];
3698 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3699 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3702 /* are storm stats valid? */
3703 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3704 bp->stats_counter) {
3705 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3706 " xstorm counter (%d) != stats_counter (%d)\n",
3707 i, xclient->stats_counter, bp->stats_counter);
3710 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3711 bp->stats_counter) {
3712 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3713 " tstorm counter (%d) != stats_counter (%d)\n",
3714 i, tclient->stats_counter, bp->stats_counter);
3717 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3718 bp->stats_counter) {
3719 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3720 " ustorm counter (%d) != stats_counter (%d)\n",
3721 i, uclient->stats_counter, bp->stats_counter);
3725 qstats->total_bytes_received_hi =
3726 qstats->valid_bytes_received_hi =
3727 le32_to_cpu(tclient->total_rcv_bytes.hi);
3728 qstats->total_bytes_received_lo =
3729 qstats->valid_bytes_received_lo =
3730 le32_to_cpu(tclient->total_rcv_bytes.lo);
3732 qstats->error_bytes_received_hi =
3733 le32_to_cpu(tclient->rcv_error_bytes.hi);
3734 qstats->error_bytes_received_lo =
3735 le32_to_cpu(tclient->rcv_error_bytes.lo);
3737 ADD_64(qstats->total_bytes_received_hi,
3738 qstats->error_bytes_received_hi,
3739 qstats->total_bytes_received_lo,
3740 qstats->error_bytes_received_lo);
3742 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3743 total_unicast_packets_received);
3744 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3745 total_multicast_packets_received);
3746 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3747 total_broadcast_packets_received);
3748 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3749 etherstatsoverrsizepkts);
3750 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3752 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3753 total_unicast_packets_received);
3754 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3755 total_multicast_packets_received);
3756 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3757 total_broadcast_packets_received);
3758 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3759 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3760 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3762 qstats->total_bytes_transmitted_hi =
3763 le32_to_cpu(xclient->total_sent_bytes.hi);
3764 qstats->total_bytes_transmitted_lo =
3765 le32_to_cpu(xclient->total_sent_bytes.lo);
3767 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3768 total_unicast_packets_transmitted);
3769 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3770 total_multicast_packets_transmitted);
3771 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3772 total_broadcast_packets_transmitted);
3774 old_tclient->checksum_discard = tclient->checksum_discard;
3775 old_tclient->ttl0_discard = tclient->ttl0_discard;
3777 ADD_64(fstats->total_bytes_received_hi,
3778 qstats->total_bytes_received_hi,
3779 fstats->total_bytes_received_lo,
3780 qstats->total_bytes_received_lo);
3781 ADD_64(fstats->total_bytes_transmitted_hi,
3782 qstats->total_bytes_transmitted_hi,
3783 fstats->total_bytes_transmitted_lo,
3784 qstats->total_bytes_transmitted_lo);
3785 ADD_64(fstats->total_unicast_packets_received_hi,
3786 qstats->total_unicast_packets_received_hi,
3787 fstats->total_unicast_packets_received_lo,
3788 qstats->total_unicast_packets_received_lo);
3789 ADD_64(fstats->total_multicast_packets_received_hi,
3790 qstats->total_multicast_packets_received_hi,
3791 fstats->total_multicast_packets_received_lo,
3792 qstats->total_multicast_packets_received_lo);
3793 ADD_64(fstats->total_broadcast_packets_received_hi,
3794 qstats->total_broadcast_packets_received_hi,
3795 fstats->total_broadcast_packets_received_lo,
3796 qstats->total_broadcast_packets_received_lo);
3797 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3798 qstats->total_unicast_packets_transmitted_hi,
3799 fstats->total_unicast_packets_transmitted_lo,
3800 qstats->total_unicast_packets_transmitted_lo);
3801 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3802 qstats->total_multicast_packets_transmitted_hi,
3803 fstats->total_multicast_packets_transmitted_lo,
3804 qstats->total_multicast_packets_transmitted_lo);
3805 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3806 qstats->total_broadcast_packets_transmitted_hi,
3807 fstats->total_broadcast_packets_transmitted_lo,
3808 qstats->total_broadcast_packets_transmitted_lo);
3809 ADD_64(fstats->valid_bytes_received_hi,
3810 qstats->valid_bytes_received_hi,
3811 fstats->valid_bytes_received_lo,
3812 qstats->valid_bytes_received_lo);
3814 ADD_64(estats->error_bytes_received_hi,
3815 qstats->error_bytes_received_hi,
3816 estats->error_bytes_received_lo,
3817 qstats->error_bytes_received_lo);
3818 ADD_64(estats->etherstatsoverrsizepkts_hi,
3819 qstats->etherstatsoverrsizepkts_hi,
3820 estats->etherstatsoverrsizepkts_lo,
3821 qstats->etherstatsoverrsizepkts_lo);
3822 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3823 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3826 ADD_64(fstats->total_bytes_received_hi,
3827 estats->rx_stat_ifhcinbadoctets_hi,
3828 fstats->total_bytes_received_lo,
3829 estats->rx_stat_ifhcinbadoctets_lo);
3831 memcpy(estats, &(fstats->total_bytes_received_hi),
3832 sizeof(struct host_func_stats) - 2*sizeof(u32));
3834 ADD_64(estats->etherstatsoverrsizepkts_hi,
3835 estats->rx_stat_dot3statsframestoolong_hi,
3836 estats->etherstatsoverrsizepkts_lo,
3837 estats->rx_stat_dot3statsframestoolong_lo);
3838 ADD_64(estats->error_bytes_received_hi,
3839 estats->rx_stat_ifhcinbadoctets_hi,
3840 estats->error_bytes_received_lo,
3841 estats->rx_stat_ifhcinbadoctets_lo);
3844 estats->mac_filter_discard =
3845 le32_to_cpu(tport->mac_filter_discard);
3846 estats->xxoverflow_discard =
3847 le32_to_cpu(tport->xxoverflow_discard);
3848 estats->brb_truncate_discard =
3849 le32_to_cpu(tport->brb_truncate_discard);
3850 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3853 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3855 bp->stats_pending = 0;
3860 static void bnx2x_net_stats_update(struct bnx2x *bp)
3862 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3863 struct net_device_stats *nstats = &bp->dev->stats;
3866 nstats->rx_packets =
3867 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3868 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3869 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3871 nstats->tx_packets =
3872 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3873 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3874 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3876 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3878 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3880 nstats->rx_dropped = estats->mac_discard;
3881 for_each_queue(bp, i)
3882 nstats->rx_dropped +=
3883 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3885 nstats->tx_dropped = 0;
3888 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3890 nstats->collisions =
3891 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3893 nstats->rx_length_errors =
3894 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3895 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3896 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3897 bnx2x_hilo(&estats->brb_truncate_hi);
3898 nstats->rx_crc_errors =
3899 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3900 nstats->rx_frame_errors =
3901 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3902 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3903 nstats->rx_missed_errors = estats->xxoverflow_discard;
3905 nstats->rx_errors = nstats->rx_length_errors +
3906 nstats->rx_over_errors +
3907 nstats->rx_crc_errors +
3908 nstats->rx_frame_errors +
3909 nstats->rx_fifo_errors +
3910 nstats->rx_missed_errors;
3912 nstats->tx_aborted_errors =
3913 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3914 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3915 nstats->tx_carrier_errors =
3916 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3917 nstats->tx_fifo_errors = 0;
3918 nstats->tx_heartbeat_errors = 0;
3919 nstats->tx_window_errors = 0;
3921 nstats->tx_errors = nstats->tx_aborted_errors +
3922 nstats->tx_carrier_errors +
3923 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3926 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3928 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3931 estats->driver_xoff = 0;
3932 estats->rx_err_discard_pkt = 0;
3933 estats->rx_skb_alloc_failed = 0;
3934 estats->hw_csum_err = 0;
3935 for_each_queue(bp, i) {
3936 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3938 estats->driver_xoff += qstats->driver_xoff;
3939 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3940 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3941 estats->hw_csum_err += qstats->hw_csum_err;
3945 static void bnx2x_stats_update(struct bnx2x *bp)
3947 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3949 if (*stats_comp != DMAE_COMP_VAL)
3953 bnx2x_hw_stats_update(bp);
3955 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3956 BNX2X_ERR("storm stats were not updated for 3 times\n");
3961 bnx2x_net_stats_update(bp);
3962 bnx2x_drv_stats_update(bp);
3964 if (bp->msglevel & NETIF_MSG_TIMER) {
3965 struct tstorm_per_client_stats *old_tclient =
3966 &bp->fp->old_tclient;
3967 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
3968 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3969 struct net_device_stats *nstats = &bp->dev->stats;
3972 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3973 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3975 bnx2x_tx_avail(bp->fp),
3976 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3977 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3979 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3980 bp->fp->rx_comp_cons),
3981 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3982 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
3983 "brb truncate %u\n",
3984 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
3985 qstats->driver_xoff,
3986 estats->brb_drop_lo, estats->brb_truncate_lo);
3987 printk(KERN_DEBUG "tstats: checksum_discard %u "
3988 "packets_too_big_discard %lu no_buff_discard %lu "
3989 "mac_discard %u mac_filter_discard %u "
3990 "xxovrflow_discard %u brb_truncate_discard %u "
3991 "ttl0_discard %u\n",
3992 old_tclient->checksum_discard,
3993 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
3994 bnx2x_hilo(&qstats->no_buff_discard_hi),
3995 estats->mac_discard, estats->mac_filter_discard,
3996 estats->xxoverflow_discard, estats->brb_truncate_discard,
3997 old_tclient->ttl0_discard);
3999 for_each_queue(bp, i) {
4000 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4001 bnx2x_fp(bp, i, tx_pkt),
4002 bnx2x_fp(bp, i, rx_pkt),
4003 bnx2x_fp(bp, i, rx_calls));
4007 bnx2x_hw_stats_post(bp);
4008 bnx2x_storm_stats_post(bp);
4011 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4013 struct dmae_command *dmae;
4015 int loader_idx = PMF_DMAE_C(bp);
4016 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4018 bp->executer_idx = 0;
4020 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4022 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4024 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4026 DMAE_CMD_ENDIANITY_DW_SWAP |
4028 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4029 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4031 if (bp->port.port_stx) {
4033 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4035 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4037 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4038 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4039 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4040 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4041 dmae->dst_addr_hi = 0;
4042 dmae->len = sizeof(struct host_port_stats) >> 2;
4044 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4045 dmae->comp_addr_hi = 0;
4048 dmae->comp_addr_lo =
4049 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4050 dmae->comp_addr_hi =
4051 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4052 dmae->comp_val = DMAE_COMP_VAL;
4060 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4061 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4062 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4063 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4064 dmae->dst_addr_lo = bp->func_stx >> 2;
4065 dmae->dst_addr_hi = 0;
4066 dmae->len = sizeof(struct host_func_stats) >> 2;
4067 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4068 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4069 dmae->comp_val = DMAE_COMP_VAL;
4075 static void bnx2x_stats_stop(struct bnx2x *bp)
4079 bnx2x_stats_comp(bp);
4082 update = (bnx2x_hw_stats_update(bp) == 0);
4084 update |= (bnx2x_storm_stats_update(bp) == 0);
4087 bnx2x_net_stats_update(bp);
4090 bnx2x_port_stats_stop(bp);
4092 bnx2x_hw_stats_post(bp);
4093 bnx2x_stats_comp(bp);
4097 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4101 static const struct {
4102 void (*action)(struct bnx2x *bp);
4103 enum bnx2x_stats_state next_state;
4104 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4107 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4108 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4109 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4110 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4113 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4114 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4115 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4116 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4120 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4122 enum bnx2x_stats_state state = bp->stats_state;
4124 bnx2x_stats_stm[state][event].action(bp);
4125 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4127 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4128 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4129 state, event, bp->stats_state);
4132 static void bnx2x_timer(unsigned long data)
4134 struct bnx2x *bp = (struct bnx2x *) data;
4136 if (!netif_running(bp->dev))
4139 if (atomic_read(&bp->intr_sem) != 0)
4143 struct bnx2x_fastpath *fp = &bp->fp[0];
4146 bnx2x_tx_int(fp, 1000);
4147 rc = bnx2x_rx_int(fp, 1000);
4150 if (!BP_NOMCP(bp)) {
4151 int func = BP_FUNC(bp);
4155 ++bp->fw_drv_pulse_wr_seq;
4156 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4157 /* TBD - add SYSTEM_TIME */
4158 drv_pulse = bp->fw_drv_pulse_wr_seq;
4159 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4161 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4162 MCP_PULSE_SEQ_MASK);
4163 /* The delta between driver pulse and mcp response
4164 * should be 1 (before mcp response) or 0 (after mcp response)
4166 if ((drv_pulse != mcp_pulse) &&
4167 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4168 /* someone lost a heartbeat... */
4169 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4170 drv_pulse, mcp_pulse);
4174 if ((bp->state == BNX2X_STATE_OPEN) ||
4175 (bp->state == BNX2X_STATE_DISABLED))
4176 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4179 mod_timer(&bp->timer, jiffies + bp->current_interval);
4182 /* end of Statistics */
4187 * nic init service functions
4190 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4192 int port = BP_PORT(bp);
4194 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4195 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4196 sizeof(struct ustorm_status_block)/4);
4197 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4198 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4199 sizeof(struct cstorm_status_block)/4);
4202 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4203 dma_addr_t mapping, int sb_id)
4205 int port = BP_PORT(bp);
4206 int func = BP_FUNC(bp);
4211 section = ((u64)mapping) + offsetof(struct host_status_block,
4213 sb->u_status_block.status_block_id = sb_id;
4215 REG_WR(bp, BAR_USTRORM_INTMEM +
4216 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4217 REG_WR(bp, BAR_USTRORM_INTMEM +
4218 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4220 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4221 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4223 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4224 REG_WR16(bp, BAR_USTRORM_INTMEM +
4225 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4228 section = ((u64)mapping) + offsetof(struct host_status_block,
4230 sb->c_status_block.status_block_id = sb_id;
4232 REG_WR(bp, BAR_CSTRORM_INTMEM +
4233 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4234 REG_WR(bp, BAR_CSTRORM_INTMEM +
4235 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4237 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4238 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4240 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4241 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4242 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4244 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4247 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4249 int func = BP_FUNC(bp);
4251 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4252 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4253 sizeof(struct ustorm_def_status_block)/4);
4254 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4255 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4256 sizeof(struct cstorm_def_status_block)/4);
4257 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4258 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4259 sizeof(struct xstorm_def_status_block)/4);
4260 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4261 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4262 sizeof(struct tstorm_def_status_block)/4);
4265 static void bnx2x_init_def_sb(struct bnx2x *bp,
4266 struct host_def_status_block *def_sb,
4267 dma_addr_t mapping, int sb_id)
4269 int port = BP_PORT(bp);
4270 int func = BP_FUNC(bp);
4271 int index, val, reg_offset;
4275 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4276 atten_status_block);
4277 def_sb->atten_status_block.status_block_id = sb_id;
4281 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4282 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4284 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4285 bp->attn_group[index].sig[0] = REG_RD(bp,
4286 reg_offset + 0x10*index);
4287 bp->attn_group[index].sig[1] = REG_RD(bp,
4288 reg_offset + 0x4 + 0x10*index);
4289 bp->attn_group[index].sig[2] = REG_RD(bp,
4290 reg_offset + 0x8 + 0x10*index);
4291 bp->attn_group[index].sig[3] = REG_RD(bp,
4292 reg_offset + 0xc + 0x10*index);
4295 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4296 HC_REG_ATTN_MSG0_ADDR_L);
4298 REG_WR(bp, reg_offset, U64_LO(section));
4299 REG_WR(bp, reg_offset + 4, U64_HI(section));
4301 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4303 val = REG_RD(bp, reg_offset);
4305 REG_WR(bp, reg_offset, val);
4308 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4309 u_def_status_block);
4310 def_sb->u_def_status_block.status_block_id = sb_id;
4312 REG_WR(bp, BAR_USTRORM_INTMEM +
4313 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4314 REG_WR(bp, BAR_USTRORM_INTMEM +
4315 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4317 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4318 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4320 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4321 REG_WR16(bp, BAR_USTRORM_INTMEM +
4322 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4325 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4326 c_def_status_block);
4327 def_sb->c_def_status_block.status_block_id = sb_id;
4329 REG_WR(bp, BAR_CSTRORM_INTMEM +
4330 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4331 REG_WR(bp, BAR_CSTRORM_INTMEM +
4332 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4334 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4335 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4337 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4338 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4339 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4342 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4343 t_def_status_block);
4344 def_sb->t_def_status_block.status_block_id = sb_id;
4346 REG_WR(bp, BAR_TSTRORM_INTMEM +
4347 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4348 REG_WR(bp, BAR_TSTRORM_INTMEM +
4349 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4351 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4352 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4354 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4355 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4356 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4359 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4360 x_def_status_block);
4361 def_sb->x_def_status_block.status_block_id = sb_id;
4363 REG_WR(bp, BAR_XSTRORM_INTMEM +
4364 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4365 REG_WR(bp, BAR_XSTRORM_INTMEM +
4366 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4368 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4369 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4371 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4372 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4373 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4375 bp->stats_pending = 0;
4376 bp->set_mac_pending = 0;
4378 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4381 static void bnx2x_update_coalesce(struct bnx2x *bp)
4383 int port = BP_PORT(bp);
4386 for_each_queue(bp, i) {
4387 int sb_id = bp->fp[i].sb_id;
4389 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4390 REG_WR8(bp, BAR_USTRORM_INTMEM +
4391 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4392 U_SB_ETH_RX_CQ_INDEX),
4394 REG_WR16(bp, BAR_USTRORM_INTMEM +
4395 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4396 U_SB_ETH_RX_CQ_INDEX),
4397 bp->rx_ticks ? 0 : 1);
4399 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4400 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4401 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4402 C_SB_ETH_TX_CQ_INDEX),
4404 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4405 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4406 C_SB_ETH_TX_CQ_INDEX),
4407 bp->tx_ticks ? 0 : 1);
4411 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4412 struct bnx2x_fastpath *fp, int last)
4416 for (i = 0; i < last; i++) {
4417 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4418 struct sk_buff *skb = rx_buf->skb;
4421 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4425 if (fp->tpa_state[i] == BNX2X_TPA_START)
4426 pci_unmap_single(bp->pdev,
4427 pci_unmap_addr(rx_buf, mapping),
4429 PCI_DMA_FROMDEVICE);
4436 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4438 int func = BP_FUNC(bp);
4439 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4440 ETH_MAX_AGGREGATION_QUEUES_E1H;
4441 u16 ring_prod, cqe_ring_prod;
4444 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4446 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4448 if (bp->flags & TPA_ENABLE_FLAG) {
4450 for_each_rx_queue(bp, j) {
4451 struct bnx2x_fastpath *fp = &bp->fp[j];
4453 for (i = 0; i < max_agg_queues; i++) {
4454 fp->tpa_pool[i].skb =
4455 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4456 if (!fp->tpa_pool[i].skb) {
4457 BNX2X_ERR("Failed to allocate TPA "
4458 "skb pool for queue[%d] - "
4459 "disabling TPA on this "
4461 bnx2x_free_tpa_pool(bp, fp, i);
4462 fp->disable_tpa = 1;
4465 pci_unmap_addr_set((struct sw_rx_bd *)
4466 &bp->fp->tpa_pool[i],
4468 fp->tpa_state[i] = BNX2X_TPA_STOP;
4473 for_each_rx_queue(bp, j) {
4474 struct bnx2x_fastpath *fp = &bp->fp[j];
4477 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4478 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4480 /* "next page" elements initialization */
4482 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4483 struct eth_rx_sge *sge;
4485 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4487 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4488 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4490 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4491 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4494 bnx2x_init_sge_ring_bit_mask(fp);
4497 for (i = 1; i <= NUM_RX_RINGS; i++) {
4498 struct eth_rx_bd *rx_bd;
4500 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4502 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4503 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4505 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4506 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4510 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4511 struct eth_rx_cqe_next_page *nextpg;
4513 nextpg = (struct eth_rx_cqe_next_page *)
4514 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4516 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4517 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4519 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4520 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4523 /* Allocate SGEs and initialize the ring elements */
4524 for (i = 0, ring_prod = 0;
4525 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4527 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4528 BNX2X_ERR("was only able to allocate "
4530 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4531 /* Cleanup already allocated elements */
4532 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4533 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4534 fp->disable_tpa = 1;
4538 ring_prod = NEXT_SGE_IDX(ring_prod);
4540 fp->rx_sge_prod = ring_prod;
4542 /* Allocate BDs and initialize BD ring */
4543 fp->rx_comp_cons = 0;
4544 cqe_ring_prod = ring_prod = 0;
4545 for (i = 0; i < bp->rx_ring_size; i++) {
4546 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4547 BNX2X_ERR("was only able to allocate "
4548 "%d rx skbs on queue[%d]\n", i, j);
4549 fp->eth_q_stats.rx_skb_alloc_failed++;
4552 ring_prod = NEXT_RX_IDX(ring_prod);
4553 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4554 WARN_ON(ring_prod <= i);
4557 fp->rx_bd_prod = ring_prod;
4558 /* must not have more available CQEs than BDs */
4559 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4561 fp->rx_pkt = fp->rx_calls = 0;
4564 * this will generate an interrupt (to the TSTORM)
4565 * must only be done after chip is initialized
4567 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4572 REG_WR(bp, BAR_USTRORM_INTMEM +
4573 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4574 U64_LO(fp->rx_comp_mapping));
4575 REG_WR(bp, BAR_USTRORM_INTMEM +
4576 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4577 U64_HI(fp->rx_comp_mapping));
4581 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4585 for_each_tx_queue(bp, j) {
4586 struct bnx2x_fastpath *fp = &bp->fp[j];
4588 for (i = 1; i <= NUM_TX_RINGS; i++) {
4589 struct eth_tx_bd *tx_bd =
4590 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4593 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4594 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4596 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4597 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4600 fp->tx_pkt_prod = 0;
4601 fp->tx_pkt_cons = 0;
4604 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4609 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4611 int func = BP_FUNC(bp);
4613 spin_lock_init(&bp->spq_lock);
4615 bp->spq_left = MAX_SPQ_PENDING;
4616 bp->spq_prod_idx = 0;
4617 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4618 bp->spq_prod_bd = bp->spq;
4619 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4621 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4622 U64_LO(bp->spq_mapping));
4624 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4625 U64_HI(bp->spq_mapping));
4627 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
/*
 * bnx2x_init_context - program the per-queue Ethernet connection context:
 * USTORM (Rx) section with BD/SGE ring DMA addresses and flags, XSTORM (Tx)
 * section with Tx BD ring and doorbell data addresses, and CSTORM with the
 * Tx completion status-block index.
 * NOTE(review): stale inline numbering is non-contiguous; some original
 * lines (braces, a few rvalues) are missing from this extract. Code text
 * left byte-identical.
 */
4631 static void bnx2x_init_context(struct bnx2x *bp)
4635 for_each_queue(bp, i) {
4636 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4637 struct bnx2x_fastpath *fp = &bp->fp[i];
4638 u8 cl_id = fp->cl_id;
4639 u8 sb_id = FP_SB_ID(fp);
/* Rx-side (USTORM) section: client id, status block, flags */
4641 context->ustorm_st_context.common.sb_index_numbers =
4642 BNX2X_RX_SB_INDEX_NUM;
4643 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4644 context->ustorm_st_context.common.status_block_id = sb_id;
4645 context->ustorm_st_context.common.flags =
4646 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4647 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4648 context->ustorm_st_context.common.statistics_counter_id =
4650 context->ustorm_st_context.common.mc_alignment_log_size =
4651 BNX2X_RX_ALIGN_SHIFT;
4652 context->ustorm_st_context.common.bd_buff_size =
/* Rx BD ring DMA base */
4654 context->ustorm_st_context.common.bd_page_base_hi =
4655 U64_HI(fp->rx_desc_mapping);
4656 context->ustorm_st_context.common.bd_page_base_lo =
4657 U64_LO(fp->rx_desc_mapping);
/* extra TPA/SGE setup only when TPA is enabled on this queue */
4658 if (!fp->disable_tpa) {
4659 context->ustorm_st_context.common.flags |=
4660 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4661 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4662 context->ustorm_st_context.common.sge_buff_size =
4663 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4665 context->ustorm_st_context.common.sge_page_base_hi =
4666 U64_HI(fp->rx_sge_mapping);
4667 context->ustorm_st_context.common.sge_page_base_lo =
4668 U64_LO(fp->rx_sge_mapping);
/* CDU validation value for the UCM aggregation context */
4671 context->ustorm_ag_context.cdu_usage =
4672 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4673 CDU_REGION_NUMBER_UCM_AG,
4674 ETH_CONNECTION_TYPE);
/* Tx-side (XSTORM) section: Tx BD ring and doorbell data addresses */
4676 context->xstorm_st_context.tx_bd_page_base_hi =
4677 U64_HI(fp->tx_desc_mapping);
4678 context->xstorm_st_context.tx_bd_page_base_lo =
4679 U64_LO(fp->tx_desc_mapping);
4680 context->xstorm_st_context.db_data_addr_hi =
4681 U64_HI(fp->tx_prods_mapping);
4682 context->xstorm_st_context.db_data_addr_lo =
4683 U64_LO(fp->tx_prods_mapping);
4684 context->xstorm_st_context.statistics_data = (fp->cl_id |
4685 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
/* CSTORM: which status-block index tracks Tx CQ completions */
4686 context->cstorm_st_context.sb_index_number =
4687 C_SB_ETH_TX_CQ_INDEX;
4688 context->cstorm_st_context.status_block_id = sb_id;
/* CDU validation value for the XCM aggregation context */
4690 context->xstorm_ag_context.cdu_reserved =
4691 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4692 CDU_REGION_NUMBER_XCM_AG,
4693 ETH_CONNECTION_TYPE);
/*
 * bnx2x_init_ind_table - fill the TSTORM RSS indirection table, spreading
 * entries round-robin over the Rx queues' client ids. Skipped entirely when
 * RSS multi-queue mode is disabled.
 * NOTE(review): stale inline numbering is non-contiguous; braces and an
 * early return are missing from this extract. Code text left byte-identical.
 */
4697 static void bnx2x_init_ind_table(struct bnx2x *bp)
4699 int func = BP_FUNC(bp);
4702 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4706 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
/* one byte per table slot: base client id + queue index (round-robin) */
4707 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4708 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4709 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4710 BP_CL_ID(bp) + (i % bp->num_rx_queues));
/*
 * bnx2x_set_client_config - build a tstorm_eth_client_config (MTU, VLAN
 * removal, statistics, SGE/TPA flags) and write it, as two 32-bit words,
 * into TSTORM internal memory for every queue's client id.
 * NOTE(review): stale inline numbering is non-contiguous; braces/blank lines
 * are missing from this extract. Code text left byte-identical.
 */
4713 static void bnx2x_set_client_config(struct bnx2x *bp)
4715 struct tstorm_eth_client_config tstorm_client = {0};
4716 int port = BP_PORT(bp);
4719 tstorm_client.mtu = bp->dev->mtu;
4720 tstorm_client.config_flags =
4721 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4722 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
/* HW VLAN stripping only when Rx is active and a vlan group is registered */
4724 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4725 tstorm_client.config_flags |=
4726 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4727 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
/* TPA: compute max SGEs per packet, rounded up to whole SGE pages */
4731 if (bp->flags & TPA_ENABLE_FLAG) {
4732 tstorm_client.max_sges_for_packet =
4733 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4734 tstorm_client.max_sges_for_packet =
4735 ((tstorm_client.max_sges_for_packet +
4736 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4737 PAGES_PER_SGE_SHIFT;
4739 tstorm_client.config_flags |=
4740 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
/* write the (mostly shared) config per client; only counter id differs */
4743 for_each_queue(bp, i) {
4744 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4746 REG_WR(bp, BAR_TSTRORM_INTMEM +
4747 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4748 ((u32 *)&tstorm_client)[0]);
4749 REG_WR(bp, BAR_TSTRORM_INTMEM +
4750 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4751 ((u32 *)&tstorm_client)[1]);
4754 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4755 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
/*
 * bnx2x_set_storm_rx_mode - translate the driver's rx_mode (none / normal /
 * allmulti / promisc) into a tstorm_eth_mac_filter_config bitmask for this
 * function and write it word-by-word into TSTORM internal memory. For any
 * mode other than NONE it then pushes the per-client config as well.
 * NOTE(review): stale inline numbering is non-contiguous; braces, 'break'
 * statements and blank lines are missing from this extract. Code text left
 * byte-identical.
 */
4758 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4760 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4761 int mode = bp->rx_mode;
4762 int mask = (1 << BP_L_ID(bp));	/* bit for this function's leading client */
4763 int func = BP_FUNC(bp);
4766 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4769 case BNX2X_RX_MODE_NONE: /* no Rx */
4770 tstorm_mac_filter.ucast_drop_all = mask;
4771 tstorm_mac_filter.mcast_drop_all = mask;
4772 tstorm_mac_filter.bcast_drop_all = mask;
4774 case BNX2X_RX_MODE_NORMAL:
4775 tstorm_mac_filter.bcast_accept_all = mask;
4777 case BNX2X_RX_MODE_ALLMULTI:
4778 tstorm_mac_filter.mcast_accept_all = mask;
4779 tstorm_mac_filter.bcast_accept_all = mask;
4781 case BNX2X_RX_MODE_PROMISC:
4782 tstorm_mac_filter.ucast_accept_all = mask;
4783 tstorm_mac_filter.mcast_accept_all = mask;
4784 tstorm_mac_filter.bcast_accept_all = mask;
4787 BNX2X_ERR("BAD rx mode (%d)\n", mode);
/* copy the filter config into TSTORM internal memory, one u32 at a time */
4791 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4792 REG_WR(bp, BAR_TSTRORM_INTMEM +
4793 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4794 ((u32 *)&tstorm_mac_filter)[i]);
4796 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4797 ((u32 *)&tstorm_mac_filter)[i]); */
4800 if (mode != BNX2X_RX_MODE_NONE)
4801 bnx2x_set_client_config(bp);
/*
 * bnx2x_init_internal_common - chip-wide (first-driver-up) internal memory
 * init: advertises TPA existence to TSTORM when TPA is enabled, and zeroes
 * the USTORM aggregation-data area.
 * NOTE(review): stale inline numbering is non-contiguous; braces and the
 * REG_WR value arguments are missing from this extract. Code text left
 * byte-identical.
 */
4804 static void bnx2x_init_internal_common(struct bnx2x *bp)
4808 if (bp->flags & TPA_ENABLE_FLAG) {
4809 struct tstorm_eth_tpa_exist tpa = {0};
4813 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4815 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4819 /* Zero this manually as its initialization is
4820 currently missing in the initTool */
4821 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4822 REG_WR(bp, BAR_USTRORM_INTMEM +
4823 USTORM_AGG_DATA_OFFSET + i * 4, 0);
/*
 * bnx2x_init_internal_port - per-port internal memory init: program the
 * host-coalescing BTR value into all four storm processors for this port.
 * NOTE(review): stale inline numbering is non-contiguous (braces missing
 * from this extract). Code text left byte-identical.
 */
4826 static void bnx2x_init_internal_port(struct bnx2x *bp)
4828 int port = BP_PORT(bp);
4830 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4831 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4832 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4833 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4836 /* Calculates the sum of vn_min_rates.
4837 It's needed for further normalizing of the min_rates.
4839 sum of vn_min_rates.
4841 0 - if all the min_rates are 0.
4842 In the latter case the fairness algorithm should be deactivated.
4843 If not all min_rates are zero then those that are zeroes will be set to 1.
/*
 * bnx2x_calc_vn_weight_sum - accumulate the configured minimum bandwidth of
 * every non-hidden VN on this port into bp->vn_weight_sum (used later to
 * normalize min rates; see the comment block above).
 * NOTE(review): stale inline numbering is non-contiguous; braces, 'continue'
 * and the condition guarding the final reset are missing from this extract.
 * Code text left byte-identical.
 */
4845 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4848 int port = BP_PORT(bp);
4851 bp->vn_weight_sum = 0;
4852 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4853 int func = 2*vn + port;	/* function number for this (vn, port) pair */
4855 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
/* min BW is stored in the config field; scaled by 100 here */
4856 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4857 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4859 /* Skip hidden vns */
4860 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4863 /* If min rate is zero - set it to 1 */
4865 vn_min_rate = DEF_MIN_RATE;
4869 bp->vn_weight_sum += vn_min_rate;
4872 /* ... only if all min rates are zeros - disable fairness */
4874 bp->vn_weight_sum = 0;
/*
 * bnx2x_init_internal_func - per-function internal memory init:
 *  - writes the TSTORM common config (RSS flags/mask, E1HOV) and sets the
 *    storm Rx mode to NONE until link is up;
 *  - zeroes per-client statistics in X/T/USTORM;
 *  - programs statistics collection flags and the fw_stats DMA address into
 *    all four storms;
 *  - on E1H, sets the function mode and E1HOV, and programs dropless flow
 *    control thresholds per Rx queue;
 *  - programs per-queue CQE page base and max aggregation size;
 *  - initializes the rate-shaping/fairness (cmng) context and stores it to
 *    XSTORM internal memory.
 * NOTE(review): stale inline numbering is non-contiguous; braces, several
 * for-loop headers, rvalues and conditions are missing from this extract.
 * Code text left byte-identical.
 */
4877 static void bnx2x_init_internal_func(struct bnx2x *bp)
4879 struct tstorm_eth_function_common_config tstorm_config = {0};
4880 struct stats_indication_flags stats_flags = {0};
4881 int port = BP_PORT(bp);
4882 int func = BP_FUNC(bp);
4888 tstorm_config.config_flags = MULTI_FLAGS(bp);
4889 tstorm_config.rss_result_mask = MULTI_MASK;
4892 tstorm_config.config_flags |=
4893 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4895 tstorm_config.leading_client_id = BP_L_ID(bp);
4897 REG_WR(bp, BAR_TSTRORM_INTMEM +
4898 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4899 (*(u32 *)&tstorm_config));
4901 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4902 bnx2x_set_storm_rx_mode(bp);
4904 for_each_queue(bp, i) {
4905 u8 cl_id = bp->fp[i].cl_id;
4907 /* reset xstorm per client statistics */
4908 offset = BAR_XSTRORM_INTMEM +
4909 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4911 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4912 REG_WR(bp, offset + j*4, 0);
4914 /* reset tstorm per client statistics */
4915 offset = BAR_TSTRORM_INTMEM +
4916 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4918 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4919 REG_WR(bp, offset + j*4, 0);
4921 /* reset ustorm per client statistics */
4922 offset = BAR_USTRORM_INTMEM +
4923 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4925 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4926 REG_WR(bp, offset + j*4, 0);
4929 /* Init statistics related context */
4930 stats_flags.collect_eth = 1;
/* each storm gets the two 32-bit words of stats_flags */
4932 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4933 ((u32 *)&stats_flags)[0]);
4934 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4935 ((u32 *)&stats_flags)[1]);
4937 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4938 ((u32 *)&stats_flags)[0]);
4939 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4940 ((u32 *)&stats_flags)[1]);
4942 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4943 ((u32 *)&stats_flags)[0]);
4944 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4945 ((u32 *)&stats_flags)[1]);
4947 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4948 ((u32 *)&stats_flags)[0]);
4949 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4950 ((u32 *)&stats_flags)[1]);
/* DMA address of the fw_stats buffer, low/high dwords, per storm */
4952 REG_WR(bp, BAR_XSTRORM_INTMEM +
4953 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4954 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4955 REG_WR(bp, BAR_XSTRORM_INTMEM +
4956 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4957 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4959 REG_WR(bp, BAR_TSTRORM_INTMEM +
4960 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4961 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4962 REG_WR(bp, BAR_TSTRORM_INTMEM +
4963 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4964 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4966 REG_WR(bp, BAR_USTRORM_INTMEM +
4967 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4968 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4969 REG_WR(bp, BAR_USTRORM_INTMEM +
4970 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4971 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
/* E1H only: program function mode and E1HOV into the storms */
4973 if (CHIP_IS_E1H(bp)) {
4974 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4976 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4978 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4980 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4983 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4987 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4989 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4990 SGE_PAGE_SIZE * PAGES_PER_SGE),
4992 for_each_rx_queue(bp, i) {
4993 struct bnx2x_fastpath *fp = &bp->fp[i];
4995 REG_WR(bp, BAR_USTRORM_INTMEM +
4996 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4997 U64_LO(fp->rx_comp_mapping));
4998 REG_WR(bp, BAR_USTRORM_INTMEM +
4999 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
5000 U64_HI(fp->rx_comp_mapping));
5002 REG_WR16(bp, BAR_USTRORM_INTMEM +
5003 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
5007 /* dropless flow control */
5008 if (CHIP_IS_E1H(bp)) {
5009 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5011 rx_pause.bd_thr_low = 250;
5012 rx_pause.cqe_thr_low = 250;
5014 rx_pause.sge_thr_low = 0;
5015 rx_pause.bd_thr_high = 350;
5016 rx_pause.cqe_thr_high = 350;
5017 rx_pause.sge_thr_high = 0;
5019 for_each_rx_queue(bp, i) {
5020 struct bnx2x_fastpath *fp = &bp->fp[i];
/* SGE thresholds only matter when the queue has TPA enabled */
5022 if (!fp->disable_tpa) {
5023 rx_pause.sge_thr_low = 150;
5024 rx_pause.sge_thr_high = 250;
5028 offset = BAR_USTRORM_INTMEM +
5029 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5032 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5034 REG_WR(bp, offset + j*4,
5035 ((u32 *)&rx_pause)[j]);
5039 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5041 /* Init rate shaping and fairness contexts */
5045 /* During init there is no active link
5046 Until link is up, set link rate to 10Gbps */
5047 bp->link_vars.line_speed = SPEED_10000;
5048 bnx2x_init_port_minmax(bp);
5050 bnx2x_calc_vn_weight_sum(bp);
5052 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5053 bnx2x_init_vn_minmax(bp, 2*vn + port);
5055 /* Enable rate shaping and fairness */
5056 bp->cmng.flags.cmng_enables =
5057 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
/* fairness only when at least one VN has a non-zero min rate */
5058 if (bp->vn_weight_sum)
5059 bp->cmng.flags.cmng_enables |=
5060 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5062 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5063 " fairness will be disabled\n");
5065 /* rate shaping and fairness are disabled */
5067 "single function mode minmax will be disabled\n");
5071 /* Store it to internal memory */
5073 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5074 REG_WR(bp, BAR_XSTRORM_INTMEM +
5075 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5076 ((u32 *)(&bp->cmng))[i]);
/*
 * bnx2x_init_internal - dispatch internal-memory init according to the load
 * level negotiated with the MCP: COMMON implies PORT implies FUNCTION
 * (cases fall through so a COMMON load performs all three stages).
 * NOTE(review): stale inline numbering is non-contiguous; braces and the
 * 'default:' label are missing from this extract. Code text left
 * byte-identical.
 */
5079 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5081 switch (load_code) {
5082 case FW_MSG_CODE_DRV_LOAD_COMMON:
5083 bnx2x_init_internal_common(bp);
/* fall through */
5086 case FW_MSG_CODE_DRV_LOAD_PORT:
5087 bnx2x_init_internal_port(bp);
/* fall through */
5090 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5091 bnx2x_init_internal_func(bp);
5095 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
/*
 * bnx2x_nic_init - top-level NIC bring-up sequencing: assign client/SB ids
 * and init the status block for every queue, then the default status block,
 * coalescing, Rx/Tx/SP rings, contexts, internal memory, the RSS
 * indirection table and statistics, and finally enable interrupts.
 * NOTE(review): stale inline numbering is non-contiguous; braces, a DP()
 * opening line, trailing call arguments and a memory barrier are missing
 * from this extract. Code text left byte-identical.
 */
5100 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5104 for_each_queue(bp, i) {
5105 struct bnx2x_fastpath *fp = &bp->fp[i];
5108 fp->state = BNX2X_FP_STATE_CLOSED;
/* client id and status-block id are derived from the leading id + index */
5110 fp->cl_id = BP_L_ID(bp) + i;
5111 fp->sb_id = fp->cl_id;
5113 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
5114 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5115 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5117 bnx2x_update_fpsb_idx(fp);
5120 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5122 bnx2x_update_dsb_idx(bp);
5123 bnx2x_update_coalesce(bp);
5124 bnx2x_init_rx_rings(bp);
5125 bnx2x_init_tx_ring(bp);
5126 bnx2x_init_sp_ring(bp);
5127 bnx2x_init_context(bp);
5128 bnx2x_init_internal(bp, load_code);
5129 bnx2x_init_ind_table(bp);
5130 bnx2x_stats_init(bp);
5132 /* At this point, we are ready for interrupts */
5133 atomic_set(&bp->intr_sem, 0);
5135 /* flush all before enabling interrupts */
5139 bnx2x_int_enable(bp);
5142 /* end of nic init */
5145 * gzip service functions
/*
 * bnx2x_gunzip_init - allocate the DMA-coherent firmware decompression
 * buffer, the zlib stream object and its inflate workspace. On any failure
 * the already-acquired resources are released (unwind labels are among the
 * lines missing from this extract) and an error is reported.
 * NOTE(review): stale inline numbering is non-contiguous; braces, goto
 * targets, return statements and a kfree are missing from this extract.
 * Code text left byte-identical.
 */
5148 static int bnx2x_gunzip_init(struct bnx2x *bp)
5150 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5151 &bp->gunzip_mapping);
5152 if (bp->gunzip_buf == NULL)
5155 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5156 if (bp->strm == NULL)
5159 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5161 if (bp->strm->workspace == NULL)
/* error unwind: free the DMA buffer acquired first */
5171 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5172 bp->gunzip_mapping);
5173 bp->gunzip_buf = NULL;
5176 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5177 " un-compression\n", bp->dev->name);
/*
 * bnx2x_gunzip_end - release the decompression resources acquired by
 * bnx2x_gunzip_init: the inflate workspace, the stream object (free of
 * bp->strm is among the lines missing from this extract) and, if still
 * allocated, the DMA-coherent buffer.
 * NOTE(review): stale inline numbering is non-contiguous; braces and some
 * statements are missing from this extract. Code text left byte-identical.
 */
5181 static void bnx2x_gunzip_end(struct bnx2x *bp)
5183 kfree(bp->strm->workspace);
5188 if (bp->gunzip_buf) {
5189 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5190 bp->gunzip_mapping);
5191 bp->gunzip_buf = NULL;	/* guard against double free */
/*
 * bnx2x_gunzip - inflate a gzip-wrapped firmware image of 'len' bytes at
 * 'zbuf' into bp->gunzip_buf. Validates the gzip magic/method bytes, skips
 * the optional FNAME field, then runs raw inflate (-MAX_WBITS). Records the
 * output length in 32-bit words in bp->gunzip_outlen.
 * NOTE(review): stale inline numbering is non-contiguous; braces, the
 * header-skip setup for 'n', error returns and the final return are missing
 * from this extract. Code text left byte-identical.
 */
5195 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5199 /* check gzip header */
5200 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
/* skip the NUL-terminated original-file-name field, if present */
5207 if (zbuf[3] & FNAME)
5208 while ((zbuf[n++] != 0) && (n < len));
5210 bp->strm->next_in = zbuf + n;
5211 bp->strm->avail_in = len - n;
5212 bp->strm->next_out = bp->gunzip_buf;
5213 bp->strm->avail_out = FW_BUF_SIZE;
/* negative windowBits: raw deflate stream (header already consumed) */
5215 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5219 rc = zlib_inflate(bp->strm, Z_FINISH);
5220 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5221 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5222 bp->dev->name, bp->strm->msg);
5224 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5225 if (bp->gunzip_outlen & 0x3)
5226 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5227 " gunzip_outlen (%d) not aligned\n",
5228 bp->dev->name, bp->gunzip_outlen);
5229 bp->gunzip_outlen >>= 2;	/* convert bytes to 32-bit words */
5231 zlib_inflateEnd(bp->strm);
5233 if (rc == Z_STREAM_END)
5239 /* nic load/unload */
5242 * General service functions
5245 /* send a NIG loopback debug packet */
/*
 * bnx2x_lb_pckt - inject a minimal two-beat debug frame into the NIG
 * loopback path: first write carries the Ethernet addresses with SOP set,
 * the second a non-IP payload word with EOP set.
 * NOTE(review): stale inline numbering is non-contiguous; the opening brace
 * and the wb_write declaration are missing from this extract. Code text
 * left byte-identical.
 */
5246 static void bnx2x_lb_pckt(struct bnx2x *bp)
5250 /* Ethernet source and destination addresses */
5251 wb_write[0] = 0x55555555;
5252 wb_write[1] = 0x55555555;
5253 wb_write[2] = 0x20; /* SOP */
5254 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5256 /* NON-IP protocol */
5257 wb_write[0] = 0x09000000;
5258 wb_write[1] = 0x55555555;
5259 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5260 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5263 /* some of the internal memories
5264 * are not directly readable from the driver
5265 * to test them we send debug packets
/*
 * bnx2x_int_mem_test - self-test of internal memories that the driver
 * cannot read directly: send loopback debug packets through BRB/PRS, poll
 * NIG/PRS counters for the expected packet counts, and reset/re-init the
 * blocks between phases. Returns non-zero on timeout/failure (the return
 * statements are among the lines missing from this extract).
 * NOTE(review): stale inline numbering is non-contiguous; braces, loop
 * headers, msleep/poll bodies and 'factor' setup are missing from this
 * extract. Code text left byte-identical.
 */
5267 static int bnx2x_int_mem_test(struct bnx2x *bp)
/* slower emulation/FPGA platforms get a larger timeout factor */
5273 if (CHIP_REV_IS_FPGA(bp))
5275 else if (CHIP_REV_IS_EMUL(bp))
5280 DP(NETIF_MSG_HW, "start part1\n");
5282 /* Disable inputs of parser neighbor blocks */
5283 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5284 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5285 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5286 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5288 /* Write 0 to parser credits for CFC search request */
5289 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5291 /* send Ethernet packet */
5294 /* TODO do i reset NIG statistic? */
5295 /* Wait until NIG register shows 1 packet of size 0x10 */
5296 count = 1000 * factor;
5299 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5300 val = *bnx2x_sp(bp, wb_data[0]);
5308 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5312 /* Wait until PRS register shows 1 packet */
5313 count = 1000 * factor;
5315 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5323 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5327 /* Reset and init BRB, PRS */
5328 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5330 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5332 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5333 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5335 DP(NETIF_MSG_HW, "part2\n");
5337 /* Disable inputs of parser neighbor blocks */
5338 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5339 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5340 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5341 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5343 /* Write 0 to parser credits for CFC search request */
5344 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5346 /* send 10 Ethernet packets */
5347 for (i = 0; i < 10; i++)
5350 /* Wait until NIG register shows 10 + 1
5351 packets of size 11*0x10 = 0xb0 */
5352 count = 1000 * factor;
5355 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5356 val = *bnx2x_sp(bp, wb_data[0]);
5364 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5368 /* Wait until PRS register shows 2 packets */
5369 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5371 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5373 /* Write 1 to parser credits for CFC search request */
5374 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5376 /* Wait until PRS register shows 3 packets */
5377 msleep(10 * factor);
5378 /* Wait until NIG register shows 1 packet of size 0x10 */
5379 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5381 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5383 /* clear NIG EOP FIFO */
5384 for (i = 0; i < 11; i++)
5385 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5386 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5388 BNX2X_ERR("clear of NIG failed\n");
5392 /* Reset and init BRB, PRS, NIG */
5393 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5395 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5397 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5398 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5401 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5404 /* Enable inputs of parser neighbor blocks */
5405 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5406 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5407 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5408 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5410 DP(NETIF_MSG_HW, "done\n");
/*
 * enable_blocks_attention - unmask attention interrupts in the various HW
 * blocks by clearing their interrupt-mask registers (0 = unmasked). The
 * commented-out SEM/MISC writes are intentionally left masked; PBF keeps
 * bits 3 and 4 masked; PXP2 mask depends on FPGA vs. real silicon.
 * NOTE(review): stale inline numbering is non-contiguous (brace lines
 * missing from this extract). Code text left byte-identical.
 */
5415 static void enable_blocks_attention(struct bnx2x *bp)
5417 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5418 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5419 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5420 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5421 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5422 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5423 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5424 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5425 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5426 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5427 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5428 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5429 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5430 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5431 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5432 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5433 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5434 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5435 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5436 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5437 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5438 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5439 if (CHIP_REV_IS_FPGA(bp))
5440 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5442 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5443 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5444 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5445 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5446 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5447 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5448 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5449 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5450 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5451 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
/*
 * bnx2x_reset_common - assert reset on the common HW blocks by writing the
 * MISC reset-clear registers (reg 1 value is on a line missing from this
 * extract; reg 2 clears mask 0x1403).
 * NOTE(review): stale inline numbering is non-contiguous (braces and one
 * value argument missing from this extract). Code text left byte-identical.
 */
5455 static void bnx2x_reset_common(struct bnx2x *bp)
5458 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5460 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
/*
 * bnx2x_init_common - one-time, chip-wide initialization performed by the
 * first driver instance to load: takes blocks out of reset, initializes
 * MISC/PXP/PXP2/DMAE/CMs/QM/DQ/BRB/PRS/SDMs/SEMs/UPB/XPB/PBF/SRC/CDU/CFC/
 * HC/AEU/NIG via bnx2x_init_block(), zeroes storm internal memories, runs
 * the internal memory self-test on first power-up (E1), sets up fan-failure
 * detection for SFX7101 PHYs, unmasks block attentions and performs common
 * PHY init through the bootcode.
 * NOTE(review): stale inline numbering is non-contiguous; braces, msleep
 * calls, some conditions/values and return statements are missing from this
 * extract. Code text left byte-identical.
 */
5463 static int bnx2x_init_common(struct bnx2x *bp)
5467 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5469 bnx2x_reset_common(bp);
/* take all common blocks out of reset */
5470 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5471 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5473 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5474 if (CHIP_IS_E1H(bp))
5475 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5477 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5479 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5481 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5482 if (CHIP_IS_E1(bp)) {
5483 /* enable HW interrupt from PXP on USDM overflow
5484 bit 16 on INT_MASK_0 */
5485 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5488 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
/* big-endian DMA masters configuration (value lines elided above) */
5492 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5493 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5494 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5495 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5496 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5497 /* make sure this value is 0 */
5498 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5500 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5501 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5502 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5503 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5504 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
/* ILT page sizes for the PXP2 request queue clients */
5507 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5509 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5510 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5511 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5514 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5515 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5517 /* let the HW do it's magic ... */
5519 /* finish PXP init */
5520 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5522 BNX2X_ERR("PXP2 CFG failed\n");
5525 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5527 BNX2X_ERR("PXP2 RD_INIT failed\n");
5531 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5532 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5534 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5536 /* clean the DMAE memory */
5538 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5540 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5541 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5542 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5543 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
/* dummy reads to flush the SEM passive buffers */
5545 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5546 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5547 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5548 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5550 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5551 /* soft reset pulse */
5552 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5553 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5556 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5559 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5560 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5561 if (!CHIP_REV_IS_SLOW(bp)) {
5562 /* enable hw interrupt from doorbell Q */
5563 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5566 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5567 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5568 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5570 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5571 if (CHIP_IS_E1H(bp))
5572 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5574 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5575 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5576 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5577 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
/* zero the storm internal memories (E1H halves them; E1 is full size) */
5579 if (CHIP_IS_E1H(bp)) {
5580 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5581 STORM_INTMEM_SIZE_E1H/2);
5583 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5584 0, STORM_INTMEM_SIZE_E1H/2);
5585 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5586 STORM_INTMEM_SIZE_E1H/2);
5588 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5589 0, STORM_INTMEM_SIZE_E1H/2);
5590 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5591 STORM_INTMEM_SIZE_E1H/2);
5593 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5594 0, STORM_INTMEM_SIZE_E1H/2);
5595 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5596 STORM_INTMEM_SIZE_E1H/2);
5598 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5599 0, STORM_INTMEM_SIZE_E1H/2);
5601 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5602 STORM_INTMEM_SIZE_E1);
5603 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5604 STORM_INTMEM_SIZE_E1);
5605 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5606 STORM_INTMEM_SIZE_E1);
5607 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5608 STORM_INTMEM_SIZE_E1);
5611 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5612 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5613 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5614 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
/* pulse a reset on selected blocks (mask values elided above) */
5617 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5619 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5622 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5623 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5624 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5626 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5627 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5628 REG_WR(bp, i, 0xc0cac01a);
5629 /* TODO: replace with something meaningful */
5631 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5632 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5634 if (sizeof(union cdu_context) != 1024)
5635 /* we currently assume that a context is 1024 bytes */
5636 printk(KERN_ALERT PFX "please adjust the size of"
5637 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5639 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5640 val = (4 << 24) + (0 << 12) + 1024;	/* CDU global params */
5641 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5642 if (CHIP_IS_E1(bp)) {
5643 /* !!! fix pxp client crdit until excel update */
5644 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5645 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5648 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5649 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5650 /* enable context validation interrupt from CFC */
5651 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5653 /* set the thresholds to prevent CFC/CDU race */
5654 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5656 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5657 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5659 /* PXPCS COMMON comes here */
5660 /* Reset PCIE errors for debug */
5661 REG_WR(bp, 0x2814, 0xffffffff);
5662 REG_WR(bp, 0x3820, 0xffffffff);
5664 /* EMAC0 COMMON comes here */
5665 /* EMAC1 COMMON comes here */
5666 /* DBU COMMON comes here */
5667 /* DBG COMMON comes here */
5669 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5670 if (CHIP_IS_E1H(bp)) {
5671 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5672 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5675 if (CHIP_REV_IS_SLOW(bp))
5678 /* finish CFC init */
5679 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5681 BNX2X_ERR("CFC LL_INIT failed\n");
5684 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5686 BNX2X_ERR("CFC AC_INIT failed\n");
5689 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5691 BNX2X_ERR("CFC CAM_INIT failed\n");
5694 REG_WR(bp, CFC_REG_DEBUG0, 0);
5696 /* read NIG statistic
5697 to see if this is our first up since powerup */
5698 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5699 val = *bnx2x_sp(bp, wb_data[0]);
5701 /* do internal memory self test */
5702 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5703 BNX2X_ERR("internal mem self test failed\n");
5707 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5708 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5709 /* Fan failure is indicated by SPIO 5 */
5710 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5711 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5713 /* set to active low mode */
5714 val = REG_RD(bp, MISC_REG_SPIO_INT);
5715 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5716 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5717 REG_WR(bp, MISC_REG_SPIO_INT, val);
5719 /* enable interrupt to signal the IGU */
5720 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5721 val |= (1 << MISC_REGISTERS_SPIO_5);
5722 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5729 /* clear PXP2 attentions */
5730 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5732 enable_blocks_attention(bp);
5734 if (!BP_NOMCP(bp)) {
5735 bnx2x_acquire_phy_lock(bp);
5736 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5737 bnx2x_release_phy_lock(bp);
5739 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5744 static int bnx2x_init_port(struct bnx2x *bp)
5746 int port = BP_PORT(bp);
5750 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5752 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5754 /* Port PXP comes here */
5755 /* Port PXP2 comes here */
5760 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5761 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5762 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5763 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5768 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5769 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5770 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5771 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5776 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5777 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5778 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5779 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5781 /* Port CMs come here */
5782 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5783 (port ? XCM_PORT1_END : XCM_PORT0_END));
5785 /* Port QM comes here */
5787 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5788 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5790 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5791 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5793 /* Port DQ comes here */
5795 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5796 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5797 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5798 /* no pause for emulation and FPGA */
5803 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5804 else if (bp->dev->mtu > 4096) {
5805 if (bp->flags & ONE_PORT_FLAG)
5809 /* (24*1024 + val*4)/256 */
5810 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5813 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5814 high = low + 56; /* 14*1024/256 */
5816 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5817 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5820 /* Port PRS comes here */
5821 /* Port TSDM comes here */
5822 /* Port CSDM comes here */
5823 /* Port USDM comes here */
5824 /* Port XSDM comes here */
5825 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5826 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5827 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5828 port ? USEM_PORT1_END : USEM_PORT0_END);
5829 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5830 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5831 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5832 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5833 /* Port UPB comes here */
5834 /* Port XPB comes here */
5836 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5837 port ? PBF_PORT1_END : PBF_PORT0_END);
5839 /* configure PBF to work without PAUSE mtu 9000 */
5840 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5842 /* update threshold */
5843 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5844 /* update init credit */
5845 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5848 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5850 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5853 /* tell the searcher where the T2 table is */
5854 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5856 wb_write[0] = U64_LO(bp->t2_mapping);
5857 wb_write[1] = U64_HI(bp->t2_mapping);
5858 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5859 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5860 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5861 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5863 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5864 /* Port SRCH comes here */
5866 /* Port CDU comes here */
5867 /* Port CFC comes here */
5869 if (CHIP_IS_E1(bp)) {
5870 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5871 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5873 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5874 port ? HC_PORT1_END : HC_PORT0_END);
5876 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5877 MISC_AEU_PORT0_START,
5878 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5879 /* init aeu_mask_attn_func_0/1:
5880 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5881 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5882 * bits 4-7 are used for "per vn group attention" */
5883 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5884 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5886 /* Port PXPCS comes here */
5887 /* Port EMAC0 comes here */
5888 /* Port EMAC1 comes here */
5889 /* Port DBU comes here */
5890 /* Port DBG comes here */
5891 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5892 port ? NIG_PORT1_END : NIG_PORT0_END);
5894 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5896 if (CHIP_IS_E1H(bp)) {
5897 /* 0x2 disable e1hov, 0x1 enable */
5898 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5899 (IS_E1HMF(bp) ? 0x1 : 0x2));
5901 /* support pause requests from USDM, TSDM and BRB */
5902 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5905 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5906 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5907 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5911 /* Port MCP comes here */
5912 /* Port DMAE comes here */
5914 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5915 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5917 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5919 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5920 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5922 /* The GPIO should be swapped if the swap register is
5924 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5925 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5927 /* Select function upon port-swap configuration */
5929 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5930 aeu_gpio_mask = (swap_val && swap_override) ?
5931 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5932 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5934 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5935 aeu_gpio_mask = (swap_val && swap_override) ?
5936 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5937 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5939 val = REG_RD(bp, offset);
5940 /* add GPIO3 to group */
5941 val |= aeu_gpio_mask;
5942 REG_WR(bp, offset, val);
5946 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5947 /* add SPIO 5 to group 0 */
5948 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5949 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5950 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5957 bnx2x__link_reset(bp);
/* Each PCI function owns half of the 768-line ILT (internal lookup table). */
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	((func) * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
   NOTE: all macro arguments are fully parenthesized so that
   expression arguments (e.g. "base + off") expand correctly. */
#define ONCHIP_ADDR1(x)		((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)(x) >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | (x))
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | (f))

/* no CNIC (iSCSI/FCoE offload) ILT lines reserved in this driver version */
#define CNIC_ILT_LINES		0
/* Program one ILT line: write the page-table entry for @addr into the
 * PXP2 on-chip address table at @index.  E1H chips use the _B0 register
 * block, E1 chips the original one.  Each table entry is 8 bytes wide.
 */
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	/* wide (64-bit) register: written as two 32-bit halves */
	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
/* Per-function HW init: enable MSI reconfigure, program this function's
 * ILT window and CDU range, and (on E1H) bring up the per-function CM
 * blocks, NIG function enable/VLAN id and HC attention registers.
 * Finishes by clearing latched PCIE error status (debug aid).
 */
static int bnx2x_init_func(struct bnx2x *bp)
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	/* this function's first line in the shared ILT */
	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
		/* E1: single register encodes the whole CDU ILT range */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		/* init the 9 per-function connection-manager blocks */
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);
/* Top-level HW init, dispatched on the MCP's load response:
 * COMMON - first driver on the chip (chip-wide init; presumably also
 *          runs the port/function stages in the elided fall-through --
 *          confirm against the full source),
 * PORT   - first driver on this port,
 * FUNCTION - per-function init only.
 * Also latches the driver-pulse sequence and per-function stats address
 * from shmem, and zeroes all status blocks before releasing the gunzip
 * buffer.  Returns 0 on success or a negative errno from the init stage.
 */
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);

	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_port(bp);

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_func(bp);

		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		/* seed the keep-alive pulse sequence from the FW mailbox */
		bp->fw_drv_pulse_wr_seq =
			(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
			 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

	bnx2x_gunzip_end(bp);
/* send the MCP a request, block until there is a reply.
 * The low bits of the mailbox carry a sequence number (bumped per call)
 * so a stale reply can be told apart from the answer to this command.
 * Polls the FW mailbox for up to 2 seconds; returns the FW message code
 * on success.
 */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	/* emulation/FPGA chips are slower -- use a longer poll interval */
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	/* let the FW do it's magic ... */
		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 second (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		/* strip the sequence number, keep only the message code */
		rc &= FW_MSG_CODE_MASK;

		BNX2X_ERR("FW failed to respond!\n");
/* Release all DMA-coherent and vmalloc'ed memory owned by the driver:
 * per-queue status blocks and rx/tx rings, the default status block,
 * slowpath area, searcher T1/T2 tables, timers, QM queues and the
 * slowpath (SPQ) ring.  Inverse of bnx2x_alloc_mem().
 */
static void bnx2x_free_mem(struct bnx2x *bp)

/* free a DMA-coherent area (helpers are #undef'ed at the end) */
#define BNX2X_PCI_FREE(x, y, size) \
		pci_free_consistent(bp->pdev, size, x, y); \

/* free a vmalloc'ed area */
#define BNX2X_FREE(x) \

	for_each_queue(bp, i) {

		/* status blocks (tx doorbell data lives right after them) */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));

	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *

		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);

	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);

	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	/* searcher/timer/QM areas -- sizes match bnx2x_alloc_mem() */
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
/* Allocate all driver memory: per-queue status blocks and rx/tx rings,
 * default status block, slowpath area, searcher T1/T2 tables, timers,
 * QM queues and the SPQ ring.  DMA-coherent areas come from
 * pci_alloc_consistent, host-only rings from vmalloc; everything is
 * zeroed.  On any failure the helpers jump to alloc_mem_err (elided),
 * which presumably frees what was already allocated -- confirm against
 * the full source.
 */
static int bnx2x_alloc_mem(struct bnx2x *bp)

/* DMA-coherent, zeroed; bails out to alloc_mem_err on failure */
#define BNX2X_PCI_ALLOC(x, y, size) \
		x = pci_alloc_consistent(bp->pdev, size, y); \
			goto alloc_mem_err; \
		memset(x, 0, size); \

/* vmalloc'ed, zeroed; bails out to alloc_mem_err on failure */
#define BNX2X_ALLOC(x, size) \
		x = vmalloc(size); \
			goto alloc_mem_err; \
		memset(x, 0, size); \

	for_each_queue(bp, i) {
		/* back-pointer so a fastpath can find its bp */
		bnx2x_fp(bp, i, bp) = bp;

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));

	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *

		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);

	for_each_tx_queue(bp, i) {

		/* tx producer area lives right after the status block */
		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);

	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* clear the searcher T1 free-list pointers (one per 64-byte line) */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		/* NOTE(review): offset i + 3 is an unaligned u64 store that
		 * overlaps neighbouring bytes -- looks suspicious, confirm
		 * against the searcher HW spec */
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* link each 64-byte T2 line to the next one (free list) */
	for (i = 0; i < 16*1024; i += 64)
		* (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_ALLOC
/* Drop every pending tx packet on every tx queue: walk the sw producer/
 * consumer window and unmap/free each packet via bnx2x_free_tx_pkt().
 * Used on teardown, when completions will never arrive.
 */
static void bnx2x_free_tx_skbs(struct bnx2x *bp)

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
/* Free every posted rx buffer on every rx queue: unmap each skb still
 * sitting in the rx ring, then release the TPA (aggregation) pool when
 * TPA is enabled for the queue.  E1 and E1H have different numbers of
 * aggregation queues.
 */
static void bnx2x_free_rx_skbs(struct bnx2x *bp)

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 PCI_DMA_FROMDEVICE);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
/* Free all driver-held skbs, tx side first then rx side. */
static void bnx2x_free_skbs(struct bnx2x *bp)
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
/* Release all MSI-X vectors: the slowpath vector (table entry 0) and
 * one vector per fastpath queue (entries 1..n, hence the offset).
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp)

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
/* Release whatever interrupt mode is active (MSI-X, MSI or legacy INTx)
 * and clear the corresponding flag so the mode is renegotiated on the
 * next load.
 */
static void bnx2x_free_irq(struct bnx2x *bp)
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

		/* legacy INTx */
		free_irq(bp->pdev->irq, bp->dev);
/* Try to enable MSI-X: entry 0 is the slowpath (IGU default SB) vector,
 * entries 1..n map one fastpath queue each.  Sets USING_MSIX_FLAG on
 * success; on failure the caller falls back to MSI/INTx.
 */
static int bnx2x_enable_msix(struct bnx2x *bp)
	int i, rc, offset = 1;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

	for_each_queue(bp, i) {
		/* fastpath IGU vectors follow this function's leading id */
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);

	bp->flags |= USING_MSIX_FLAG;
/* Request all MSI-X interrupt handlers: the slowpath handler on vector 0
 * and one fastpath handler per queue (named "<dev>.fp<N>").  On a
 * fastpath failure, releases everything already requested.  Returns 0
 * on success.
 */
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
		BNX2X_ERR("request sp irq failed\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);

		fp->state = BNX2X_FP_STATE_IRQ;

	i = BNX2X_NUM_QUEUES(bp);
		printk(KERN_INFO PFX
		       "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset].vector,
		       bp->msix_table[offset + i - 1].vector);
		/* single-queue case: only one fp vector to report */
		printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset + i - 1].vector);
/* Try to enable single-vector MSI; sets USING_MSI_FLAG on success so
 * request/free paths know which mode is active.
 */
static int bnx2x_enable_msi(struct bnx2x *bp)

	rc = pci_enable_msi(bp->pdev);
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
	bp->flags |= USING_MSI_FLAG;
/* Request the single (MSI or legacy INTx) interrupt.  The line is
 * shared only in INTx mode -- MSI does not need IRQF_SHARED.  Marks
 * fastpath 0 as having its irq on success.
 */
static int bnx2x_req_irq(struct bnx2x *bp)
	unsigned long flags;

	if (bp->flags & USING_MSI_FLAG)

		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
/* Enable NAPI polling on every rx queue. */
static void bnx2x_napi_enable(struct bnx2x *bp)

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
/* Disable NAPI polling on every rx queue (waits for in-flight polls). */
static void bnx2x_napi_disable(struct bnx2x *bp)

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
/* Re-enable the datapath after bnx2x_netif_stop(): when the interrupt
 * semaphore drops to zero, turn NAPI and HW interrupts back on and wake
 * the tx queues (only in the fully-OPEN state).
 */
static void bnx2x_netif_start(struct bnx2x *bp)
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
/* Quiesce the datapath: mask interrupts (optionally at HW level too),
 * stop NAPI, stop the tx queues and refresh trans_start so the stack's
 * tx watchdog does not fire while we are deliberately stopped.
 */
static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	if (netif_running(bp->dev)) {
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
6563 * Init service functions
/* Program (set != 0) or invalidate (set == 0) the E1 CAM entries for
 * this port: entry 0 is the device's unicast MAC, entry 1 the broadcast
 * address.  The command is posted to the FW as a SET_MAC ramrod over
 * the slowpath ring; completion is asynchronous.
 */
static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* entry 0: primary (unicast) MAC, stored as three swab16 halves */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
		config->config_table[0].target_table_entry.flags = 0;
		/* clearing: mark the CAM entry invalid instead */
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* entry 1: broadcast address (ff:ff:ff:ff:ff:ff) */
	config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
/* E1H variant of the CAM programming: a single unicast entry indexed by
 * function number, carrying the outer-VLAN (e1hov) tag for multi-
 * function mode.  Refuses to set while the device is not OPEN.
 */
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC, stored as three swab16 halves */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
		config->config_table[0].flags = BP_PORT(bp);
		/* clearing: invalidate via the action-type flag */
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
/* Wait for a ramrod completion to flip *state_p to @state.  In @poll
 * mode the completion queues are serviced manually (interrupts may be
 * off); otherwise bnx2x_sp_event() updates the state asynchronously.
 * Returns 0 once the state matches, or an error on timeout.
 */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
	/* can take a while if any port is running */

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
				bnx2x_rx_int(&bp->fp[idx], 10);

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state)

	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
/* Bring up the leading (default) connection: ack/enable the IGU for
 * fastpath 0, post the PORT_SETUP ramrod and wait until the device
 * state becomes OPEN.
 */
static int bnx2x_setup_leading(struct bnx2x *bp)

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
/* Bring up a non-default (multi-queue) connection @index: enable its
 * IGU, post CLIENT_SETUP and wait for the fastpath to reach OPEN.
 */
static int bnx2x_setup_multi(struct bnx2x *bp, int index)
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6736 static int bnx2x_poll(struct napi_struct *napi, int budget);
/* Decide the interrupt mode and queue count before load: in RSS mode
 * ask for up to one queue per online CPU (capped by the HW maximum) and
 * try MSI-X; if MSI-X cannot be enabled, fall back to a single queue
 * (MSI or legacy INTx).  Publishes the final tx queue count to the
 * net_device.
 */
static void bnx2x_set_int_mode(struct bnx2x *bp)

		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		   "set number of queues to %d\n", num_queues);

		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));

		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   " number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X set number of "
					  "queues to %d\n", num_queues);
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
6783 static void bnx2x_set_rx_mode(struct net_device *dev);
/* must be called with rtnl_lock.
 * Full device bring-up: pick the interrupt mode, allocate memory, add
 * and enable NAPI, request irqs, negotiate the load stage with the MCP
 * (COMMON/PORT/FUNCTION -- or derive it from load_count when there is
 * no MCP), init the HW and FW internals, open the leading and multi
 * connections, program the MAC, then start the fast path according to
 * @load_mode (NORMAL/OPEN/DIAG).  Unwinds everything on any failure.
 */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)

#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
	if (unlikely(bp->panic))

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),

#ifdef BNX2X_STOP_ON_ERROR
	/* debug builds: clear the per-queue poll counters */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
			pci_disable_msix(bp->pdev);

		/* try MSI unless INTx was explicitly requested */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		rc = bnx2x_req_irq(bp);
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
			BNX2X_ERR("MCP response failure, aborting\n");
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */

		int port = BP_PORT(bp);

		/* no MCP: derive the load stage from the driver-side
		 * load counters (index 0 = chip, 1 + port = per port) */
		DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;

	/* first loader on the port becomes the port management function */
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))

	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	rc = bnx2x_init_hw(bp, load_code);
		BNX2X_ERR("HW init failed, aborting\n");

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
			BNX2X_ERR("MCP response failure, aborting\n");

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
		BNX2X_ERR("Setup leading failed!\n");

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			BNX2X_ERR("!!! mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);

		bnx2x_set_mac_addr_e1(bp, 1);
		bnx2x_set_mac_addr_e1h(bp, 1);

		bnx2x_initial_phy_init(bp);

	/* Start fast path */
	switch (load_mode) {
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);

		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);

		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;

		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	/* error unwind: quiesce, tell the MCP, free everything */
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));

	/* TBD we really need to reset the chip
	   if we want to recover from this */
/* Tear down a non-default connection @index: HALT ramrod, wait for the
 * fastpath to reach HALTED, then CFC_DEL and wait for CLOSED.  Returns
 * 0 on success or the wait-ramrod timeout error.
 */
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
	if (rc) /* timeout */

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
/* Tear down the leading connection: HALT ramrod (polled), then a
 * PORT_DELETE ramrod whose completion is detected by watching the
 * default status block's slowpath producer advance.  A timeout here is
 * tolerated because the chip is about to be reset anyway.
 */
static int bnx2x_stop_leading(struct bnx2x *bp)
	u16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */

	/* snapshot the producer so we can see it move */
	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
		rmb(); /* Refresh the dsb_sp_prod */
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
/* Per-function reset: clear the HC attention edge registers and wipe
 * this function's entire ILT window.
 */
static void bnx2x_reset_func(struct bnx2x *bp)
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);

	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* invalidate every ILT line owned by this function */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
/* Per-port reset: mask NIG interrupts, stop non-MCP traffic into the
 * BRB, mask AEU attentions, then check (and only warn about) leftover
 * BRB occupancy.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
/* Reset the chip according to the MCP's unload response.  The cases
 * cascade by scope: COMMON resets port + function + common blocks,
 * PORT resets port + function, FUNCTION the function only.
 */
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);

		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7141 /* must be called with rtnl_lock */
7142 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7144 int port = BP_PORT(bp);
7148 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7150 bp->rx_mode = BNX2X_RX_MODE_NONE;
7151 bnx2x_set_storm_rx_mode(bp);
7153 bnx2x_netif_stop(bp, 1);
7155 del_timer_sync(&bp->timer);
7156 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7157 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7158 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7163 /* Wait until tx fastpath tasks complete */
7164 for_each_tx_queue(bp, i) {
7165 struct bnx2x_fastpath *fp = &bp->fp[i];
7169 while (bnx2x_has_tx_work_unload(fp)) {
7171 bnx2x_tx_int(fp, 1000);
7173 BNX2X_ERR("timeout waiting for queue[%d]\n",
7175 #ifdef BNX2X_STOP_ON_ERROR
7187 /* Give HW time to discard old tx messages */
7190 if (CHIP_IS_E1(bp)) {
7191 struct mac_configuration_cmd *config =
7192 bnx2x_sp(bp, mcast_config);
7194 bnx2x_set_mac_addr_e1(bp, 0);
7196 for (i = 0; i < config->hdr.length; i++)
7197 CAM_INVALIDATE(config->config_table[i]);
7199 config->hdr.length = i;
7200 if (CHIP_REV_IS_SLOW(bp))
7201 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7203 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7204 config->hdr.client_id = BP_CL_ID(bp);
7205 config->hdr.reserved1 = 0;
7207 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7208 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7209 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7212 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7214 bnx2x_set_mac_addr_e1h(bp, 0);
7216 for (i = 0; i < MC_HASH_SIZE; i++)
7217 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7220 if (unload_mode == UNLOAD_NORMAL)
7221 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7223 else if (bp->flags & NO_WOL_FLAG) {
7224 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7225 if (CHIP_IS_E1H(bp))
7226 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7228 } else if (bp->wol) {
7229 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7230 u8 *mac_addr = bp->dev->dev_addr;
7232 /* The mac address is written to entries 1-4 to
7233 preserve entry 0 which is used by the PMF */
7234 u8 entry = (BP_E1HVN(bp) + 1)*8;
7236 val = (mac_addr[0] << 8) | mac_addr[1];
7237 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7239 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7240 (mac_addr[4] << 8) | mac_addr[5];
7241 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7243 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7246 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7248 /* Close multi and leading connections
7249 Completions for ramrods are collected in a synchronous way */
7250 for_each_nondefault_queue(bp, i)
7251 if (bnx2x_stop_multi(bp, i))
7254 rc = bnx2x_stop_leading(bp);
7256 BNX2X_ERR("Stop leading failed!\n");
7257 #ifdef BNX2X_STOP_ON_ERROR
7266 reset_code = bnx2x_fw_command(bp, reset_code);
7268 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
7269 load_count[0], load_count[1], load_count[2]);
7271 load_count[1 + port]--;
7272 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
7273 load_count[0], load_count[1], load_count[2]);
7274 if (load_count[0] == 0)
7275 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7276 else if (load_count[1 + port] == 0)
7277 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7279 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7282 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7283 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7284 bnx2x__link_reset(bp);
7286 /* Reset the chip */
7287 bnx2x_reset_chip(bp, reset_code);
7289 /* Report UNLOAD_DONE to MCP */
7291 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7294 /* Free SKBs, SGEs, TPA pool and driver internals */
7295 bnx2x_free_skbs(bp);
7296 for_each_rx_queue(bp, i)
7297 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7298 for_each_rx_queue(bp, i)
7299 netif_napi_del(&bnx2x_fp(bp, i, napi));
7302 bp->state = BNX2X_STATE_CLOSED;
7304 netif_carrier_off(bp->dev);
7309 static void bnx2x_reset_task(struct work_struct *work)
7311 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7313 #ifdef BNX2X_STOP_ON_ERROR
7314 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7315 " so reset not done to allow debug dump,\n"
7316 KERN_ERR " you will need to reboot when done\n");
7322 if (!netif_running(bp->dev))
7323 goto reset_task_exit;
7325 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7326 bnx2x_nic_load(bp, LOAD_NORMAL);
7332 /* end of nic load/unload */
7337 * Init service functions
7340 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7343 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7344 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7345 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7346 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7347 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7348 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7349 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7350 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7352 BNX2X_ERR("Unsupported function index: %d\n", func);
7357 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7359 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7361 /* Flush all outstanding writes */
7364 /* Pretend to be function 0 */
7366 /* Flush the GRC transaction (in the chip) */
7367 new_val = REG_RD(bp, reg);
7369 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7374 /* From now we are in the "like-E1" mode */
7375 bnx2x_int_disable(bp);
7377 /* Flush all outstanding writes */
7380 /* Restore the original funtion settings */
7381 REG_WR(bp, reg, orig_func);
7382 new_val = REG_RD(bp, reg);
7383 if (new_val != orig_func) {
7384 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7385 orig_func, new_val);
7390 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7392 if (CHIP_IS_E1H(bp))
7393 bnx2x_undi_int_disable_e1h(bp, func);
7395 bnx2x_int_disable(bp);
7398 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7402 /* Check if there is any driver already loaded */
7403 val = REG_RD(bp, MISC_REG_UNPREPARED);
7405 /* Check if it is the UNDI driver
7406 * UNDI driver initializes CID offset for normal bell to 0x7
7408 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7409 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7411 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7413 int func = BP_FUNC(bp);
7417 /* clear the UNDI indication */
7418 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7420 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7422 /* try unload UNDI on port 0 */
7425 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7426 DRV_MSG_SEQ_NUMBER_MASK);
7427 reset_code = bnx2x_fw_command(bp, reset_code);
7429 /* if UNDI is loaded on the other port */
7430 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7432 /* send "DONE" for previous unload */
7433 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7435 /* unload UNDI on port 1 */
7438 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7439 DRV_MSG_SEQ_NUMBER_MASK);
7440 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7442 bnx2x_fw_command(bp, reset_code);
7445 /* now it's safe to release the lock */
7446 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7448 bnx2x_undi_int_disable(bp, func);
7450 /* close input traffic and wait for it */
7451 /* Do not rcv packets to BRB */
7453 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7454 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7455 /* Do not direct rcv packets that are not for MCP to
7458 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7459 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7462 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7463 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7466 /* save NIG port swap info */
7467 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7468 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7471 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7474 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7476 /* take the NIG out of reset and restore swap values */
7478 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7479 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7480 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7481 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7483 /* send unload done to the MCP */
7484 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7486 /* restore our func and fw_seq */
7489 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7490 DRV_MSG_SEQ_NUMBER_MASK);
7493 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7497 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7499 u32 val, val2, val3, val4, id;
7502 /* Get the chip revision id and number. */
7503 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7504 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7505 id = ((val & 0xffff) << 16);
7506 val = REG_RD(bp, MISC_REG_CHIP_REV);
7507 id |= ((val & 0xf) << 12);
7508 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7509 id |= ((val & 0xff) << 4);
7510 val = REG_RD(bp, MISC_REG_BOND_ID);
7512 bp->common.chip_id = id;
7513 bp->link_params.chip_id = bp->common.chip_id;
7514 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7516 val = (REG_RD(bp, 0x2874) & 0x55);
7517 if ((bp->common.chip_id & 0x1) ||
7518 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7519 bp->flags |= ONE_PORT_FLAG;
7520 BNX2X_DEV_INFO("single port device\n");
7523 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7524 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7525 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7526 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7527 bp->common.flash_size, bp->common.flash_size);
7529 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7530 bp->link_params.shmem_base = bp->common.shmem_base;
7531 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7533 if (!bp->common.shmem_base ||
7534 (bp->common.shmem_base < 0xA0000) ||
7535 (bp->common.shmem_base >= 0xC0000)) {
7536 BNX2X_DEV_INFO("MCP not active\n");
7537 bp->flags |= NO_MCP_FLAG;
7541 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7542 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7543 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7544 BNX2X_ERR("BAD MCP validity signature\n");
7546 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7547 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7549 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7550 SHARED_HW_CFG_LED_MODE_MASK) >>
7551 SHARED_HW_CFG_LED_MODE_SHIFT);
7553 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7554 bp->common.bc_ver = val;
7555 BNX2X_DEV_INFO("bc_ver %X\n", val);
7556 if (val < BNX2X_BC_VER) {
7557 /* for now only warn
7558 * later we might need to enforce this */
7559 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7560 " please upgrade BC\n", BNX2X_BC_VER, val);
7563 if (BP_E1HVN(bp) == 0) {
7564 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7565 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7567 /* no WOL capability for E1HVN != 0 */
7568 bp->flags |= NO_WOL_FLAG;
7570 BNX2X_DEV_INFO("%sWoL capable\n",
7571 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7573 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7574 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7575 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7576 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7578 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7579 val, val2, val3, val4);
7582 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7585 int port = BP_PORT(bp);
7588 switch (switch_cfg) {
7590 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7593 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7594 switch (ext_phy_type) {
7595 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7596 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7599 bp->port.supported |= (SUPPORTED_10baseT_Half |
7600 SUPPORTED_10baseT_Full |
7601 SUPPORTED_100baseT_Half |
7602 SUPPORTED_100baseT_Full |
7603 SUPPORTED_1000baseT_Full |
7604 SUPPORTED_2500baseX_Full |
7609 SUPPORTED_Asym_Pause);
7612 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7613 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7616 bp->port.supported |= (SUPPORTED_10baseT_Half |
7617 SUPPORTED_10baseT_Full |
7618 SUPPORTED_100baseT_Half |
7619 SUPPORTED_100baseT_Full |
7620 SUPPORTED_1000baseT_Full |
7625 SUPPORTED_Asym_Pause);
7629 BNX2X_ERR("NVRAM config error. "
7630 "BAD SerDes ext_phy_config 0x%x\n",
7631 bp->link_params.ext_phy_config);
7635 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7637 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7640 case SWITCH_CFG_10G:
7641 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7644 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7645 switch (ext_phy_type) {
7646 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7647 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7650 bp->port.supported |= (SUPPORTED_10baseT_Half |
7651 SUPPORTED_10baseT_Full |
7652 SUPPORTED_100baseT_Half |
7653 SUPPORTED_100baseT_Full |
7654 SUPPORTED_1000baseT_Full |
7655 SUPPORTED_2500baseX_Full |
7656 SUPPORTED_10000baseT_Full |
7661 SUPPORTED_Asym_Pause);
7664 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7665 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7668 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7669 SUPPORTED_1000baseT_Full |
7673 SUPPORTED_Asym_Pause);
7676 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7677 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7680 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7681 SUPPORTED_2500baseX_Full |
7682 SUPPORTED_1000baseT_Full |
7686 SUPPORTED_Asym_Pause);
7689 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7690 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7693 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7696 SUPPORTED_Asym_Pause);
7699 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7700 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7703 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7704 SUPPORTED_1000baseT_Full |
7707 SUPPORTED_Asym_Pause);
7710 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7711 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7714 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7715 SUPPORTED_1000baseT_Full |
7719 SUPPORTED_Asym_Pause);
7722 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7723 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7726 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7730 SUPPORTED_Asym_Pause);
7733 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7734 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7735 bp->link_params.ext_phy_config);
7739 BNX2X_ERR("NVRAM config error. "
7740 "BAD XGXS ext_phy_config 0x%x\n",
7741 bp->link_params.ext_phy_config);
7745 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7747 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7752 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7753 bp->port.link_config);
7756 bp->link_params.phy_addr = bp->port.phy_addr;
7758 /* mask what we support according to speed_cap_mask */
7759 if (!(bp->link_params.speed_cap_mask &
7760 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7761 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7763 if (!(bp->link_params.speed_cap_mask &
7764 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7765 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7767 if (!(bp->link_params.speed_cap_mask &
7768 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7769 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7771 if (!(bp->link_params.speed_cap_mask &
7772 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7773 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7775 if (!(bp->link_params.speed_cap_mask &
7776 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7777 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7778 SUPPORTED_1000baseT_Full);
7780 if (!(bp->link_params.speed_cap_mask &
7781 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7782 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7784 if (!(bp->link_params.speed_cap_mask &
7785 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7786 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7788 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7791 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7793 bp->link_params.req_duplex = DUPLEX_FULL;
7795 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7796 case PORT_FEATURE_LINK_SPEED_AUTO:
7797 if (bp->port.supported & SUPPORTED_Autoneg) {
7798 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7799 bp->port.advertising = bp->port.supported;
7802 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7804 if ((ext_phy_type ==
7805 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7807 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7808 /* force 10G, no AN */
7809 bp->link_params.req_line_speed = SPEED_10000;
7810 bp->port.advertising =
7811 (ADVERTISED_10000baseT_Full |
7815 BNX2X_ERR("NVRAM config error. "
7816 "Invalid link_config 0x%x"
7817 " Autoneg not supported\n",
7818 bp->port.link_config);
7823 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7824 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7825 bp->link_params.req_line_speed = SPEED_10;
7826 bp->port.advertising = (ADVERTISED_10baseT_Full |
7829 BNX2X_ERR("NVRAM config error. "
7830 "Invalid link_config 0x%x"
7831 " speed_cap_mask 0x%x\n",
7832 bp->port.link_config,
7833 bp->link_params.speed_cap_mask);
7838 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7839 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7840 bp->link_params.req_line_speed = SPEED_10;
7841 bp->link_params.req_duplex = DUPLEX_HALF;
7842 bp->port.advertising = (ADVERTISED_10baseT_Half |
7845 BNX2X_ERR("NVRAM config error. "
7846 "Invalid link_config 0x%x"
7847 " speed_cap_mask 0x%x\n",
7848 bp->port.link_config,
7849 bp->link_params.speed_cap_mask);
7854 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7855 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7856 bp->link_params.req_line_speed = SPEED_100;
7857 bp->port.advertising = (ADVERTISED_100baseT_Full |
7860 BNX2X_ERR("NVRAM config error. "
7861 "Invalid link_config 0x%x"
7862 " speed_cap_mask 0x%x\n",
7863 bp->port.link_config,
7864 bp->link_params.speed_cap_mask);
7869 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7870 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7871 bp->link_params.req_line_speed = SPEED_100;
7872 bp->link_params.req_duplex = DUPLEX_HALF;
7873 bp->port.advertising = (ADVERTISED_100baseT_Half |
7876 BNX2X_ERR("NVRAM config error. "
7877 "Invalid link_config 0x%x"
7878 " speed_cap_mask 0x%x\n",
7879 bp->port.link_config,
7880 bp->link_params.speed_cap_mask);
7885 case PORT_FEATURE_LINK_SPEED_1G:
7886 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7887 bp->link_params.req_line_speed = SPEED_1000;
7888 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7891 BNX2X_ERR("NVRAM config error. "
7892 "Invalid link_config 0x%x"
7893 " speed_cap_mask 0x%x\n",
7894 bp->port.link_config,
7895 bp->link_params.speed_cap_mask);
7900 case PORT_FEATURE_LINK_SPEED_2_5G:
7901 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7902 bp->link_params.req_line_speed = SPEED_2500;
7903 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7906 BNX2X_ERR("NVRAM config error. "
7907 "Invalid link_config 0x%x"
7908 " speed_cap_mask 0x%x\n",
7909 bp->port.link_config,
7910 bp->link_params.speed_cap_mask);
7915 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7916 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7917 case PORT_FEATURE_LINK_SPEED_10G_KR:
7918 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7919 bp->link_params.req_line_speed = SPEED_10000;
7920 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7923 BNX2X_ERR("NVRAM config error. "
7924 "Invalid link_config 0x%x"
7925 " speed_cap_mask 0x%x\n",
7926 bp->port.link_config,
7927 bp->link_params.speed_cap_mask);
7933 BNX2X_ERR("NVRAM config error. "
7934 "BAD link speed link_config 0x%x\n",
7935 bp->port.link_config);
7936 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7937 bp->port.advertising = bp->port.supported;
7941 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7942 PORT_FEATURE_FLOW_CONTROL_MASK);
7943 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7944 !(bp->port.supported & SUPPORTED_Autoneg))
7945 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7947 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7948 " advertising 0x%x\n",
7949 bp->link_params.req_line_speed,
7950 bp->link_params.req_duplex,
7951 bp->link_params.req_flow_ctrl, bp->port.advertising);
7954 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7956 int port = BP_PORT(bp);
7960 bp->link_params.bp = bp;
7961 bp->link_params.port = port;
7963 bp->link_params.serdes_config =
7964 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7965 bp->link_params.lane_config =
7966 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7967 bp->link_params.ext_phy_config =
7969 dev_info.port_hw_config[port].external_phy_config);
7970 bp->link_params.speed_cap_mask =
7972 dev_info.port_hw_config[port].speed_capability_mask);
7974 bp->port.link_config =
7975 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7977 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
7978 if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
7979 bp->link_params.feature_config_flags |=
7980 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
7982 bp->link_params.feature_config_flags &=
7983 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
7985 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7986 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7987 " link_config 0x%08x\n",
7988 bp->link_params.serdes_config,
7989 bp->link_params.lane_config,
7990 bp->link_params.ext_phy_config,
7991 bp->link_params.speed_cap_mask, bp->port.link_config);
7993 bp->link_params.switch_cfg = (bp->port.link_config &
7994 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7995 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7997 bnx2x_link_settings_requested(bp);
7999 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8000 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8001 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8002 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8003 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8004 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8005 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8006 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8007 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8008 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8011 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8013 int func = BP_FUNC(bp);
8017 bnx2x_get_common_hwinfo(bp);
8021 if (CHIP_IS_E1H(bp)) {
8023 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8025 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8026 FUNC_MF_CFG_E1HOV_TAG_MASK);
8027 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8031 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
8033 func, bp->e1hov, bp->e1hov);
8035 BNX2X_DEV_INFO("Single function mode\n");
8037 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8038 " aborting\n", func);
8044 if (!BP_NOMCP(bp)) {
8045 bnx2x_get_port_hwinfo(bp);
8047 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8048 DRV_MSG_SEQ_NUMBER_MASK);
8049 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8053 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8054 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8055 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8056 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8057 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8058 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8059 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8060 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8061 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8062 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8063 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8065 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8073 /* only supposed to happen on emulation/FPGA */
8074 BNX2X_ERR("warning random MAC workaround active\n");
8075 random_ether_addr(bp->dev->dev_addr);
8076 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8082 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8084 int func = BP_FUNC(bp);
8088 /* Disable interrupt handling until HW is initialized */
8089 atomic_set(&bp->intr_sem, 1);
8091 mutex_init(&bp->port.phy_mutex);
8093 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8094 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8096 rc = bnx2x_get_hwinfo(bp);
8098 /* need to reset chip if undi was active */
8100 bnx2x_undi_unload(bp);
8102 if (CHIP_REV_IS_FPGA(bp))
8103 printk(KERN_ERR PFX "FPGA detected\n");
8105 if (BP_NOMCP(bp) && (func == 0))
8107 "MCP disabled, must load devices in order!\n");
8109 /* Set multi queue mode */
8110 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8111 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8113 "Multi disabled since int_mode requested is not MSI-X\n");
8114 multi_mode = ETH_RSS_MODE_DISABLED;
8116 bp->multi_mode = multi_mode;
8121 bp->flags &= ~TPA_ENABLE_FLAG;
8122 bp->dev->features &= ~NETIF_F_LRO;
8124 bp->flags |= TPA_ENABLE_FLAG;
8125 bp->dev->features |= NETIF_F_LRO;
8129 bp->tx_ring_size = MAX_TX_AVAIL;
8130 bp->rx_ring_size = MAX_RX_AVAIL;
8137 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8138 bp->current_interval = (poll ? poll : timer_interval);
8140 init_timer(&bp->timer);
8141 bp->timer.expires = jiffies + bp->current_interval;
8142 bp->timer.data = (unsigned long) bp;
8143 bp->timer.function = bnx2x_timer;
8149 * ethtool service functions
8152 /* All ethtool functions called with rtnl_lock */
8154 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8156 struct bnx2x *bp = netdev_priv(dev);
8158 cmd->supported = bp->port.supported;
8159 cmd->advertising = bp->port.advertising;
8161 if (netif_carrier_ok(dev)) {
8162 cmd->speed = bp->link_vars.line_speed;
8163 cmd->duplex = bp->link_vars.duplex;
8165 cmd->speed = bp->link_params.req_line_speed;
8166 cmd->duplex = bp->link_params.req_duplex;
8171 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8172 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8173 if (vn_max_rate < cmd->speed)
8174 cmd->speed = vn_max_rate;
8177 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8179 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8181 switch (ext_phy_type) {
8182 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8183 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8184 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8185 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8186 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8187 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8188 cmd->port = PORT_FIBRE;
8191 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8192 cmd->port = PORT_TP;
8195 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8196 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8197 bp->link_params.ext_phy_config);
8201 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8202 bp->link_params.ext_phy_config);
8206 cmd->port = PORT_TP;
8208 cmd->phy_address = bp->port.phy_addr;
8209 cmd->transceiver = XCVR_INTERNAL;
8211 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8212 cmd->autoneg = AUTONEG_ENABLE;
8214 cmd->autoneg = AUTONEG_DISABLE;
8219 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8220 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8221 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8222 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8223 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8224 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8225 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8230 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8232 struct bnx2x *bp = netdev_priv(dev);
8238 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8239 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8240 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8241 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8242 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8243 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8244 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8246 if (cmd->autoneg == AUTONEG_ENABLE) {
8247 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8248 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8252 /* advertise the requested speed and duplex if supported */
8253 cmd->advertising &= bp->port.supported;
8255 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8256 bp->link_params.req_duplex = DUPLEX_FULL;
8257 bp->port.advertising |= (ADVERTISED_Autoneg |
8260 } else { /* forced speed */
8261 /* advertise the requested speed and duplex if supported */
8262 switch (cmd->speed) {
8264 if (cmd->duplex == DUPLEX_FULL) {
8265 if (!(bp->port.supported &
8266 SUPPORTED_10baseT_Full)) {
8268 "10M full not supported\n");
8272 advertising = (ADVERTISED_10baseT_Full |
8275 if (!(bp->port.supported &
8276 SUPPORTED_10baseT_Half)) {
8278 "10M half not supported\n");
8282 advertising = (ADVERTISED_10baseT_Half |
8288 if (cmd->duplex == DUPLEX_FULL) {
8289 if (!(bp->port.supported &
8290 SUPPORTED_100baseT_Full)) {
8292 "100M full not supported\n");
8296 advertising = (ADVERTISED_100baseT_Full |
8299 if (!(bp->port.supported &
8300 SUPPORTED_100baseT_Half)) {
8302 "100M half not supported\n");
8306 advertising = (ADVERTISED_100baseT_Half |
8312 if (cmd->duplex != DUPLEX_FULL) {
8313 DP(NETIF_MSG_LINK, "1G half not supported\n");
8317 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8318 DP(NETIF_MSG_LINK, "1G full not supported\n");
8322 advertising = (ADVERTISED_1000baseT_Full |
8327 if (cmd->duplex != DUPLEX_FULL) {
8329 "2.5G half not supported\n");
8333 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8335 "2.5G full not supported\n");
8339 advertising = (ADVERTISED_2500baseX_Full |
8344 if (cmd->duplex != DUPLEX_FULL) {
8345 DP(NETIF_MSG_LINK, "10G half not supported\n");
8349 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8350 DP(NETIF_MSG_LINK, "10G full not supported\n");
8354 advertising = (ADVERTISED_10000baseT_Full |
8359 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8363 bp->link_params.req_line_speed = cmd->speed;
8364 bp->link_params.req_duplex = cmd->duplex;
8365 bp->port.advertising = advertising;
8368 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8369 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8370 bp->link_params.req_line_speed, bp->link_params.req_duplex,
8371 bp->port.advertising);
8373 if (netif_running(dev)) {
8374 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8381 #define PHY_FW_VER_LEN 10
8383 static void bnx2x_get_drvinfo(struct net_device *dev,
8384 struct ethtool_drvinfo *info)
8386 struct bnx2x *bp = netdev_priv(dev);
8387 u8 phy_fw_ver[PHY_FW_VER_LEN];
8389 strcpy(info->driver, DRV_MODULE_NAME);
8390 strcpy(info->version, DRV_MODULE_VERSION);
8392 phy_fw_ver[0] = '\0';
8394 bnx2x_acquire_phy_lock(bp);
8395 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8396 (bp->state != BNX2X_STATE_CLOSED),
8397 phy_fw_ver, PHY_FW_VER_LEN);
8398 bnx2x_release_phy_lock(bp);
8401 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8402 (bp->common.bc_ver & 0xff0000) >> 16,
8403 (bp->common.bc_ver & 0xff00) >> 8,
8404 (bp->common.bc_ver & 0xff),
8405 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8406 strcpy(info->bus_info, pci_name(bp->pdev));
8407 info->n_stats = BNX2X_NUM_STATS;
8408 info->testinfo_len = BNX2X_NUM_TESTS;
8409 info->eedump_len = bp->common.flash_size;
8410 info->regdump_len = 0;
8413 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8415 struct bnx2x *bp = netdev_priv(dev);
8417 if (bp->flags & NO_WOL_FLAG) {
8421 wol->supported = WAKE_MAGIC;
8423 wol->wolopts = WAKE_MAGIC;
8427 memset(&wol->sopass, 0, sizeof(wol->sopass));
8430 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8432 struct bnx2x *bp = netdev_priv(dev);
8434 if (wol->wolopts & ~WAKE_MAGIC)
8437 if (wol->wolopts & WAKE_MAGIC) {
8438 if (bp->flags & NO_WOL_FLAG)
8448 static u32 bnx2x_get_msglevel(struct net_device *dev)
8450 struct bnx2x *bp = netdev_priv(dev);
8452 return bp->msglevel;
8455 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8457 struct bnx2x *bp = netdev_priv(dev);
8459 if (capable(CAP_NET_ADMIN))
8460 bp->msglevel = level;
8463 static int bnx2x_nway_reset(struct net_device *dev)
8465 struct bnx2x *bp = netdev_priv(dev);
8470 if (netif_running(dev)) {
8471 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8478 static int bnx2x_get_eeprom_len(struct net_device *dev)
8480 struct bnx2x *bp = netdev_priv(dev);
8482 return bp->common.flash_size;
8485 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8487 int port = BP_PORT(bp);
8491 /* adjust timeout for emulation/FPGA */
8492 count = NVRAM_TIMEOUT_COUNT;
8493 if (CHIP_REV_IS_SLOW(bp))
8496 /* request access to nvram interface */
8497 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8498 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8500 for (i = 0; i < count*10; i++) {
8501 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8502 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8508 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8509 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8516 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8518 int port = BP_PORT(bp);
8522 /* adjust timeout for emulation/FPGA */
8523 count = NVRAM_TIMEOUT_COUNT;
8524 if (CHIP_REV_IS_SLOW(bp))
8527 /* relinquish nvram interface */
8528 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8529 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8531 for (i = 0; i < count*10; i++) {
8532 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8533 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8539 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8540 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8547 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8551 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8553 /* enable both bits, even on read */
8554 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8555 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8556 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8559 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8563 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8565 /* disable both bits, even after read */
8566 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8567 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8568 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8571 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8577 /* build the command word */
8578 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8580 /* need to clear DONE bit separately */
8581 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8583 /* address of the NVRAM to read from */
8584 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8585 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8587 /* issue a read command */
8588 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8590 /* adjust timeout for emulation/FPGA */
8591 count = NVRAM_TIMEOUT_COUNT;
8592 if (CHIP_REV_IS_SLOW(bp))
8595 /* wait for completion */
8598 for (i = 0; i < count; i++) {
8600 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8602 if (val & MCPR_NVM_COMMAND_DONE) {
8603 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8604 /* we read nvram data in cpu order
8605 * but ethtool sees it as an array of bytes
8606 * converting to big-endian will do the work */
8607 val = cpu_to_be32(val);
8617 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8624 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8626 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8631 if (offset + buf_size > bp->common.flash_size) {
8632 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8633 " buf_size (0x%x) > flash_size (0x%x)\n",
8634 offset, buf_size, bp->common.flash_size);
8638 /* request access to nvram interface */
8639 rc = bnx2x_acquire_nvram_lock(bp);
8643 /* enable access to nvram interface */
8644 bnx2x_enable_nvram_access(bp);
8646 /* read the first word(s) */
8647 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8648 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8649 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8650 memcpy(ret_buf, &val, 4);
8652 /* advance to the next dword */
8653 offset += sizeof(u32);
8654 ret_buf += sizeof(u32);
8655 buf_size -= sizeof(u32);
8660 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8661 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8662 memcpy(ret_buf, &val, 4);
8665 /* disable access to nvram interface */
8666 bnx2x_disable_nvram_access(bp);
8667 bnx2x_release_nvram_lock(bp);
8672 static int bnx2x_get_eeprom(struct net_device *dev,
8673 struct ethtool_eeprom *eeprom, u8 *eebuf)
8675 struct bnx2x *bp = netdev_priv(dev);
8678 if (!netif_running(dev))
8681 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8682 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8683 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8684 eeprom->len, eeprom->len);
8686 /* parameters already validated in ethtool_get_eeprom */
8688 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8693 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8698 /* build the command word */
8699 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8701 /* need to clear DONE bit separately */
8702 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8704 /* write the data */
8705 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8707 /* address of the NVRAM to write to */
8708 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8709 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8711 /* issue the write command */
8712 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8714 /* adjust timeout for emulation/FPGA */
8715 count = NVRAM_TIMEOUT_COUNT;
8716 if (CHIP_REV_IS_SLOW(bp))
8719 /* wait for completion */
8721 for (i = 0; i < count; i++) {
8723 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8724 if (val & MCPR_NVM_COMMAND_DONE) {
8733 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8735 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8743 if (offset + buf_size > bp->common.flash_size) {
8744 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8745 " buf_size (0x%x) > flash_size (0x%x)\n",
8746 offset, buf_size, bp->common.flash_size);
8750 /* request access to nvram interface */
8751 rc = bnx2x_acquire_nvram_lock(bp);
8755 /* enable access to nvram interface */
8756 bnx2x_enable_nvram_access(bp);
8758 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8759 align_offset = (offset & ~0x03);
8760 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8763 val &= ~(0xff << BYTE_OFFSET(offset));
8764 val |= (*data_buf << BYTE_OFFSET(offset));
8766 /* nvram data is returned as an array of bytes
8767 * convert it back to cpu order */
8768 val = be32_to_cpu(val);
8770 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8774 /* disable access to nvram interface */
8775 bnx2x_disable_nvram_access(bp);
8776 bnx2x_release_nvram_lock(bp);
8781 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8789 if (buf_size == 1) /* ethtool */
8790 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8792 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8794 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8799 if (offset + buf_size > bp->common.flash_size) {
8800 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8801 " buf_size (0x%x) > flash_size (0x%x)\n",
8802 offset, buf_size, bp->common.flash_size);
8806 /* request access to nvram interface */
8807 rc = bnx2x_acquire_nvram_lock(bp);
8811 /* enable access to nvram interface */
8812 bnx2x_enable_nvram_access(bp);
8815 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8816 while ((written_so_far < buf_size) && (rc == 0)) {
8817 if (written_so_far == (buf_size - sizeof(u32)))
8818 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8819 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8820 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8821 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8822 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8824 memcpy(&val, data_buf, 4);
8826 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8828 /* advance to the next dword */
8829 offset += sizeof(u32);
8830 data_buf += sizeof(u32);
8831 written_so_far += sizeof(u32);
8835 /* disable access to nvram interface */
8836 bnx2x_disable_nvram_access(bp);
8837 bnx2x_release_nvram_lock(bp);
8842 static int bnx2x_set_eeprom(struct net_device *dev,
8843 struct ethtool_eeprom *eeprom, u8 *eebuf)
8845 struct bnx2x *bp = netdev_priv(dev);
8848 if (!netif_running(dev))
8851 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8852 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8853 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8854 eeprom->len, eeprom->len);
8856 /* parameters already validated in ethtool_set_eeprom */
8858 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8859 if (eeprom->magic == 0x00504859)
8862 bnx2x_acquire_phy_lock(bp);
8863 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8864 bp->link_params.ext_phy_config,
8865 (bp->state != BNX2X_STATE_CLOSED),
8866 eebuf, eeprom->len);
8867 if ((bp->state == BNX2X_STATE_OPEN) ||
8868 (bp->state == BNX2X_STATE_DISABLED)) {
8869 rc |= bnx2x_link_reset(&bp->link_params,
8871 rc |= bnx2x_phy_init(&bp->link_params,
8874 bnx2x_release_phy_lock(bp);
8876 } else /* Only the PMF can access the PHY */
8879 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8884 static int bnx2x_get_coalesce(struct net_device *dev,
8885 struct ethtool_coalesce *coal)
8887 struct bnx2x *bp = netdev_priv(dev);
8889 memset(coal, 0, sizeof(struct ethtool_coalesce));
8891 coal->rx_coalesce_usecs = bp->rx_ticks;
8892 coal->tx_coalesce_usecs = bp->tx_ticks;
8897 static int bnx2x_set_coalesce(struct net_device *dev,
8898 struct ethtool_coalesce *coal)
8900 struct bnx2x *bp = netdev_priv(dev);
8902 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8903 if (bp->rx_ticks > 3000)
8904 bp->rx_ticks = 3000;
8906 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8907 if (bp->tx_ticks > 0x3000)
8908 bp->tx_ticks = 0x3000;
8910 if (netif_running(dev))
8911 bnx2x_update_coalesce(bp);
8916 static void bnx2x_get_ringparam(struct net_device *dev,
8917 struct ethtool_ringparam *ering)
8919 struct bnx2x *bp = netdev_priv(dev);
8921 ering->rx_max_pending = MAX_RX_AVAIL;
8922 ering->rx_mini_max_pending = 0;
8923 ering->rx_jumbo_max_pending = 0;
8925 ering->rx_pending = bp->rx_ring_size;
8926 ering->rx_mini_pending = 0;
8927 ering->rx_jumbo_pending = 0;
8929 ering->tx_max_pending = MAX_TX_AVAIL;
8930 ering->tx_pending = bp->tx_ring_size;
8933 static int bnx2x_set_ringparam(struct net_device *dev,
8934 struct ethtool_ringparam *ering)
8936 struct bnx2x *bp = netdev_priv(dev);
8939 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8940 (ering->tx_pending > MAX_TX_AVAIL) ||
8941 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8944 bp->rx_ring_size = ering->rx_pending;
8945 bp->tx_ring_size = ering->tx_pending;
8947 if (netif_running(dev)) {
8948 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8949 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8955 static void bnx2x_get_pauseparam(struct net_device *dev,
8956 struct ethtool_pauseparam *epause)
8958 struct bnx2x *bp = netdev_priv(dev);
8960 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8961 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8963 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8964 BNX2X_FLOW_CTRL_RX);
8965 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8966 BNX2X_FLOW_CTRL_TX);
8968 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8969 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8970 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8973 static int bnx2x_set_pauseparam(struct net_device *dev,
8974 struct ethtool_pauseparam *epause)
8976 struct bnx2x *bp = netdev_priv(dev);
8981 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8982 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8983 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8985 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8987 if (epause->rx_pause)
8988 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8990 if (epause->tx_pause)
8991 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8993 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8994 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8996 if (epause->autoneg) {
8997 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8998 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9002 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9003 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9007 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9009 if (netif_running(dev)) {
9010 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9017 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9019 struct bnx2x *bp = netdev_priv(dev);
9023 /* TPA requires Rx CSUM offloading */
9024 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9025 if (!(dev->features & NETIF_F_LRO)) {
9026 dev->features |= NETIF_F_LRO;
9027 bp->flags |= TPA_ENABLE_FLAG;
9031 } else if (dev->features & NETIF_F_LRO) {
9032 dev->features &= ~NETIF_F_LRO;
9033 bp->flags &= ~TPA_ENABLE_FLAG;
9037 if (changed && netif_running(dev)) {
9038 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9039 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9045 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9047 struct bnx2x *bp = netdev_priv(dev);
9052 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9054 struct bnx2x *bp = netdev_priv(dev);
9059 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9060 TPA'ed packets will be discarded due to wrong TCP CSUM */
9062 u32 flags = ethtool_op_get_flags(dev);
9064 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9070 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9073 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9074 dev->features |= NETIF_F_TSO6;
9076 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9077 dev->features &= ~NETIF_F_TSO6;
9083 static const struct {
9084 char string[ETH_GSTRING_LEN];
9085 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9086 { "register_test (offline)" },
9087 { "memory_test (offline)" },
9088 { "loopback_test (offline)" },
9089 { "nvram_test (online)" },
9090 { "interrupt_test (online)" },
9091 { "link_test (online)" },
9092 { "idle check (online)" }
9095 static int bnx2x_self_test_count(struct net_device *dev)
9097 return BNX2X_NUM_TESTS;
9100 static int bnx2x_test_registers(struct bnx2x *bp)
9102 int idx, i, rc = -ENODEV;
9104 int port = BP_PORT(bp);
9105 static const struct {
9110 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9111 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9112 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9113 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9114 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9115 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9116 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9117 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9118 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9119 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9120 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9121 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9122 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9123 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9124 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9125 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9126 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9127 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9128 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9129 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9130 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9131 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9132 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9133 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9134 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9135 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9136 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9137 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9138 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9139 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9140 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9141 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9142 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9143 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9144 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9145 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9146 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9147 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9149 { 0xffffffff, 0, 0x00000000 }
9152 if (!netif_running(bp->dev))
9155 /* Repeat the test twice:
9156 First by writing 0x00000000, second by writing 0xffffffff */
9157 for (idx = 0; idx < 2; idx++) {
9164 wr_val = 0xffffffff;
9168 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9169 u32 offset, mask, save_val, val;
9171 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9172 mask = reg_tbl[i].mask;
9174 save_val = REG_RD(bp, offset);
9176 REG_WR(bp, offset, wr_val);
9177 val = REG_RD(bp, offset);
9179 /* Restore the original register's value */
9180 REG_WR(bp, offset, save_val);
9182 /* verify that value is as expected value */
9183 if ((val & mask) != (wr_val & mask))
9194 static int bnx2x_test_memory(struct bnx2x *bp)
9196 int i, j, rc = -ENODEV;
9198 static const struct {
9202 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9203 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9204 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9205 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9206 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9207 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9208 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9212 static const struct {
9218 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9219 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9220 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9221 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9222 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9223 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9225 { NULL, 0xffffffff, 0, 0 }
9228 if (!netif_running(bp->dev))
9231 /* Go through all the memories */
9232 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9233 for (j = 0; j < mem_tbl[i].size; j++)
9234 REG_RD(bp, mem_tbl[i].offset + j*4);
9236 /* Check the parity status */
9237 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9238 val = REG_RD(bp, prty_tbl[i].offset);
9239 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9240 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9242 "%s is 0x%x\n", prty_tbl[i].name, val);
9253 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9258 while (bnx2x_link_test(bp) && cnt--)
9262 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9264 unsigned int pkt_size, num_pkts, i;
9265 struct sk_buff *skb;
9266 unsigned char *packet;
9267 struct bnx2x_fastpath *fp = &bp->fp[0];
9268 u16 tx_start_idx, tx_idx;
9269 u16 rx_start_idx, rx_idx;
9271 struct sw_tx_bd *tx_buf;
9272 struct eth_tx_bd *tx_bd;
9274 union eth_rx_cqe *cqe;
9276 struct sw_rx_bd *rx_buf;
9280 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
9281 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9282 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9284 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
9286 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
9287 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9288 /* wait until link state is restored */
9290 while (cnt-- && bnx2x_test_link(&bp->link_params,
9297 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9300 goto test_loopback_exit;
9302 packet = skb_put(skb, pkt_size);
9303 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9304 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9305 for (i = ETH_HLEN; i < pkt_size; i++)
9306 packet[i] = (unsigned char) (i & 0xff);
9309 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9310 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9312 pkt_prod = fp->tx_pkt_prod++;
9313 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9314 tx_buf->first_bd = fp->tx_bd_prod;
9317 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9318 mapping = pci_map_single(bp->pdev, skb->data,
9319 skb_headlen(skb), PCI_DMA_TODEVICE);
9320 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9321 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9322 tx_bd->nbd = cpu_to_le16(1);
9323 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9324 tx_bd->vlan = cpu_to_le16(pkt_prod);
9325 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9326 ETH_TX_BD_FLAGS_END_BD);
9327 tx_bd->general_data = ((UNICAST_ADDRESS <<
9328 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9332 fp->hw_tx_prods->bds_prod =
9333 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
9334 mb(); /* FW restriction: must not reorder writing nbd and packets */
9335 fp->hw_tx_prods->packets_prod =
9336 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9337 DOORBELL(bp, FP_IDX(fp), 0);
9343 bp->dev->trans_start = jiffies;
9347 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9348 if (tx_idx != tx_start_idx + num_pkts)
9349 goto test_loopback_exit;
9351 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9352 if (rx_idx != rx_start_idx + num_pkts)
9353 goto test_loopback_exit;
9355 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9356 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9357 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9358 goto test_loopback_rx_exit;
9360 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9361 if (len != pkt_size)
9362 goto test_loopback_rx_exit;
9364 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9366 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9367 for (i = ETH_HLEN; i < pkt_size; i++)
9368 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9369 goto test_loopback_rx_exit;
9373 test_loopback_rx_exit:
9375 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9376 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9377 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9378 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9380 /* Update producers */
9381 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9385 bp->link_params.loopback_mode = LOOPBACK_NONE;
9390 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9394 if (!netif_running(bp->dev))
9395 return BNX2X_LOOPBACK_FAILED;
9397 bnx2x_netif_stop(bp, 1);
9398 bnx2x_acquire_phy_lock(bp);
9400 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
9401 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
9402 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9405 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
9406 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
9407 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9410 bnx2x_release_phy_lock(bp);
9411 bnx2x_netif_start(bp);
9416 #define CRC32_RESIDUAL 0xdebb20e3
9418 static int bnx2x_test_nvram(struct bnx2x *bp)
9420 static const struct {
9424 { 0, 0x14 }, /* bootstrap */
9425 { 0x14, 0xec }, /* dir */
9426 { 0x100, 0x350 }, /* manuf_info */
9427 { 0x450, 0xf0 }, /* feature_info */
9428 { 0x640, 0x64 }, /* upgrade_key_info */
9430 { 0x708, 0x70 }, /* manuf_key_info */
9435 u8 *data = (u8 *)buf;
9439 rc = bnx2x_nvram_read(bp, 0, data, 4);
9441 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
9442 goto test_nvram_exit;
9445 magic = be32_to_cpu(buf[0]);
9446 if (magic != 0x669955aa) {
9447 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9449 goto test_nvram_exit;
9452 for (i = 0; nvram_tbl[i].size; i++) {
9454 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9458 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
9459 goto test_nvram_exit;
9462 csum = ether_crc_le(nvram_tbl[i].size, data);
9463 if (csum != CRC32_RESIDUAL) {
9465 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9467 goto test_nvram_exit;
9475 static int bnx2x_test_intr(struct bnx2x *bp)
9477 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9480 if (!netif_running(bp->dev))
9483 config->hdr.length = 0;
9485 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9487 config->hdr.offset = BP_FUNC(bp);
9488 config->hdr.client_id = BP_CL_ID(bp);
9489 config->hdr.reserved1 = 0;
9491 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9492 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9493 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9495 bp->set_mac_pending++;
9496 for (i = 0; i < 10; i++) {
9497 if (!bp->set_mac_pending)
9499 msleep_interruptible(10);
9508 static void bnx2x_self_test(struct net_device *dev,
9509 struct ethtool_test *etest, u64 *buf)
9511 struct bnx2x *bp = netdev_priv(dev);
9513 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9515 if (!netif_running(dev))
9518 /* offline tests are not supported in MF mode */
9520 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9522 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9525 link_up = bp->link_vars.link_up;
9526 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9527 bnx2x_nic_load(bp, LOAD_DIAG);
9528 /* wait until link state is restored */
9529 bnx2x_wait_for_link(bp, link_up);
9531 if (bnx2x_test_registers(bp) != 0) {
9533 etest->flags |= ETH_TEST_FL_FAILED;
9535 if (bnx2x_test_memory(bp) != 0) {
9537 etest->flags |= ETH_TEST_FL_FAILED;
9539 buf[2] = bnx2x_test_loopback(bp, link_up);
9541 etest->flags |= ETH_TEST_FL_FAILED;
9543 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9544 bnx2x_nic_load(bp, LOAD_NORMAL);
9545 /* wait until link state is restored */
9546 bnx2x_wait_for_link(bp, link_up);
9548 if (bnx2x_test_nvram(bp) != 0) {
9550 etest->flags |= ETH_TEST_FL_FAILED;
9552 if (bnx2x_test_intr(bp) != 0) {
9554 etest->flags |= ETH_TEST_FL_FAILED;
9557 if (bnx2x_link_test(bp) != 0) {
9559 etest->flags |= ETH_TEST_FL_FAILED;
9562 #ifdef BNX2X_EXTRA_DEBUG
9563 bnx2x_panic_dump(bp);
9567 static const struct {
9570 u8 string[ETH_GSTRING_LEN];
9571 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9572 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9573 { Q_STATS_OFFSET32(error_bytes_received_hi),
9574 8, "[%d]: rx_error_bytes" },
9575 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9576 8, "[%d]: rx_ucast_packets" },
9577 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9578 8, "[%d]: rx_mcast_packets" },
9579 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9580 8, "[%d]: rx_bcast_packets" },
9581 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9582 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9583 4, "[%d]: rx_phy_ip_err_discards"},
9584 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9585 4, "[%d]: rx_skb_alloc_discard" },
9586 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9588 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9589 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9590 8, "[%d]: tx_packets" }
9593 static const struct {
9597 #define STATS_FLAGS_PORT 1
9598 #define STATS_FLAGS_FUNC 2
9599 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9600 u8 string[ETH_GSTRING_LEN];
9601 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9602 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9603 8, STATS_FLAGS_BOTH, "rx_bytes" },
9604 { STATS_OFFSET32(error_bytes_received_hi),
9605 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9606 { STATS_OFFSET32(total_unicast_packets_received_hi),
9607 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9608 { STATS_OFFSET32(total_multicast_packets_received_hi),
9609 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9610 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9611 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9612 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9613 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9614 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9615 8, STATS_FLAGS_PORT, "rx_align_errors" },
9616 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9617 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9618 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9619 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9620 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9621 8, STATS_FLAGS_PORT, "rx_fragments" },
9622 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9623 8, STATS_FLAGS_PORT, "rx_jabbers" },
9624 { STATS_OFFSET32(no_buff_discard_hi),
9625 8, STATS_FLAGS_BOTH, "rx_discards" },
9626 { STATS_OFFSET32(mac_filter_discard),
9627 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9628 { STATS_OFFSET32(xxoverflow_discard),
9629 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9630 { STATS_OFFSET32(brb_drop_hi),
9631 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9632 { STATS_OFFSET32(brb_truncate_hi),
9633 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9634 { STATS_OFFSET32(pause_frames_received_hi),
9635 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9636 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9637 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9638 { STATS_OFFSET32(nig_timer_max),
9639 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9640 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9641 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9642 { STATS_OFFSET32(rx_skb_alloc_failed),
9643 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9644 { STATS_OFFSET32(hw_csum_err),
9645 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9647 { STATS_OFFSET32(total_bytes_transmitted_hi),
9648 8, STATS_FLAGS_BOTH, "tx_bytes" },
9649 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9650 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9651 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9652 8, STATS_FLAGS_BOTH, "tx_packets" },
9653 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9654 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9655 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9656 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9657 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9658 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9659 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9660 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9661 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9662 8, STATS_FLAGS_PORT, "tx_deferred" },
9663 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9664 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9665 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9666 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9667 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9668 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9669 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9670 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9671 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9672 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9673 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9674 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9675 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9676 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9677 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9678 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9679 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9680 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9681 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9682 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9683 { STATS_OFFSET32(pause_frames_sent_hi),
9684 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9687 #define IS_PORT_STAT(i) \
9688 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9689 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9690 #define IS_E1HMF_MODE_STAT(bp) \
9691 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9693 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9695 struct bnx2x *bp = netdev_priv(dev);
9698 switch (stringset) {
9702 for_each_queue(bp, i) {
9703 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9704 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9705 bnx2x_q_stats_arr[j].string, i);
9706 k += BNX2X_NUM_Q_STATS;
9708 if (IS_E1HMF_MODE_STAT(bp))
9710 for (j = 0; j < BNX2X_NUM_STATS; j++)
9711 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9712 bnx2x_stats_arr[j].string);
9714 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9715 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9717 strcpy(buf + j*ETH_GSTRING_LEN,
9718 bnx2x_stats_arr[i].string);
9725 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9730 static int bnx2x_get_stats_count(struct net_device *dev)
9732 struct bnx2x *bp = netdev_priv(dev);
9736 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9737 if (!IS_E1HMF_MODE_STAT(bp))
9738 num_stats += BNX2X_NUM_STATS;
9740 if (IS_E1HMF_MODE_STAT(bp)) {
9742 for (i = 0; i < BNX2X_NUM_STATS; i++)
9743 if (IS_FUNC_STAT(i))
9746 num_stats = BNX2X_NUM_STATS;
/*
 * ethtool .get_ethtool_stats: copy hardware statistics into @buf as u64s,
 * in the same order bnx2x_get_strings() names them.  Each table entry
 * records an offset (in u32 units) into the stats struct and a size:
 * 0 = skip, 4 = single u32, otherwise two u32s combined via HILO_U64.
 */
9752 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9753 struct ethtool_stats *stats, u64 *buf)
9755 struct bnx2x *bp = netdev_priv(dev);
9756 u32 *hw_stats, *offset;
/* Per-queue counters first. */
9761 for_each_queue(bp, i) {
9762 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9763 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9764 if (bnx2x_q_stats_arr[j].size == 0) {
9765 /* skip this counter */
9769 offset = (hw_stats +
9770 bnx2x_q_stats_arr[j].offset);
9771 if (bnx2x_q_stats_arr[j].size == 4) {
9772 /* 4-byte counter */
9773 buf[k + j] = (u64) *offset;
9776 /* 8-byte counter */
9777 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9779 k += BNX2X_NUM_Q_STATS;
/* Global stats are omitted entirely in E1H MF mode. */
9781 if (IS_E1HMF_MODE_STAT(bp))
9783 hw_stats = (u32 *)&bp->eth_stats;
9784 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9785 if (bnx2x_stats_arr[j].size == 0) {
9786 /* skip this counter */
9790 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9791 if (bnx2x_stats_arr[j].size == 4) {
9792 /* 4-byte counter */
9793 buf[k + j] = (u64) *offset;
9796 /* 8-byte counter */
9797 buf[k + j] = HILO_U64(*offset, *(offset + 1));
/* Alternate path: emit only non-port stats, compacting with index j. */
9800 hw_stats = (u32 *)&bp->eth_stats;
9801 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9802 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9804 if (bnx2x_stats_arr[i].size == 0) {
9805 /* skip this counter */
9810 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9811 if (bnx2x_stats_arr[i].size == 4) {
9812 /* 4-byte counter */
9813 buf[j] = (u64) *offset;
9817 /* 8-byte counter */
9818 buf[j] = HILO_U64(*offset, *(offset + 1));
/*
 * ethtool .phys_id: blink the port LED @data times (on/off pairs, 500 ms
 * steps, interruptible) to physically identify the NIC, then restore the
 * LED to its operational state if the link is up.
 */
9824 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9826 struct bnx2x *bp = netdev_priv(dev);
9827 int port = BP_PORT(bp);
9830 if (!netif_running(dev))
/* data * 2: each blink is one LED_MODE_OPER plus one LED_MODE_OFF step. */
9839 for (i = 0; i < (data * 2); i++) {
9841 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9842 bp->link_params.hw_led_mode,
9843 bp->link_params.chip_id);
9845 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9846 bp->link_params.hw_led_mode,
9847 bp->link_params.chip_id);
9849 msleep_interruptible(500);
9850 if (signal_pending(current))
/* Put the LED back into link-speed operational mode. */
9854 if (bp->link_vars.link_up)
9855 bnx2x_set_led(bp, port, LED_MODE_OPER,
9856 bp->link_vars.line_speed,
9857 bp->link_params.hw_led_mode,
9858 bp->link_params.chip_id);
/* ethtool operations table wired into the netdev in bnx2x_init_dev(). */
9863 static struct ethtool_ops bnx2x_ethtool_ops = {
9864 .get_settings = bnx2x_get_settings,
9865 .set_settings = bnx2x_set_settings,
9866 .get_drvinfo = bnx2x_get_drvinfo,
9867 .get_wol = bnx2x_get_wol,
9868 .set_wol = bnx2x_set_wol,
9869 .get_msglevel = bnx2x_get_msglevel,
9870 .set_msglevel = bnx2x_set_msglevel,
9871 .nway_reset = bnx2x_nway_reset,
9872 .get_link = ethtool_op_get_link,
9873 .get_eeprom_len = bnx2x_get_eeprom_len,
9874 .get_eeprom = bnx2x_get_eeprom,
9875 .set_eeprom = bnx2x_set_eeprom,
9876 .get_coalesce = bnx2x_get_coalesce,
9877 .set_coalesce = bnx2x_set_coalesce,
9878 .get_ringparam = bnx2x_get_ringparam,
9879 .set_ringparam = bnx2x_set_ringparam,
9880 .get_pauseparam = bnx2x_get_pauseparam,
9881 .set_pauseparam = bnx2x_set_pauseparam,
9882 .get_rx_csum = bnx2x_get_rx_csum,
9883 .set_rx_csum = bnx2x_set_rx_csum,
9884 .get_tx_csum = ethtool_op_get_tx_csum,
9885 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9886 .set_flags = bnx2x_set_flags,
9887 .get_flags = ethtool_op_get_flags,
9888 .get_sg = ethtool_op_get_sg,
9889 .set_sg = ethtool_op_set_sg,
9890 .get_tso = ethtool_op_get_tso,
9891 .set_tso = bnx2x_set_tso,
9892 .self_test_count = bnx2x_self_test_count,
9893 .self_test = bnx2x_self_test,
9894 .get_strings = bnx2x_get_strings,
9895 .phys_id = bnx2x_phys_id,
9896 .get_stats_count = bnx2x_get_stats_count,
9897 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9900 /* end of ethtool_ops */
9902 /****************************************************************************
9903 * General service functions
9904 ****************************************************************************/
/*
 * Transition the device to PCI power state @state by rewriting the
 * PCI_PM_CTRL register.  Going to D0 also clears PME status and delays
 * if the device was in D3hot; entering a low-power state may set
 * PME_ENABLE (presumably when WoL is armed — not visible in this chunk).
 */
9906 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9910 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9914 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9915 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9916 PCI_PM_CTRL_PME_STATUS));
9918 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9919 /* delay required during transition out of D3hot */
9924 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9928 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9930 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9933 /* No more memory access after this point until
9934 * device is brought back to D0.
/*
 * Return non-zero when the status block's RX completion index differs
 * from the driver's consumer, i.e. there are RX completions to process.
 * An index landing exactly on MAX_RCQ_DESC_CNT is adjusted (the "next
 * page" BD slot is not a real completion).
 */
9944 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9948 /* Tell compiler that status block fields can change */
9950 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9951 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9953 return (fp->rx_comp_cons != rx_cons_sb);
9957 * net_device service functions
/*
 * NAPI poll handler for one fastpath: update the status block indices,
 * drain TX completions, process up to @budget RX completions, then —
 * only if the budget was not exhausted and no work remains — complete
 * NAPI and re-enable the IGU interrupt for this status block.
 */
9960 static int bnx2x_poll(struct napi_struct *napi, int budget)
9962 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9964 struct bnx2x *bp = fp->bp;
9967 #ifdef BNX2X_STOP_ON_ERROR
9968 if (unlikely(bp->panic))
/* Warm the cache lines we are about to touch. */
9972 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9973 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9974 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9976 bnx2x_update_fpsb_idx(fp);
9978 if (bnx2x_has_tx_work(fp))
9979 bnx2x_tx_int(fp, budget);
9981 if (bnx2x_has_rx_work(fp))
9982 work_done = bnx2x_rx_int(fp, budget);
9983 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9985 /* must not complete if we consumed full budget */
9986 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9988 #ifdef BNX2X_STOP_ON_ERROR
9991 napi_complete(napi);
/* Ack both storms; the CSTORM ack re-enables the IGU interrupt. */
9993 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9994 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9995 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9996 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10002 /* we split the first BD into headers and data BDs
10003 * to ease the pain of our fellow microcode engineers
10004 * we use one mapping for both BDs
10005 * So far this has only been observed to happen
10006 * in Other Operating Systems(TM)
 *
 * Trims the first (header) BD to @hlen bytes and appends a new data BD
 * covering the remainder at mapping+hlen; returns the advanced bd_prod.
 * *tx_bd is updated (past end of view) so the caller can mark the last BD.
10008 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10009 struct bnx2x_fastpath *fp,
10010 struct eth_tx_bd **tx_bd, u16 hlen,
10011 u16 bd_prod, int nbd)
10013 struct eth_tx_bd *h_tx_bd = *tx_bd;
10014 struct eth_tx_bd *d_tx_bd;
10015 dma_addr_t mapping;
10016 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10018 /* first fix first BD */
10019 h_tx_bd->nbd = cpu_to_le16(nbd);
10020 h_tx_bd->nbytes = cpu_to_le16(hlen);
10022 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10023 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10024 h_tx_bd->addr_lo, h_tx_bd->nbd);
10026 /* now get a new data BD
10027 * (after the pbd) and fill it */
10028 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10029 d_tx_bd = &fp->tx_desc_ring[bd_prod];
/* Data BD reuses the header BD's DMA mapping, offset by hlen. */
10031 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10032 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10034 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10035 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10036 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10038 /* this marks the BD as one that has no individual mapping
10039 * the FW ignores this flag in a BD not marked start
10041 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10042 DP(NETIF_MSG_TX_QUEUED,
10043 "TSO split data size is %d (%x:%x)\n",
10044 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10046 /* update tx_bd for marking the last BD flag */
/*
 * Adjust a transport checksum for @fix bytes of offset skew (signed):
 * positive fix subtracts the partial sum of the bytes before t_header,
 * negative fix adds the partial sum after it; result is byte-swapped
 * for the hardware.  Works around the UDP CSUM erratum noted in the
 * file header.
 */
10052 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10055 csum = (u16) ~csum_fold(csum_sub(csum,
10056 csum_partial(t_header - fix, fix, 0)));
10059 csum = (u16) ~csum_fold(csum_add(csum,
10060 csum_partial(t_header, -fix, 0)));
10062 return swab16(csum);
/*
 * Classify an outgoing skb into XMIT_* flag bits: checksum offload
 * needed (v4/v6, TCP or not) and GSO type (TCPv4/TCPv6).  Used by
 * bnx2x_start_xmit() to pick BD flags and parsing-BD fields.
 */
10065 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10069 if (skb->ip_summed != CHECKSUM_PARTIAL)
10073 if (skb->protocol == ntohs(ETH_P_IPV6)) {
10075 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10076 rc |= XMIT_CSUM_TCP;
10080 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10081 rc |= XMIT_CSUM_TCP;
10085 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10088 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10094 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10095 /* check if packet requires linearization (packet is too fragmented) */
/*
 * Returns non-zero ("to_copy") when the skb has more fragments than the
 * FW can fetch per packet.  For LSO packets it additionally slides a
 * window of wnd_size fragments over the frag list and requires each
 * window to hold at least one MSS of payload; otherwise the skb must be
 * linearized by the caller.
 */
10096 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10101 int first_bd_sz = 0;
10103 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10104 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10106 if (xmit_type & XMIT_GSO) {
10107 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10108 /* Check if LSO packet needs to be copied:
10109 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10110 int wnd_size = MAX_FETCH_BD - 3;
10111 /* Number of windows to check */
10112 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10117 /* Headers length */
10118 hlen = (int)(skb_transport_header(skb) - skb->data) +
10121 /* Amount of data (w/o headers) on linear part of SKB*/
10122 first_bd_sz = skb_headlen(skb) - hlen;
10124 wnd_sum = first_bd_sz;
10126 /* Calculate the first sum - it's special */
10127 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10129 skb_shinfo(skb)->frags[frag_idx].size;
10131 /* If there was data on linear skb data - check it */
10132 if (first_bd_sz > 0) {
10133 if (unlikely(wnd_sum < lso_mss)) {
10138 wnd_sum -= first_bd_sz;
10141 /* Others are easier: run through the frag list and
10142 check all windows */
10143 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10145 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10147 if (unlikely(wnd_sum < lso_mss)) {
10152 skb_shinfo(skb)->frags[wnd_idx].size;
10156 /* in non-LSO too fragmented packet should always
10163 if (unlikely(to_copy))
10164 DP(NETIF_MSG_TX_QUEUED,
10165 "Linearization IS REQUIRED for %s packet. "
10166 "num_frags %d hlen %d first_bd_sz %d\n",
10167 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10168 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10174 /* called with netif_tx_lock
10175 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10176 * netif_wake_queue()
 *
 * Main transmit path.  Builds a start BD, an optional parsing BD (for
 * checksum offload / TSO), one BD per page fragment, then rings the
 * doorbell.  Stops the TX queue when the ring cannot hold a maximally
 * fragmented skb.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
10178 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10180 struct bnx2x *bp = netdev_priv(dev);
10181 struct bnx2x_fastpath *fp;
10182 struct netdev_queue *txq;
10183 struct sw_tx_bd *tx_buf;
10184 struct eth_tx_bd *tx_bd;
10185 struct eth_tx_parse_bd *pbd = NULL;
10186 u16 pkt_prod, bd_prod;
10188 dma_addr_t mapping;
10189 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10190 int vlan_off = (bp->e1hov ? 4 : 0);
10194 #ifdef BNX2X_STOP_ON_ERROR
10195 if (unlikely(bp->panic))
10196 return NETDEV_TX_BUSY;
/* Select the fastpath/queue this skb was mapped to. */
10199 fp_index = skb_get_queue_mapping(skb);
10200 txq = netdev_get_tx_queue(dev, fp_index);
10202 fp = &bp->fp[fp_index];
10204 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10205 fp->eth_q_stats.driver_xoff++,
10206 netif_tx_stop_queue(txq);
10207 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10208 return NETDEV_TX_BUSY;
10211 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10212 " gso type %x xmit_type %x\n",
10213 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10214 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10216 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10217 /* First, check if we need to linearize the skb
10218 (due to FW restrictions) */
10219 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10220 /* Statistics of linearization */
10222 if (skb_linearize(skb) != 0) {
10223 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10224 "silently dropping this SKB\n");
10225 dev_kfree_skb_any(skb);
10226 return NETDEV_TX_OK;
10232 Please read carefully. First we use one BD which we mark as start,
10233 then for TSO or xsum we have a parsing info BD,
10234 and only then we have the rest of the TSO BDs.
10235 (don't forget to mark the last one as last,
10236 and to unmap only AFTER you write to the BD ...)
10237 And above all, all pdb sizes are in words - NOT DWORDS!
10240 pkt_prod = fp->tx_pkt_prod++;
10241 bd_prod = TX_BD(fp->tx_bd_prod);
10243 /* get a tx_buf and first BD */
10244 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10245 tx_bd = &fp->tx_desc_ring[bd_prod];
10247 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10248 tx_bd->general_data = (UNICAST_ADDRESS <<
10249 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10251 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10253 /* remember the first BD of the packet */
10254 tx_buf->first_bd = fp->tx_bd_prod;
10257 DP(NETIF_MSG_TX_QUEUED,
10258 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10259 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
/* HW VLAN tag insertion when enabled; else vlan field carries pkt_prod. */
10262 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10263 (bp->flags & HW_VLAN_TX_FLAG)) {
10264 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10265 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10269 tx_bd->vlan = cpu_to_le16(pkt_prod);
10272 /* turn on parsing and get a BD */
10273 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10274 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10276 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
/* Checksum offload: fill the parsing BD (lengths in 16-bit words). */
10279 if (xmit_type & XMIT_CSUM) {
10280 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10282 /* for now NS flag is not used in Linux */
10283 pbd->global_data = (hlen |
10284 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
10285 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10287 pbd->ip_hlen = (skb_transport_header(skb) -
10288 skb_network_header(skb)) / 2;
10290 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10292 pbd->total_hlen = cpu_to_le16(hlen);
10293 hlen = hlen*2 - vlan_off;
10295 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10297 if (xmit_type & XMIT_CSUM_V4)
10298 tx_bd->bd_flags.as_bitfield |=
10299 ETH_TX_BD_FLAGS_IP_CSUM;
10301 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10303 if (xmit_type & XMIT_CSUM_TCP) {
10304 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10307 s8 fix = SKB_CS_OFF(skb); /* signed! */
10309 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10310 pbd->cs_offset = fix / 2;
10312 DP(NETIF_MSG_TX_QUEUED,
10313 "hlen %d offset %d fix %d csum before fix %x\n",
10314 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10317 /* HW bug: fixup the CSUM */
10318 pbd->tcp_pseudo_csum =
10319 bnx2x_csum_fix(skb_transport_header(skb),
10322 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10323 pbd->tcp_pseudo_csum);
/* Map the linear part and fill the start BD. */
10327 mapping = pci_map_single(bp->pdev, skb->data,
10328 skb_headlen(skb), PCI_DMA_TODEVICE);
10330 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10331 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10332 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10333 tx_bd->nbd = cpu_to_le16(nbd);
10334 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10336 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10337 " nbytes %d flags %x vlan %x\n",
10338 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10339 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10340 le16_to_cpu(tx_bd->vlan));
/* TSO: split header/data BDs if needed and fill LSO parsing fields. */
10342 if (xmit_type & XMIT_GSO) {
10344 DP(NETIF_MSG_TX_QUEUED,
10345 "TSO packet len %d hlen %d total len %d tso size %d\n",
10346 skb->len, hlen, skb_headlen(skb),
10347 skb_shinfo(skb)->gso_size);
10349 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10351 if (unlikely(skb_headlen(skb) > hlen))
10352 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10355 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10356 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10357 pbd->tcp_flags = pbd_tcp_flags(skb);
10359 if (xmit_type & XMIT_GSO_V4) {
10360 pbd->ip_id = swab16(ip_hdr(skb)->id);
10361 pbd->tcp_pseudo_csum =
10362 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10363 ip_hdr(skb)->daddr,
10364 0, IPPROTO_TCP, 0));
10367 pbd->tcp_pseudo_csum =
10368 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10369 &ipv6_hdr(skb)->daddr,
10370 0, IPPROTO_TCP, 0));
10372 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
/* One BD per page fragment. */
10375 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10376 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10378 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10379 tx_bd = &fp->tx_desc_ring[bd_prod];
10381 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10382 frag->size, PCI_DMA_TODEVICE);
10384 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10385 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10386 tx_bd->nbytes = cpu_to_le16(frag->size);
10387 tx_bd->vlan = cpu_to_le16(pkt_prod);
10388 tx_bd->bd_flags.as_bitfield = 0;
10390 DP(NETIF_MSG_TX_QUEUED,
10391 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10392 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10393 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10396 /* now at last mark the BD as the last BD */
10397 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10399 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10400 tx_bd, tx_bd->bd_flags.as_bitfield);
10402 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10404 /* now send a tx doorbell, counting the next BD
10405 * if the packet contains or ends with it
10407 if (TX_BD_POFF(bd_prod) < nbd)
10411 DP(NETIF_MSG_TX_QUEUED,
10412 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10413 " tcp_flags %x xsum %x seq %u hlen %u\n",
10414 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10415 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10416 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10418 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
10421 * Make sure that the BD data is updated before updating the producer
10422 * since FW might read the BD right after the producer is updated.
10423 * This is only applicable for weak-ordered memory model archs such
10424 * as IA-64. The following barrier is also mandatory since FW will
10425 * assumes packets must have BDs.
10429 fp->hw_tx_prods->bds_prod =
10430 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
10431 mb(); /* FW restriction: must not reorder writing nbd and packets */
10432 fp->hw_tx_prods->packets_prod =
10433 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
10434 DOORBELL(bp, FP_IDX(fp), 0);
10438 fp->tx_bd_prod += nbd;
10439 dev->trans_start = jiffies;
/* Stop the queue if a worst-case skb no longer fits; re-check to close
 * the race with a concurrent bnx2x_tx_int() freeing ring space. */
10441 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10442 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10443 if we put Tx into XOFF state. */
10445 netif_tx_stop_queue(txq);
10446 fp->eth_q_stats.driver_xoff++;
10447 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10448 netif_tx_wake_queue(txq);
10452 return NETDEV_TX_OK;
10455 /* called with rtnl_lock */
/* ndo_open: power the device up to D0 and load the NIC. */
10456 static int bnx2x_open(struct net_device *dev)
10458 struct bnx2x *bp = netdev_priv(dev);
/* Carrier stays off until the link-state code reports link up. */
10460 netif_carrier_off(dev);
10462 bnx2x_set_power_state(bp, PCI_D0);
10464 return bnx2x_nic_load(bp, LOAD_OPEN);
10467 /* called with rtnl_lock */
/* ndo_stop: unload the NIC and, if we are the last user of the PCI
 * device and not on a slow emulation chip, drop to D3hot. */
10468 static int bnx2x_close(struct net_device *dev)
10470 struct bnx2x *bp = netdev_priv(dev);
10472 /* Unload the driver, release IRQs */
10473 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10474 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10475 if (!CHIP_REV_IS_SLOW(bp))
10476 bnx2x_set_power_state(bp, PCI_D3hot);
10481 /* called with netif_tx_lock from set_multicast */
/*
 * ndo_set_multicast_list: translate dev->flags and the multicast list
 * into an RX mode.  E1 chips program exact-match CAM entries via a
 * SET_MAC ramrod; later chips hash each address into the MC_HASH
 * approximation-filter registers.
 */
10482 static void bnx2x_set_rx_mode(struct net_device *dev)
10484 struct bnx2x *bp = netdev_priv(dev);
10485 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10486 int port = BP_PORT(bp);
10488 if (bp->state != BNX2X_STATE_OPEN) {
10489 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10493 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10495 if (dev->flags & IFF_PROMISC)
10496 rx_mode = BNX2X_RX_MODE_PROMISC;
10498 else if ((dev->flags & IFF_ALLMULTI) ||
10499 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10500 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10502 else { /* some multicasts */
10503 if (CHIP_IS_E1(bp)) {
10504 int i, old, offset;
10505 struct dev_mc_list *mclist;
10506 struct mac_configuration_cmd *config =
10507 bnx2x_sp(bp, mcast_config);
/* Fill one CAM entry per multicast address (MAC split into 3 u16s). */
10509 for (i = 0, mclist = dev->mc_list;
10510 mclist && (i < dev->mc_count);
10511 i++, mclist = mclist->next) {
10513 config->config_table[i].
10514 cam_entry.msb_mac_addr =
10515 swab16(*(u16 *)&mclist->dmi_addr[0]);
10516 config->config_table[i].
10517 cam_entry.middle_mac_addr =
10518 swab16(*(u16 *)&mclist->dmi_addr[2]);
10519 config->config_table[i].
10520 cam_entry.lsb_mac_addr =
10521 swab16(*(u16 *)&mclist->dmi_addr[4]);
10522 config->config_table[i].cam_entry.flags =
10524 config->config_table[i].
10525 target_table_entry.flags = 0;
10526 config->config_table[i].
10527 target_table_entry.client_id = 0;
10528 config->config_table[i].
10529 target_table_entry.vlan_id = 0;
10532 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10533 config->config_table[i].
10534 cam_entry.msb_mac_addr,
10535 config->config_table[i].
10536 cam_entry.middle_mac_addr,
10537 config->config_table[i].
10538 cam_entry.lsb_mac_addr);
/* Invalidate CAM entries left over from a longer previous list. */
10540 old = config->hdr.length;
10542 for (; i < old; i++) {
10543 if (CAM_IS_INVALID(config->
10544 config_table[i])) {
10545 /* already invalidated */
10549 CAM_INVALIDATE(config->
10554 if (CHIP_REV_IS_SLOW(bp))
10555 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10557 offset = BNX2X_MAX_MULTICAST*(1 + port);
10559 config->hdr.length = i;
10560 config->hdr.offset = offset;
10561 config->hdr.client_id = bp->fp->cl_id;
10562 config->hdr.reserved1 = 0;
/* Hand the CAM update to firmware via the slowpath ramrod. */
10564 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10565 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10566 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10569 /* Accept one or more multicasts */
10570 struct dev_mc_list *mclist;
10571 u32 mc_filter[MC_HASH_SIZE];
10572 u32 crc, bit, regidx;
10575 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10577 for (i = 0, mclist = dev->mc_list;
10578 mclist && (i < dev->mc_count);
10579 i++, mclist = mclist->next) {
10581 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
/* Hash: top byte of CRC32C selects a bit in the filter registers. */
10584 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10585 bit = (crc >> 24) & 0xff;
10588 mc_filter[regidx] |= (1 << bit);
10591 for (i = 0; i < MC_HASH_SIZE; i++)
10592 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10597 bp->rx_mode = rx_mode;
10598 bnx2x_set_storm_rx_mode(bp);
10601 /* called with rtnl_lock */
/* ndo_set_mac_address: validate and store the new MAC; if the interface
 * is running, reprogram it into hardware (E1 vs E1H path). */
10602 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10604 struct sockaddr *addr = p;
10605 struct bnx2x *bp = netdev_priv(dev);
10607 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10610 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10611 if (netif_running(dev)) {
10612 if (CHIP_IS_E1(bp))
10613 bnx2x_set_mac_addr_e1(bp, 1);
10615 bnx2x_set_mac_addr_e1h(bp, 1);
10621 /* called with rtnl_lock */
/*
 * ndo_do_ioctl: MII register access (SIOCGMIIPHY/SIOCGMIIREG/write path)
 * through clause-45 PHY reads/writes, serialized by the port PHY mutex.
 * Writes require CAP_NET_ADMIN.  Unknown commands fall through to
 * -EOPNOTSUPP.
 */
10622 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10624 struct mii_ioctl_data *data = if_mii(ifr);
10625 struct bnx2x *bp = netdev_priv(dev);
10626 int port = BP_PORT(bp);
10631 data->phy_id = bp->port.phy_addr;
10635 case SIOCGMIIREG: {
10638 if (!netif_running(dev))
10641 mutex_lock(&bp->port.phy_mutex);
10642 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10643 DEFAULT_PHY_DEV_ADDR,
10644 (data->reg_num & 0x1f), &mii_regval);
10645 data->val_out = mii_regval;
10646 mutex_unlock(&bp->port.phy_mutex);
10651 if (!capable(CAP_NET_ADMIN))
10654 if (!netif_running(dev))
10657 mutex_lock(&bp->port.phy_mutex);
10658 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10659 DEFAULT_PHY_DEV_ADDR,
10660 (data->reg_num & 0x1f), data->val_in);
10661 mutex_unlock(&bp->port.phy_mutex);
10669 return -EOPNOTSUPP;
10672 /* called with rtnl_lock */
/* ndo_change_mtu: bounds-check the new MTU, store it, and reload the
 * NIC if running so the new buffer sizing takes effect. */
10673 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10675 struct bnx2x *bp = netdev_priv(dev);
10678 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10679 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10682 /* This does not race with packet allocation
10683 * because the actual alloc size is
10684 * only updated as part of load
10686 dev->mtu = new_mtu;
10688 if (netif_running(dev)) {
10689 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10690 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
/* ndo_tx_timeout: TX watchdog fired — schedule the reset task (and
 * panic first under BNX2X_STOP_ON_ERROR builds, per the ifdef). */
10696 static void bnx2x_tx_timeout(struct net_device *dev)
10698 struct bnx2x *bp = netdev_priv(dev);
10700 #ifdef BNX2X_STOP_ON_ERROR
10704 /* This allows the netif to be shutdown gracefully before resetting */
10705 schedule_work(&bp->reset_task);
10709 /* called with rtnl_lock */
/* ndo_vlan_rx_register: record the vlan group, derive HW VLAN RX/TX
 * capability flags from dev->features, and push the client config to
 * firmware if the device is running. */
10710 static void bnx2x_vlan_rx_register(struct net_device *dev,
10711 struct vlan_group *vlgrp)
10713 struct bnx2x *bp = netdev_priv(dev);
10717 /* Set flags according to the required capabilities */
10718 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10720 if (dev->features & NETIF_F_HW_VLAN_TX)
10721 bp->flags |= HW_VLAN_TX_FLAG;
10723 if (dev->features & NETIF_F_HW_VLAN_RX)
10724 bp->flags |= HW_VLAN_RX_FLAG;
10726 if (netif_running(dev))
10727 bnx2x_set_client_config(bp);
10732 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler with the IRQ masked so
 * netconsole/kgdboe can make progress without interrupts. */
10733 static void poll_bnx2x(struct net_device *dev)
10735 struct bnx2x *bp = netdev_priv(dev);
10737 disable_irq(bp->pdev->irq);
10738 bnx2x_interrupt(bp->pdev->irq, dev);
10739 enable_irq(bp->pdev->irq);
/* net_device operations table wired into the netdev in bnx2x_init_dev(). */
10743 static const struct net_device_ops bnx2x_netdev_ops = {
10744 .ndo_open = bnx2x_open,
10745 .ndo_stop = bnx2x_close,
10746 .ndo_start_xmit = bnx2x_start_xmit,
10747 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10748 .ndo_set_mac_address = bnx2x_change_mac_addr,
10749 .ndo_validate_addr = eth_validate_addr,
10750 .ndo_do_ioctl = bnx2x_ioctl,
10751 .ndo_change_mtu = bnx2x_change_mtu,
10752 .ndo_tx_timeout = bnx2x_tx_timeout,
10754 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10756 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10757 .ndo_poll_controller = poll_bnx2x,
/*
 * One-time PCI/netdev setup for a probed device: enable the PCI device,
 * claim regions, find PM/PCIe capabilities, set the DMA mask, map BAR0
 * (registers) and BAR2 (doorbells), clear indirect-address registers,
 * and install netdev ops/features.  Unwinds via the err_out_* labels.
 */
10762 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10763 struct net_device *dev)
10768 SET_NETDEV_DEV(dev, &pdev->dev);
10769 bp = netdev_priv(dev);
10774 bp->func = PCI_FUNC(pdev->devfn);
10776 rc = pci_enable_device(pdev);
10778 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
/* Both BAR0 (registers) and BAR2 (doorbells) must be memory BARs. */
10782 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10783 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10786 goto err_out_disable;
10789 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10790 printk(KERN_ERR PFX "Cannot find second PCI device"
10791 " base address, aborting\n");
10793 goto err_out_disable;
/* Only the first function to enable the device claims the regions. */
10796 if (atomic_read(&pdev->enable_cnt) == 1) {
10797 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10799 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10801 goto err_out_disable;
10804 pci_set_master(pdev);
10805 pci_save_state(pdev);
10808 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10809 if (bp->pm_cap == 0) {
10810 printk(KERN_ERR PFX "Cannot find power management"
10811 " capability, aborting\n");
10813 goto err_out_release;
10816 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10817 if (bp->pcie_cap == 0) {
10818 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10821 goto err_out_release;
/* Prefer 64-bit DMA (sets USING_DAC_FLAG); fall back to 32-bit. */
10824 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10825 bp->flags |= USING_DAC_FLAG;
10826 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10827 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10828 " failed, aborting\n");
10830 goto err_out_release;
10833 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10834 printk(KERN_ERR PFX "System does not support DMA,"
10837 goto err_out_release;
10840 dev->mem_start = pci_resource_start(pdev, 0);
10841 dev->base_addr = dev->mem_start;
10842 dev->mem_end = pci_resource_end(pdev, 0);
10844 dev->irq = pdev->irq;
10846 bp->regview = pci_ioremap_bar(pdev, 0);
10847 if (!bp->regview) {
10848 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10850 goto err_out_release;
10853 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10854 min_t(u64, BNX2X_DB_SIZE,
10855 pci_resource_len(pdev, 2)));
10856 if (!bp->doorbells) {
10857 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10859 goto err_out_unmap;
10862 bnx2x_set_power_state(bp, PCI_D0);
10864 /* clean indirect addresses */
10865 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10866 PCICFG_VENDOR_ID_OFFSET);
10867 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10868 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10869 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10870 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10872 dev->watchdog_timeo = TX_TIMEOUT;
10874 dev->netdev_ops = &bnx2x_netdev_ops;
10875 dev->ethtool_ops = &bnx2x_ethtool_ops;
10876 dev->features |= NETIF_F_SG;
10877 dev->features |= NETIF_F_HW_CSUM;
10878 if (bp->flags & USING_DAC_FLAG)
10879 dev->features |= NETIF_F_HIGHDMA;
10881 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10882 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10884 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10885 dev->features |= NETIF_F_TSO6;
/* Error unwind: unmap, release regions, disable device. */
10891 iounmap(bp->regview);
10892 bp->regview = NULL;
10894 if (bp->doorbells) {
10895 iounmap(bp->doorbells);
10896 bp->doorbells = NULL;
10900 if (atomic_read(&pdev->enable_cnt) == 1)
10901 pci_release_regions(pdev);
10904 pci_disable_device(pdev);
10905 pci_set_drvdata(pdev, NULL);
/* Read the negotiated PCIe link width (number of lanes) from the
 * link-control register. */
10911 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10913 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10915 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10919 /* return value of 1=2.5GHz 2=5GHz */
/* Read the negotiated PCIe link speed field from the link-control register. */
10920 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10922 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10924 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
/*
 * PCI probe entry point: allocate a multiqueue etherdev, run the PCI/
 * netdev setup (bnx2x_init_dev) and driver-state setup (bnx2x_init_bp),
 * register the netdev, and log board/PCIe/MAC details.  Unwinds via the
 * labelled error path on failure.
 */
10928 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10929 const struct pci_device_id *ent)
10931 static int version_printed;
10932 struct net_device *dev = NULL;
10936 if (version_printed++ == 0)
10937 printk(KERN_INFO "%s", version);
10939 /* dev zeroed in init_etherdev */
10940 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
10942 printk(KERN_ERR PFX "Cannot allocate net device\n");
10946 bp = netdev_priv(dev);
10947 bp->msglevel = debug;
10949 rc = bnx2x_init_dev(pdev, dev);
10955 pci_set_drvdata(pdev, dev);
10957 rc = bnx2x_init_bp(bp);
10959 goto init_one_exit;
10961 rc = register_netdev(dev);
10963 dev_err(&pdev->dev, "Cannot register net device\n");
10964 goto init_one_exit;
10967 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10968 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
10969 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10970 bnx2x_get_pcie_width(bp),
10971 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10972 dev->base_addr, bp->pdev->irq);
10973 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
/* Error unwind: unmap BARs, release regions, disable device. */
10978 iounmap(bp->regview);
10981 iounmap(bp->doorbells);
10985 if (atomic_read(&pdev->enable_cnt) == 1)
10986 pci_release_regions(pdev);
10988 pci_disable_device(pdev);
10989 pci_set_drvdata(pdev, NULL);
/* PCI remove entry point: unregister the netdev and release every
 * resource acquired in bnx2x_init_dev()/bnx2x_init_one(). */
10994 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10996 struct net_device *dev = pci_get_drvdata(pdev);
11000 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11003 bp = netdev_priv(dev);
11005 unregister_netdev(dev);
11008 iounmap(bp->regview);
11011 iounmap(bp->doorbells);
11015 if (atomic_read(&pdev->enable_cnt) == 1)
11016 pci_release_regions(pdev);
11018 pci_disable_device(pdev);
11019 pci_set_drvdata(pdev, NULL);
/* PM suspend: save PCI state, detach the netif, unload the NIC, and
 * enter the PCI power state chosen for @state. */
11022 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11024 struct net_device *dev = pci_get_drvdata(pdev);
11028 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11031 bp = netdev_priv(dev);
11035 pci_save_state(pdev);
11037 if (!netif_running(dev)) {
11042 netif_device_detach(dev);
11044 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11046 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
/* PM resume: restore PCI state, return to D0, re-attach the netif,
 * and reload the NIC if the interface was running. */
11053 static int bnx2x_resume(struct pci_dev *pdev)
11055 struct net_device *dev = pci_get_drvdata(pdev);
11060 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11063 bp = netdev_priv(dev);
11067 pci_restore_state(pdev);
11069 if (!netif_running(dev)) {
11074 bnx2x_set_power_state(bp, PCI_D0);
11075 netif_device_attach(dev);
11077 rc = bnx2x_nic_load(bp, LOAD_OPEN);
/*
 * Minimal NIC teardown for the EEH (PCI error recovery) path: the device
 * may be inaccessible, so skip hardware handshakes — stop the netif and
 * timers, free IRQs, invalidate E1 multicast CAM entries, release SKBs/
 * SGEs/memory, and mark the state CLOSED.
 */
11084 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11088 bp->state = BNX2X_STATE_ERROR;
11090 bp->rx_mode = BNX2X_RX_MODE_NONE;
11092 bnx2x_netif_stop(bp, 0);
11094 del_timer_sync(&bp->timer);
11095 bp->stats_state = STATS_STATE_DISABLED;
11096 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11099 bnx2x_free_irq(bp);
11101 if (CHIP_IS_E1(bp)) {
11102 struct mac_configuration_cmd *config =
11103 bnx2x_sp(bp, mcast_config);
11105 for (i = 0; i < config->hdr.length; i++)
11106 CAM_INVALIDATE(config->config_table[i]);
11109 /* Free SKBs, SGEs, TPA pool and driver internals */
11110 bnx2x_free_skbs(bp);
11111 for_each_rx_queue(bp, i)
11112 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
11113 for_each_rx_queue(bp, i)
11114 netif_napi_del(&bnx2x_fp(bp, i, napi));
11115 bnx2x_free_mem(bp);
11117 bp->state = BNX2X_STATE_CLOSED;
11119 netif_carrier_off(bp->dev);
/*
 * bnx2x_eeh_recover - re-establish MCP (management CPU) contact after a
 * PCI slot reset.
 *
 * Re-reads the shared-memory base from the chip, sanity-checks that the
 * bootcode/MCP is alive, and resynchronizes the driver's firmware
 * mailbox sequence number.  If the MCP looks dead, the NO_MCP_FLAG is
 * set so the driver carries on in MCP-less mode.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* A valid shmem base lives in [0xA0000, 0xC0000); anything else
	 * means the bootcode never ran and there is no MCP to talk to
	 */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		/* Resync the driver/firmware mailbox sequence counter */
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.  It detaches the netdev, performs the
 * minimal EEH unload and disables the device, then asks the PCI error
 * recovery core for a slot reset.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Re-enables
 * the device, restores bus mastering and saved config space, and powers
 * the chip to D0 if the interface was running.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.  Re-establishes MCP contact,
 * reloads the NIC if the interface was up, and re-attaches the netdev.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
/* PCI error recovery (EEH) callbacks registered with the PCI core */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
/* Top-level PCI driver descriptor: probe/remove, legacy PM hooks and
 * the error-recovery handler table above
 */
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
11254 static int __init bnx2x_init(void)
11256 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11257 if (bnx2x_wq == NULL) {
11258 printk(KERN_ERR PFX "Cannot create workqueue\n");
11262 return pci_register_driver(&bnx2x_pci_driver);
/*
 * bnx2x_cleanup - module exit point.
 *
 * Unregister the PCI driver first so no new work can be queued, then
 * destroy the slowpath workqueue (destroy_workqueue flushes pending
 * work before freeing it).
 */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
/* Module load/unload hooks */
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);