/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.23"
#define DRV_MODULE_RELDATE      "2008/11/03"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

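/* Write len32 dwords from host memory at dma_addr into the device address
 * space at dst_addr using the init DMAE channel, then poll the slowpath
 * wb_comp word until the controller signals completion or the retry budget
 * runs out.  Falls back to indirect register writes while DMAE is not
 * ready (early init).
 */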
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

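/* Counterpart of bnx2x_write_dmae(): read len32 dwords from the device
 * address space at src_addr into the slowpath wb_data buffer, polling
 * wb_comp for completion.  Uses indirect register reads while DMAE is
 * not ready.
 */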
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

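/* Scan the assert lists of the four storm processors (XSTORM, TSTORM,
 * CSTORM and USTORM), print every valid entry and return the number of
 * asserts found.  Scanning stops at the first invalid opcode in each list.
 */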
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

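/* Dump the firmware trace kept in the MCP scratchpad.  The mark read from
 * offset 0xf104 points at the current write position, so the buffer is
 * printed in two passes: from the mark to the end of the region and then
 * from its start back to the mark.
 */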
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk(KERN_CONT "\n");
        printk(KERN_ERR PFX "end of fw dump\n");
}

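/* Collect everything useful for post-mortem analysis: per-queue producer
 * and consumer indices, the surrounding TX/RX descriptor and completion
 * ring entries, the default status block indices, the firmware trace and
 * the storm assert lists.  Statistics collection is disabled first.
 */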
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

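/* Program the HC configuration register for this port.  In MSI-X mode the
 * single-ISR bit is cleared and MSI/MSI-X plus attention bits are enabled;
 * in INTA mode all bits are enabled first and the MSI/MSI-X bit is then
 * stripped with a second write.  On E1H the leading/trailing edge
 * registers are set up as well.
 */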
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

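/* Acknowledge a status block: write the new index together with the storm
 * ID, interrupt mode and update flag to the IGU through the HC command
 * register of this port.
 */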
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries;
           it is used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

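/* Service TX completions: walk the completion queue from the driver's
 * consumer index up to the index reported in the status block, free every
 * completed skb and, if the netif queue was stopped, wake it once enough
 * descriptors are available again.
 */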
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}


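/* Handle a slowpath (ramrod) completion that arrived on the RX completion
 * queue: advance the fastpath or global state machine according to the
 * command echoed in the CQE and the current state.
 */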
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

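/* Allocate a compound page for an SGE ring entry, map it for DMA and
 * publish the mapping in the hardware SGE descriptor.  Returns -ENOMEM
 * on allocation or mapping failure.
 */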
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

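/* Advance the SGE producer after a TPA completion: clear the mask bits of
 * all SGEs consumed by this CQE, then push the producer over every fully
 * consumed 64-entry mask element, skipping the "next page" entries.
 */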
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the last two indices in each page:
           these are the indices that correspond to the "next" element,
           hence they will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

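/* Start of a TPA aggregation: put the bin's spare skb on the RX ring in
 * place of the skb that the aggregation is using, and park the latter in
 * the per-queue TPA bin, so the hardware can keep filling the ring while
 * the aggregation is in flight.
 */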
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

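/* Attach the SGE pages of an aggregated packet to the skb as page
 * fragments.  Each consumed page is replaced on the SGE ring; if a
 * replacement cannot be allocated, the routine bails out and the packet
 * is dropped.
 */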
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take VLAN tag into an account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since the FW
         * assumes that the BDs have buffers.
         */
        wmb();

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}

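/* Main RX polling loop (called from NAPI): consume completion queue
 * entries up to the index reported in the status block, dispatching
 * slowpath events, TPA start/stop notifications and regular packets,
 * until the budget is exhausted.
 */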
1391 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1392 {
1393         struct bnx2x *bp = fp->bp;
1394         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1395         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1396         int rx_pkt = 0;
1397
1398 #ifdef BNX2X_STOP_ON_ERROR
1399         if (unlikely(bp->panic))
1400                 return 0;
1401 #endif
1402
1403         /* CQ "next element" is of the size of the regular element,
1404            that's why it's ok here */
1405         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1406         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1407                 hw_comp_cons++;
1408
1409         bd_cons = fp->rx_bd_cons;
1410         bd_prod = fp->rx_bd_prod;
1411         bd_prod_fw = bd_prod;
1412         sw_comp_cons = fp->rx_comp_cons;
1413         sw_comp_prod = fp->rx_comp_prod;
1414
1415         /* Memory barrier necessary as speculative reads of the rx
1416          * buffer can be ahead of the index in the status block
1417          */
1418         rmb();
1419
1420         DP(NETIF_MSG_RX_STATUS,
1421            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1422            FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1423
1424         while (sw_comp_cons != hw_comp_cons) {
1425                 struct sw_rx_bd *rx_buf = NULL;
1426                 struct sk_buff *skb;
1427                 union eth_rx_cqe *cqe;
1428                 u8 cqe_fp_flags;
1429                 u16 len, pad;
1430
1431                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1432                 bd_prod = RX_BD(bd_prod);
1433                 bd_cons = RX_BD(bd_cons);
1434
1435                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1436                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1437
1438                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1439                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1440                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1441                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1442                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1443                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1444
1445                 /* is this a slowpath msg? */
1446                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1447                         bnx2x_sp_event(fp, cqe);
1448                         goto next_cqe;
1449
1450                 /* this is an rx packet */
1451                 } else {
1452                         rx_buf = &fp->rx_buf_ring[bd_cons];
1453                         skb = rx_buf->skb;
1454                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1455                         pad = cqe->fast_path_cqe.placement_offset;
1456
1457                         /* If CQE is marked both TPA_START and TPA_END
1458                            it is a non-TPA CQE */
1459                         if ((!fp->disable_tpa) &&
1460                             (TPA_TYPE(cqe_fp_flags) !=
1461                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1462                                 u16 queue = cqe->fast_path_cqe.queue_index;
1463
1464                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1465                                         DP(NETIF_MSG_RX_STATUS,
1466                                            "calling tpa_start on queue %d\n",
1467                                            queue);
1468
1469                                         bnx2x_tpa_start(fp, queue, skb,
1470                                                         bd_cons, bd_prod);
1471                                         goto next_rx;
1472                                 }
1473
1474                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1475                                         DP(NETIF_MSG_RX_STATUS,
1476                                            "calling tpa_stop on queue %d\n",
1477                                            queue);
1478
1479                                         if (!BNX2X_RX_SUM_FIX(cqe))
1480                                                 BNX2X_ERR("STOP on non-TCP "
1481                                                           "data\n");
1482
1483                                         /* This is the size of the linear data
1484                                            on this skb */
1485                                         len = le16_to_cpu(cqe->fast_path_cqe.
1486                                                                 len_on_bd);
1487                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1488                                                     len, cqe, comp_ring_cons);
1489 #ifdef BNX2X_STOP_ON_ERROR
1490                                         if (bp->panic)
1491                                                 return -EINVAL;
1492 #endif
1493
1494                                         bnx2x_update_sge_prod(fp,
1495                                                         &cqe->fast_path_cqe);
1496                                         goto next_cqe;
1497                                 }
1498                         }
1499
1500                         pci_dma_sync_single_for_device(bp->pdev,
1501                                         pci_unmap_addr(rx_buf, mapping),
1502                                                        pad + RX_COPY_THRESH,
1503                                                        PCI_DMA_FROMDEVICE);
1504                         prefetch(skb);
1505                         prefetch(((char *)(skb)) + 128);
1506
1507                         /* is this an error packet? */
1508                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1509                                 DP(NETIF_MSG_RX_ERR,
1510                                    "ERROR  flags %x  rx packet %u\n",
1511                                    cqe_fp_flags, sw_comp_cons);
1512                                 bp->eth_stats.rx_err_discard_pkt++;
1513                                 goto reuse_rx;
1514                         }
1515
1516                         /* Since we don't have a jumbo ring,
1517                          * copy small packets if mtu > 1500
1518                          */
1519                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1520                             (len <= RX_COPY_THRESH)) {
1521                                 struct sk_buff *new_skb;
1522
1523                                 new_skb = netdev_alloc_skb(bp->dev,
1524                                                            len + pad);
1525                                 if (new_skb == NULL) {
1526                                         DP(NETIF_MSG_RX_ERR,
1527                                            "ERROR  packet dropped "
1528                                            "because of alloc failure\n");
1529                                         bp->eth_stats.rx_skb_alloc_failed++;
1530                                         goto reuse_rx;
1531                                 }
1532
1533                                 /* aligned copy */
1534                                 skb_copy_from_linear_data_offset(skb, pad,
1535                                                     new_skb->data + pad, len);
1536                                 skb_reserve(new_skb, pad);
1537                                 skb_put(new_skb, len);
1538
1539                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1540
1541                                 skb = new_skb;
1542
1543                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1544                                 pci_unmap_single(bp->pdev,
1545                                         pci_unmap_addr(rx_buf, mapping),
1546                                                  bp->rx_buf_size,
1547                                                  PCI_DMA_FROMDEVICE);
1548                                 skb_reserve(skb, pad);
1549                                 skb_put(skb, len);
1550
1551                         } else {
1552                                 DP(NETIF_MSG_RX_ERR,
1553                                    "ERROR  packet dropped because "
1554                                    "of alloc failure\n");
1555                                 bp->eth_stats.rx_skb_alloc_failed++;
1556 reuse_rx:
1557                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1558                                 goto next_rx;
1559                         }
1560
1561                         skb->protocol = eth_type_trans(skb, bp->dev);
1562
1563                         skb->ip_summed = CHECKSUM_NONE;
1564                         if (bp->rx_csum) {
1565                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1566                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1567                                 else
1568                                         bp->eth_stats.hw_csum_err++;
1569                         }
1570                 }
1571
1572 #ifdef BCM_VLAN
1573                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1574                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1575                      PARSING_FLAGS_VLAN))
1576                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1577                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1578                 else
1579 #endif
1580                         netif_receive_skb(skb);
1581
1582
1583 next_rx:
1584                 rx_buf->skb = NULL;
1585
1586                 bd_cons = NEXT_RX_IDX(bd_cons);
1587                 bd_prod = NEXT_RX_IDX(bd_prod);
1588                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1589                 rx_pkt++;
1590 next_cqe:
1591                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1592                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1593
1594                 if (rx_pkt == budget)
1595                         break;
1596         } /* while */
1597
1598         fp->rx_bd_cons = bd_cons;
1599         fp->rx_bd_prod = bd_prod_fw;
1600         fp->rx_comp_cons = sw_comp_cons;
1601         fp->rx_comp_prod = sw_comp_prod;
1602
1603         /* Update producers */
1604         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1605                              fp->rx_sge_prod);
1606
1607         fp->rx_pkt += rx_pkt;
1608         fp->rx_calls++;
1609
1610         return rx_pkt;
1611 }
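/* Illustrative sketch (hypothetical, for exposition only): the usual way a
 * NAPI poll handler consumes the return value of an RX routine such as
 * bnx2x_rx_int() - receiving less than the budget means the ring is
 * drained and the interrupt may be re-armed.
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
        struct bnx2x_fastpath *fp =
                container_of(napi, struct bnx2x_fastpath, napi);
        int work_done = bnx2x_rx_int(fp, budget);

        if (work_done < budget) {
                netif_rx_complete(napi);
                /* ... re-enable the status block interrupt here ... */
        }
        return work_done;
}
#endif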
1612
1613 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1614 {
1615         struct bnx2x_fastpath *fp = fp_cookie;
1616         struct bnx2x *bp = fp->bp;
1617         int index = FP_IDX(fp);
1618
1619         /* Return here if interrupt is disabled */
1620         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1621                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1622                 return IRQ_HANDLED;
1623         }
1624
1625         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1626            index, FP_SB_ID(fp));
1627         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1628
1629 #ifdef BNX2X_STOP_ON_ERROR
1630         if (unlikely(bp->panic))
1631                 return IRQ_HANDLED;
1632 #endif
1633
1634         prefetch(fp->rx_cons_sb);
1635         prefetch(fp->tx_cons_sb);
1636         prefetch(&fp->status_blk->c_status_block.status_block_index);
1637         prefetch(&fp->status_blk->u_status_block.status_block_index);
1638
1639         netif_rx_schedule(&bnx2x_fp(bp, index, napi));
1640
1641         return IRQ_HANDLED;
1642 }
1643
1644 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1645 {
1646         struct net_device *dev = dev_instance;
1647         struct bnx2x *bp = netdev_priv(dev);
1648         u16 status = bnx2x_ack_int(bp);
1649         u16 mask;
1650
1651         /* Return here if interrupt is shared and it's not for us */
1652         if (unlikely(status == 0)) {
1653                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1654                 return IRQ_NONE;
1655         }
1656         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1657
1658         /* Return here if interrupt is disabled */
1659         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1660                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1661                 return IRQ_HANDLED;
1662         }
1663
1664 #ifdef BNX2X_STOP_ON_ERROR
1665         if (unlikely(bp->panic))
1666                 return IRQ_HANDLED;
1667 #endif
1668
1669         mask = 0x2 << bp->fp[0].sb_id;
1670         if (status & mask) {
1671                 struct bnx2x_fastpath *fp = &bp->fp[0];
1672
1673                 prefetch(fp->rx_cons_sb);
1674                 prefetch(fp->tx_cons_sb);
1675                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1676                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1677
1678                 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
1679
1680                 status &= ~mask;
1681         }
1682
1683
1684         if (unlikely(status & 0x1)) {
1685                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1686
1687                 status &= ~0x1;
1688                 if (!status)
1689                         return IRQ_HANDLED;
1690         }
1691
1692         if (status)
1693                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1694                    status);
1695
1696         return IRQ_HANDLED;
1697 }
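/* Illustrative decode of the INTA status word for the single-queue case
 * (sb_id == 0): bit 0 (0x1) is the default/slowpath status block and
 * bit 1 (0x2 << sb_id) is fastpath status block 0, so a status of 0x3
 * reports both pending slowpath work and RX/TX completions.
 */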
1698
1699 /* end of fast path */
1700
1701 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1702
1703 /* Link */
1704
1705 /*
1706  * General service functions
1707  */
1708
1709 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1710 {
1711         u32 lock_status;
1712         u32 resource_bit = (1 << resource);
1713         int func = BP_FUNC(bp);
1714         u32 hw_lock_control_reg;
1715         int cnt;
1716
1717         /* Validating that the resource is within range */
1718         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1719                 DP(NETIF_MSG_HW,
1720                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1721                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1722                 return -EINVAL;
1723         }
1724
1725         if (func <= 5) {
1726                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1727         } else {
1728                 hw_lock_control_reg =
1729                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1730         }
1731
1732         /* Validating that the resource is not already taken */
1733         lock_status = REG_RD(bp, hw_lock_control_reg);
1734         if (lock_status & resource_bit) {
1735                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1736                    lock_status, resource_bit);
1737                 return -EEXIST;
1738         }
1739
1740         /* Try for 5 seconds, polling every 5ms */
1741         for (cnt = 0; cnt < 1000; cnt++) {
1742                 /* Try to acquire the lock */
1743                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1744                 lock_status = REG_RD(bp, hw_lock_control_reg);
1745                 if (lock_status & resource_bit)
1746                         return 0;
1747
1748                 msleep(5);
1749         }
1750         DP(NETIF_MSG_HW, "Timeout\n");
1751         return -EAGAIN;
1752 }
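/* Note on the mechanism: the lock is a HW test-and-set.  Writing
 * resource_bit to (hw_lock_control_reg + 4) requests the bit, and only
 * the winning function sees it set on the read-back; the release path
 * below clears it by writing the same bit to hw_lock_control_reg itself.
 */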
1753
1754 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1755 {
1756         u32 lock_status;
1757         u32 resource_bit = (1 << resource);
1758         int func = BP_FUNC(bp);
1759         u32 hw_lock_control_reg;
1760
1761         /* Validating that the resource is within range */
1762         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1763                 DP(NETIF_MSG_HW,
1764                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1765                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1766                 return -EINVAL;
1767         }
1768
1769         if (func <= 5) {
1770                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1771         } else {
1772                 hw_lock_control_reg =
1773                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1774         }
1775
1776         /* Validating that the resource is currently taken */
1777         lock_status = REG_RD(bp, hw_lock_control_reg);
1778         if (!(lock_status & resource_bit)) {
1779                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1780                    lock_status, resource_bit);
1781                 return -EFAULT;
1782         }
1783
1784         REG_WR(bp, hw_lock_control_reg, resource_bit);
1785         return 0;
1786 }
1787
1788 /* HW Lock for shared dual port PHYs */
1789 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1790 {
1791         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1792
1793         mutex_lock(&bp->port.phy_mutex);
1794
1795         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1796             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1797                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1798 }
1799
1800 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1801 {
1802         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1803
1804         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1805             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1806                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1807
1808         mutex_unlock(&bp->port.phy_mutex);
1809 }
1810
1811 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1812 {
1813         /* The GPIO should be swapped if swap register is set and active */
1814         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1815                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1816         int gpio_shift = gpio_num +
1817                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1818         u32 gpio_mask = (1 << gpio_shift);
1819         u32 gpio_reg;
1820
1821         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1822                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1823                 return -EINVAL;
1824         }
1825
1826         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1827         /* read GPIO and keep only the float bits */
1828         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1829
1830         switch (mode) {
1831         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1832                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1833                    gpio_num, gpio_shift);
1834                 /* clear FLOAT and set CLR */
1835                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1836                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1837                 break;
1838
1839         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1840                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1841                    gpio_num, gpio_shift);
1842                 /* clear FLOAT and set SET */
1843                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1844                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1845                 break;
1846
1847         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1848                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1849                    gpio_num, gpio_shift);
1850                 /* set FLOAT */
1851                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1852                 break;
1853
1854         default:
1855                 break;
1856         }
1857
1858         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1859         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1860
1861         return 0;
1862 }
1863
1864 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1865 {
1866         u32 spio_mask = (1 << spio_num);
1867         u32 spio_reg;
1868
1869         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1870             (spio_num > MISC_REGISTERS_SPIO_7)) {
1871                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1872                 return -EINVAL;
1873         }
1874
1875         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1876         /* read SPIO and keep only the float bits */
1877         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1878
1879         switch (mode) {
1880         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1881                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1882                 /* clear FLOAT and set CLR */
1883                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1884                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1885                 break;
1886
1887         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1888                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1889                 /* clear FLOAT and set SET */
1890                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1891                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1892                 break;
1893
1894         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1895                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1896                 /* set FLOAT */
1897                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1898                 break;
1899
1900         default:
1901                 break;
1902         }
1903
1904         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1905         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1906
1907         return 0;
1908 }
1909
1910 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1911 {
1912         switch (bp->link_vars.ieee_fc &
1913                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1914         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1915                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1916                                           ADVERTISED_Pause);
1917                 break;
1918         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1919                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1920                                          ADVERTISED_Pause);
1921                 break;
1922         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1923                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1924                 break;
1925         default:
1926                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1927                                           ADVERTISED_Pause);
1928                 break;
1929         }
1930 }
1931
1932 static void bnx2x_link_report(struct bnx2x *bp)
1933 {
1934         if (bp->link_vars.link_up) {
1935                 if (bp->state == BNX2X_STATE_OPEN)
1936                         netif_carrier_on(bp->dev);
1937                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1938
1939                 printk("%d Mbps ", bp->link_vars.line_speed);
1940
1941                 if (bp->link_vars.duplex == DUPLEX_FULL)
1942                         printk("full duplex");
1943                 else
1944                         printk("half duplex");
1945
1946                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1947                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1948                                 printk(", receive ");
1949                                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1950                                         printk("& transmit ");
1951                         } else {
1952                                 printk(", transmit ");
1953                         }
1954                         printk("flow control ON");
1955                 }
1956                 printk("\n");
1957
1958         } else { /* link_down */
1959                 netif_carrier_off(bp->dev);
1960                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1961         }
1962 }
1963
1964 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1965 {
1966         if (!BP_NOMCP(bp)) {
1967                 u8 rc;
1968
1969                 /* Initialize link parameters structure variables */
1970                 /* It is recommended to turn off RX FC for jumbo frames
1971                    for better performance */
1972                 if (IS_E1HMF(bp))
1973                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1974                 else if (bp->dev->mtu > 5000)
1975                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1976                 else
1977                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1978
1979                 bnx2x_acquire_phy_lock(bp);
1980                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1981                 bnx2x_release_phy_lock(bp);
1982
1983                 bnx2x_calc_fc_adv(bp);
1984
1985                 if (bp->link_vars.link_up)
1986                         bnx2x_link_report(bp);
1987
1988
1989                 return rc;
1990         }
1991         BNX2X_ERR("Bootcode is missing - not initializing link\n");
1992         return -EINVAL;
1993 }
1994
1995 static void bnx2x_link_set(struct bnx2x *bp)
1996 {
1997         if (!BP_NOMCP(bp)) {
1998                 bnx2x_acquire_phy_lock(bp);
1999                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2000                 bnx2x_release_phy_lock(bp);
2001
2002                 bnx2x_calc_fc_adv(bp);
2003         } else
2004                 BNX2X_ERR("Bootcode is missing - not setting link\n");
2005 }
2006
2007 static void bnx2x__link_reset(struct bnx2x *bp)
2008 {
2009         if (!BP_NOMCP(bp)) {
2010                 bnx2x_acquire_phy_lock(bp);
2011                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2012                 bnx2x_release_phy_lock(bp);
2013         } else
2014                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
2015 }
2016
2017 static u8 bnx2x_link_test(struct bnx2x *bp)
2018 {
2019         u8 rc;
2020
2021         bnx2x_acquire_phy_lock(bp);
2022         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2023         bnx2x_release_phy_lock(bp);
2024
2025         return rc;
2026 }
2027
2028 /* Calculates the sum of vn_min_rates.
2029    It's needed for further normalizing of the min_rates.
2030
2031    Returns:
2032      sum of vn_min_rates
2033        or
2034      0 - if all the min_rates are 0.
2035      In the latter case the fairness algorithm should be deactivated.
2036      If not all min_rates are zero then those that are zeroes will
2037      be set to 1.
2038  */
2039 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2040 {
2041         int i, port = BP_PORT(bp);
2042         u32 wsum = 0;
2043         int all_zero = 1;
2044
2045         for (i = 0; i < E1HVN_MAX; i++) {
2046                 u32 vn_cfg =
2047                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2048                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2049                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2050                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2051                         /* If min rate is zero - set it to 1 */
2052                         if (!vn_min_rate)
2053                                 vn_min_rate = DEF_MIN_RATE;
2054                         else
2055                                 all_zero = 0;
2056
2057                         wsum += vn_min_rate;
2058                 }
2059         }
2060
2061         /* ... only if all min rates are zeros - disable FAIRNESS */
2062         if (all_zero)
2063                 return 0;
2064
2065         return wsum;
2066 }
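/* Worked example (hypothetical configuration): if the four vnics' min-BW
 * fields scale to {0, 2500, 0, 7500} and none of them is hidden, not all
 * rates are zero, so the two zero entries are counted as DEF_MIN_RATE
 * each and wsum = 10000 + 2*DEF_MIN_RATE.  Only when every visible vn
 * has a zero min rate does the function return 0 and fairness is
 * disabled.
 */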
2067
2068 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2069                                    int en_fness,
2070                                    u16 port_rate,
2071                                    struct cmng_struct_per_port *m_cmng_port)
2072 {
2073         u32 r_param = port_rate / 8;
2074         int port = BP_PORT(bp);
2075         int i;
2076
2077         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2078
2079         /* Enable minmax only if we are in e1hmf mode */
2080         if (IS_E1HMF(bp)) {
2081                 u32 fair_periodic_timeout_usec;
2082                 u32 t_fair;
2083
2084                 /* Enable rate shaping and fairness */
2085                 m_cmng_port->flags.cmng_vn_enable = 1;
2086                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2087                 m_cmng_port->flags.rate_shaping_enable = 1;
2088
2089                 if (!en_fness)
2090                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
2091                            " fairness will be disabled\n");
2092
2093                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2094                 m_cmng_port->rs_vars.rs_periodic_timeout =
2095                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2096
2097                 /* this is the threshold below which no timer arming will occur;
2098                    the 1.25 coefficient makes the threshold a little bigger
2099                    than the real time, to compensate for timer inaccuracy */
2100                 m_cmng_port->rs_vars.rs_threshold =
2101                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2102
2103                 /* resolution of fairness timer */
2104                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2105                 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2106                 t_fair = T_FAIR_COEF / port_rate;
2107
2108                 /* this is the threshold below which we won't arm
2109                    the timer anymore */
2110                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2111
2112                 /* we multiply by 1e3/8 to get bytes/msec.
2113                    We don't want the credits to exceed
2114                    T_FAIR*FAIR_MEM (the algorithm resolution) */
2115                 m_cmng_port->fair_vars.upper_bound =
2116                                                 r_param * t_fair * FAIR_MEM;
2117                 /* since each tick is 4 usec */
2118                 m_cmng_port->fair_vars.fairness_timeout =
2119                                                 fair_periodic_timeout_usec / 4;
2120
2121         } else {
2122                 /* Disable rate shaping and fairness */
2123                 m_cmng_port->flags.cmng_vn_enable = 0;
2124                 m_cmng_port->flags.fairness_enable = 0;
2125                 m_cmng_port->flags.rate_shaping_enable = 0;
2126
2127                 DP(NETIF_MSG_IFUP,
2128                    "Single function mode - minmax will be disabled\n");
2129         }
2130
2131         /* Store it to internal memory */
2132         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2133                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2134                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2135                        ((u32 *)(m_cmng_port))[i]);
2136 }
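/* Worked numbers (following the comments above): with port_rate =
 * 10000 Mbps, r_param = 10000/8 = 1250 bytes/usec and t_fair =
 * T_FAIR_COEF/10000 = 1000 usec, which implies T_FAIR_COEF = 10^7;
 * at 1000 Mbps the same formula gives t_fair = 10000 usec.
 */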
2137
2138 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2139                                    u32 wsum, u16 port_rate,
2140                                  struct cmng_struct_per_port *m_cmng_port)
2141 {
2142         struct rate_shaping_vars_per_vn m_rs_vn;
2143         struct fairness_vars_per_vn m_fair_vn;
2144         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2145         u16 vn_min_rate, vn_max_rate;
2146         int i;
2147
2148         /* If function is hidden - set min and max to zeroes */
2149         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2150                 vn_min_rate = 0;
2151                 vn_max_rate = 0;
2152
2153         } else {
2154                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2155                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2156                 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2157                    if current min rate is zero - set it to 1.
2158                    This is a requirement of the algorithm. */
2159                 if ((vn_min_rate == 0) && wsum)
2160                         vn_min_rate = DEF_MIN_RATE;
2161                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2162                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2163         }
2164
2165         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2166            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2167
2168         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2169         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2170
2171         /* global vn counter - maximal Mbps for this vn */
2172         m_rs_vn.vn_counter.rate = vn_max_rate;
2173
2174         /* quota - number of bytes transmitted in this period */
2175         m_rs_vn.vn_counter.quota =
2176                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2177
2178 #ifdef BNX2X_PER_PROT_QOS
2179         /* per protocol counter */
2180         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2181                 /* maximal Mbps for this protocol */
2182                 m_rs_vn.protocol_counters[protocol].rate =
2183                                                 protocol_max_rate[protocol];
2184                 /* the quota in each timer period -
2185                    number of bytes transmitted in this period */
2186                 m_rs_vn.protocol_counters[protocol].quota =
2187                         (u32)(rs_periodic_timeout_usec *
2188                           ((double)m_rs_vn.
2189                                    protocol_counters[protocol].rate/8));
2190         }
2191 #endif
2192
2193         if (wsum) {
2194                 /* credit for each period of the fairness algorithm:
2195                    number of bytes in T_FAIR (the vns share the port rate).
2196                    wsum should not be larger than 10000, thus
2197                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2198                 m_fair_vn.vn_credit_delta =
2199                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2200                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2201                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2202                    m_fair_vn.vn_credit_delta);
2203         }
2204
2205 #ifdef BNX2X_PER_PROT_QOS
2206         do {
2207                 u32 protocolWeightSum = 0;
2208
2209                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2210                         protocolWeightSum +=
2211                                         drvInit.protocol_min_rate[protocol];
2212                 /* per protocol counter -
2213                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2214                 if (protocolWeightSum > 0) {
2215                         for (protocol = 0;
2216                              protocol < NUM_OF_PROTOCOLS; protocol++)
2217                                 /* credit for each period of the
2218                                    fairness algorithm - number of bytes in
2219                                    T_FAIR (the protocol share the vn rate) */
2220                                 m_fair_vn.protocol_credit_delta[protocol] =
2221                                         (u32)((vn_min_rate / 8) * t_fair *
2222                                         protocol_min_rate / protocolWeightSum);
2223                 }
2224         } while (0);
2225 #endif
2226
2227         /* Store it to internal memory */
2228         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2229                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2230                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2231                        ((u32 *)(&m_rs_vn))[i]);
2232
2233         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2234                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2235                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2236                        ((u32 *)(&m_fair_vn))[i]);
2237 }
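/* Worked example (hypothetical numbers): taking T_FAIR_COEF = 10^7 (the
 * value implied by the 1000usec-at-10G note earlier) and wsum = 10000,
 * T_FAIR_COEF / (8 * wsum) = 125, so a vn with vn_min_rate = 2500 earns
 * a per-period credit of 2500 * 125 = 312500 bytes - unless twice the
 * fairness threshold is larger, in which case the threshold wins.
 */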
2238
2239 /* This function is called upon link interrupt */
2240 static void bnx2x_link_attn(struct bnx2x *bp)
2241 {
2242         int vn;
2243
2244         /* Make sure that we are synced with the current statistics */
2245         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2246
2247         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2248
2249         if (bp->link_vars.link_up) {
2250
2251                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2252                         struct host_port_stats *pstats;
2253
2254                         pstats = bnx2x_sp(bp, port_stats);
2255                         /* reset old bmac stats */
2256                         memset(&(pstats->mac_stx[0]), 0,
2257                                sizeof(struct mac_stx));
2258                 }
2259                 if ((bp->state == BNX2X_STATE_OPEN) ||
2260                     (bp->state == BNX2X_STATE_DISABLED))
2261                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2262         }
2263
2264         /* indicate link status */
2265         bnx2x_link_report(bp);
2266
2267         if (IS_E1HMF(bp)) {
2268                 int func;
2269
2270                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2271                         if (vn == BP_E1HVN(bp))
2272                                 continue;
2273
2274                         func = ((vn << 1) | BP_PORT(bp));
2275
2276                         /* Set the attention towards other drivers
2277                            on the same port */
2278                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2279                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2280                 }
2281         }
2282
2283         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2284                 struct cmng_struct_per_port m_cmng_port;
2285                 u32 wsum;
2286                 int port = BP_PORT(bp);
2287
2288                 /* Init RATE SHAPING and FAIRNESS contexts */
2289                 wsum = bnx2x_calc_vn_wsum(bp);
2290                 bnx2x_init_port_minmax(bp, (int)wsum,
2291                                         bp->link_vars.line_speed,
2292                                         &m_cmng_port);
2293                 if (IS_E1HMF(bp))
2294                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2295                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2296                                         wsum, bp->link_vars.line_speed,
2297                                                      &m_cmng_port);
2298         }
2299 }
2300
2301 static void bnx2x__link_status_update(struct bnx2x *bp)
2302 {
2303         if (bp->state != BNX2X_STATE_OPEN)
2304                 return;
2305
2306         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2307
2308         if (bp->link_vars.link_up)
2309                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2310         else
2311                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2312
2313         /* indicate link status */
2314         bnx2x_link_report(bp);
2315 }
2316
2317 static void bnx2x_pmf_update(struct bnx2x *bp)
2318 {
2319         int port = BP_PORT(bp);
2320         u32 val;
2321
2322         bp->port.pmf = 1;
2323         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2324
2325         /* enable nig attention */
2326         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2327         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2328         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2329
2330         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2331 }
2332
2333 /* end of Link */
2334
2335 /* slow path */
2336
2337 /*
2338  * General service functions
2339  */
2340
2341 /* the slow path queue is odd since completions arrive on the fastpath ring */
2342 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2343                          u32 data_hi, u32 data_lo, int common)
2344 {
2345         int func = BP_FUNC(bp);
2346
2347         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2348            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2349            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2350            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2351            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2352
2353 #ifdef BNX2X_STOP_ON_ERROR
2354         if (unlikely(bp->panic))
2355                 return -EIO;
2356 #endif
2357
2358         spin_lock_bh(&bp->spq_lock);
2359
2360         if (!bp->spq_left) {
2361                 BNX2X_ERR("BUG! SPQ ring full!\n");
2362                 spin_unlock_bh(&bp->spq_lock);
2363                 bnx2x_panic();
2364                 return -EBUSY;
2365         }
2366
2367         /* CID needs the port number to be encoded in it */
2368         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2369                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2370                                      HW_CID(bp, cid)));
2371         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2372         if (common)
2373                 bp->spq_prod_bd->hdr.type |=
2374                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2375
2376         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2377         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2378
2379         bp->spq_left--;
2380
2381         if (bp->spq_prod_bd == bp->spq_last_bd) {
2382                 bp->spq_prod_bd = bp->spq;
2383                 bp->spq_prod_idx = 0;
2384                 DP(NETIF_MSG_TIMER, "end of spq\n");
2385
2386         } else {
2387                 bp->spq_prod_bd++;
2388                 bp->spq_prod_idx++;
2389         }
2390
2391         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2392                bp->spq_prod_idx);
2393
2394         spin_unlock_bh(&bp->spq_lock);
2395         return 0;
2396 }
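/* Illustrative usage (hypothetical cid and config_mapping, for exposition
 * only): a caller hands the FW a command plus a DMA address and later
 * sees the completion arrive on the fastpath ring, e.g. when programming
 * a MAC filter:
 */
#if 0
        rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, cid,
                           U64_HI(config_mapping), U64_LO(config_mapping),
                           0 /* not a common ramrod */);
#endif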
2397
2398 /* acquire split MCP access lock register */
2399 static int bnx2x_acquire_alr(struct bnx2x *bp)
2400 {
2401         u32 i, j, val;
2402         int rc = 0;
2403
2404         might_sleep();
2405         i = 100;
2406         for (j = 0; j < i*10; j++) {
2407                 val = (1UL << 31);
2408                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2409                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2410                 if (val & (1L << 31))
2411                         break;
2412
2413                 msleep(5);
2414         }
2415         if (!(val & (1L << 31))) {
2416                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2417                 rc = -EBUSY;
2418         }
2419
2420         return rc;
2421 }
2422
2423 /* release split MCP access lock register */
2424 static void bnx2x_release_alr(struct bnx2x *bp)
2425 {
2426         u32 val = 0;
2427
2428         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2429 }
2430
2431 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2432 {
2433         struct host_def_status_block *def_sb = bp->def_status_blk;
2434         u16 rc = 0;
2435
2436         barrier(); /* status block is written to by the chip */
2437         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2438                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2439                 rc |= 1;
2440         }
2441         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2442                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2443                 rc |= 2;
2444         }
2445         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2446                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2447                 rc |= 4;
2448         }
2449         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2450                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2451                 rc |= 8;
2452         }
2453         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2454                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2455                 rc |= 16;
2456         }
2457         return rc;
2458 }
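/* Note: the return value is a bitmap of which default status block
 * indices changed - 1 attention, 2 cstorm, 4 ustorm, 8 xstorm,
 * 16 tstorm - so rc == 0 means the DSB carries nothing new.
 */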
2459
2460 /*
2461  * slow path service functions
2462  */
2463
2464 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2465 {
2466         int port = BP_PORT(bp);
2467         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2468                        COMMAND_REG_ATTN_BITS_SET);
2469         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2470                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2471         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2472                                        NIG_REG_MASK_INTERRUPT_PORT0;
2473         u32 aeu_mask;
2474
2475         if (bp->attn_state & asserted)
2476                 BNX2X_ERR("IGU ERROR\n");
2477
2478         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2479         aeu_mask = REG_RD(bp, aeu_addr);
2480
2481         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2482            aeu_mask, asserted);
2483         aeu_mask &= ~(asserted & 0xff);
2484         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2485
2486         REG_WR(bp, aeu_addr, aeu_mask);
2487         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2488
2489         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2490         bp->attn_state |= asserted;
2491         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2492
2493         if (asserted & ATTN_HARD_WIRED_MASK) {
2494                 if (asserted & ATTN_NIG_FOR_FUNC) {
2495
2496                         bnx2x_acquire_phy_lock(bp);
2497
2498                         /* save nig interrupt mask */
2499                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2500                         REG_WR(bp, nig_int_mask_addr, 0);
2501
2502                         bnx2x_link_attn(bp);
2503
2504                         /* handle unicore attn? */
2505                 }
2506                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2507                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2508
2509                 if (asserted & GPIO_2_FUNC)
2510                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2511
2512                 if (asserted & GPIO_3_FUNC)
2513                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2514
2515                 if (asserted & GPIO_4_FUNC)
2516                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2517
2518                 if (port == 0) {
2519                         if (asserted & ATTN_GENERAL_ATTN_1) {
2520                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2521                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2522                         }
2523                         if (asserted & ATTN_GENERAL_ATTN_2) {
2524                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2525                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2526                         }
2527                         if (asserted & ATTN_GENERAL_ATTN_3) {
2528                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2529                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2530                         }
2531                 } else {
2532                         if (asserted & ATTN_GENERAL_ATTN_4) {
2533                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2534                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2535                         }
2536                         if (asserted & ATTN_GENERAL_ATTN_5) {
2537                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2538                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2539                         }
2540                         if (asserted & ATTN_GENERAL_ATTN_6) {
2541                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2542                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2543                         }
2544                 }
2545
2546         } /* if hardwired */
2547
2548         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2549            asserted, hc_addr);
2550         REG_WR(bp, hc_addr, asserted);
2551
2552         /* now set back the mask */
2553         if (asserted & ATTN_NIG_FOR_FUNC) {
2554                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2555                 bnx2x_release_phy_lock(bp);
2556         }
2557 }
2558
2559 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2560 {
2561         int port = BP_PORT(bp);
2562         int reg_offset;
2563         u32 val;
2564
2565         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2566                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2567
2568         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2569
2570                 val = REG_RD(bp, reg_offset);
2571                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2572                 REG_WR(bp, reg_offset, val);
2573
2574                 BNX2X_ERR("SPIO5 hw attention\n");
2575
2576                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2577                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2578                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2579                         /* Fan failure attention */
2580
2581                         /* The PHY reset is controlled by GPIO 1 */
2582                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2583                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2584                         /* Low power mode is controlled by GPIO 2 */
2585                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2586                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2587                         /* mark the failure */
2588                         bp->link_params.ext_phy_config &=
2589                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2590                         bp->link_params.ext_phy_config |=
2591                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2592                         SHMEM_WR(bp,
2593                                  dev_info.port_hw_config[port].
2594                                                         external_phy_config,
2595                                  bp->link_params.ext_phy_config);
2596                         /* log the failure */
2597                         printk(KERN_ERR PFX "Fan Failure on Network"
2598                                " Controller %s has caused the driver to"
2599                                " shutdown the card to prevent permanent"
2600                                " damage.  Please contact Dell Support for"
2601                                " assistance\n", bp->dev->name);
2602                         break;
2603
2604                 default:
2605                         break;
2606                 }
2607         }
2608
2609         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2610
2611                 val = REG_RD(bp, reg_offset);
2612                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2613                 REG_WR(bp, reg_offset, val);
2614
2615                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2616                           (attn & HW_INTERRUT_ASSERT_SET_0));
2617                 bnx2x_panic();
2618         }
2619 }
2620
2621 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2622 {
2623         u32 val;
2624
2625         if (attn & BNX2X_DOORQ_ASSERT) {
2626
2627                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2628                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2629                 /* DORQ discard attention */
2630                 if (val & 0x2)
2631                         BNX2X_ERR("FATAL error from DORQ\n");
2632         }
2633
2634         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2635
2636                 int port = BP_PORT(bp);
2637                 int reg_offset;
2638
2639                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2640                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2641
2642                 val = REG_RD(bp, reg_offset);
2643                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2644                 REG_WR(bp, reg_offset, val);
2645
2646                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2647                           (attn & HW_INTERRUT_ASSERT_SET_1));
2648                 bnx2x_panic();
2649         }
2650 }
2651
2652 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2653 {
2654         u32 val;
2655
2656         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2657
2658                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2659                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2660                 /* CFC error attention */
2661                 if (val & 0x2)
2662                         BNX2X_ERR("FATAL error from CFC\n");
2663         }
2664
2665         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2666
2667                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2668                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2669                 /* RQ_USDMDP_FIFO_OVERFLOW */
2670                 if (val & 0x18000)
2671                         BNX2X_ERR("FATAL error from PXP\n");
2672         }
2673
2674         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2675
2676                 int port = BP_PORT(bp);
2677                 int reg_offset;
2678
2679                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2680                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2681
2682                 val = REG_RD(bp, reg_offset);
2683                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2684                 REG_WR(bp, reg_offset, val);
2685
2686                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2687                           (attn & HW_INTERRUT_ASSERT_SET_2));
2688                 bnx2x_panic();
2689         }
2690 }
2691
2692 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2693 {
2694         u32 val;
2695
2696         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2697
2698                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2699                         int func = BP_FUNC(bp);
2700
2701                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2702                         bnx2x__link_status_update(bp);
2703                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2704                                                         DRV_STATUS_PMF)
2705                                 bnx2x_pmf_update(bp);
2706
2707                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2708
2709                         BNX2X_ERR("MC assert!\n");
2710                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2711                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2712                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2713                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2714                         bnx2x_panic();
2715
2716                 } else if (attn & BNX2X_MCP_ASSERT) {
2717
2718                         BNX2X_ERR("MCP assert!\n");
2719                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2720                         bnx2x_fw_dump(bp);
2721
2722                 } else
2723                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2724         }
2725
2726         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2727                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2728                 if (attn & BNX2X_GRC_TIMEOUT) {
2729                         val = CHIP_IS_E1H(bp) ?
2730                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2731                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2732                 }
2733                 if (attn & BNX2X_GRC_RSV) {
2734                         val = CHIP_IS_E1H(bp) ?
2735                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2736                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2737                 }
2738                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2739         }
2740 }
2741
2742 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2743 {
2744         struct attn_route attn;
2745         struct attn_route group_mask;
2746         int port = BP_PORT(bp);
2747         int index;
2748         u32 reg_addr;
2749         u32 val;
2750         u32 aeu_mask;
2751
2752         /* need to take HW lock because MCP or other port might also
2753            try to handle this event */
2754         bnx2x_acquire_alr(bp);
2755
2756         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2757         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2758         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2759         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2760         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2761            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2762
2763         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2764                 if (deasserted & (1 << index)) {
2765                         group_mask = bp->attn_group[index];
2766
2767                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2768                            index, group_mask.sig[0], group_mask.sig[1],
2769                            group_mask.sig[2], group_mask.sig[3]);
2770
2771                         bnx2x_attn_int_deasserted3(bp,
2772                                         attn.sig[3] & group_mask.sig[3]);
2773                         bnx2x_attn_int_deasserted1(bp,
2774                                         attn.sig[1] & group_mask.sig[1]);
2775                         bnx2x_attn_int_deasserted2(bp,
2776                                         attn.sig[2] & group_mask.sig[2]);
2777                         bnx2x_attn_int_deasserted0(bp,
2778                                         attn.sig[0] & group_mask.sig[0]);
2779
2780                         if ((attn.sig[0] & group_mask.sig[0] &
2781                                                 HW_PRTY_ASSERT_SET_0) ||
2782                             (attn.sig[1] & group_mask.sig[1] &
2783                                                 HW_PRTY_ASSERT_SET_1) ||
2784                             (attn.sig[2] & group_mask.sig[2] &
2785                                                 HW_PRTY_ASSERT_SET_2))
2786                                 BNX2X_ERR("FATAL HW block parity attention\n");
2787                 }
2788         }
2789
2790         bnx2x_release_alr(bp);
2791
2792         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2793
2794         val = ~deasserted;
2795         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2796            val, reg_addr);
2797         REG_WR(bp, reg_addr, val);
2798
2799         if (~bp->attn_state & deasserted)
2800                 BNX2X_ERR("IGU ERROR\n");
2801
2802         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2803                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2804
2805         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2806         aeu_mask = REG_RD(bp, reg_addr);
2807
2808         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2809            aeu_mask, deasserted);
2810         aeu_mask |= (deasserted & 0xff);
2811         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2812
2813         REG_WR(bp, reg_addr, aeu_mask);
2814         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2815
2816         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2817         bp->attn_state &= ~deasserted;
2818         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2819 }
2820
2821 static void bnx2x_attn_int(struct bnx2x *bp)
2822 {
2823         /* read local copy of bits */
2824         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2825                                                                 attn_bits);
2826         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2827                                                                 attn_bits_ack);
2828         u32 attn_state = bp->attn_state;
2829
2830         /* look for changed bits */
2831         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2832         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
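        /* A bit is newly asserted when the HW reports it set but it is
         * neither acknowledged nor already tracked in attn_state; it is
         * newly deasserted when the HW reports it clear while it is still
         * both acknowledged and tracked.  When attn_bits and attn_ack agree
         * but differ from the driver's attn_state, the sanity check below
         * reports a bad attention state.
         */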
2833
2834         DP(NETIF_MSG_HW,
2835            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2836            attn_bits, attn_ack, asserted, deasserted);
2837
2838         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2839                 BNX2X_ERR("BAD attention state\n");
2840
2841         /* handle bits that were raised */
2842         if (asserted)
2843                 bnx2x_attn_int_asserted(bp, asserted);
2844
2845         if (deasserted)
2846                 bnx2x_attn_int_deasserted(bp, deasserted);
2847 }
2848
2849 static void bnx2x_sp_task(struct work_struct *work)
2850 {
2851         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2852         u16 status;
2853
2855         /* Return here if interrupt is disabled */
2856         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2857                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2858                 return;
2859         }
2860
2861         status = bnx2x_update_dsb_idx(bp);
2862 /*      if (status == 0)                                     */
2863 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2864
2865         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2866
2867         /* HW attentions */
2868         if (status & 0x1)
2869                 bnx2x_attn_int(bp);
2870
2871         /* CStorm events: query_stats, port delete ramrod */
2872         if (status & 0x2)
2873                 bp->stats_pending = 0;
2874
2875         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2876                      IGU_INT_NOP, 1);
2877         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2878                      IGU_INT_NOP, 1);
2879         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2880                      IGU_INT_NOP, 1);
2881         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2882                      IGU_INT_NOP, 1);
2883         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2884                      IGU_INT_ENABLE, 1);
2886 }
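/* Note on the ack sequence above: the first four bnx2x_ack_sb() calls
 * update the default status block indices with IGU_INT_NOP, leaving the
 * interrupt masked; only the final TSTORM ack uses IGU_INT_ENABLE to
 * re-arm the IGU once the slow path work is done.
 */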
2887
2888 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2889 {
2890         struct net_device *dev = dev_instance;
2891         struct bnx2x *bp = netdev_priv(dev);
2892
2893         /* Return here if interrupt is disabled */
2894         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2895                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2896                 return IRQ_HANDLED;
2897         }
2898
2899         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2900
2901 #ifdef BNX2X_STOP_ON_ERROR
2902         if (unlikely(bp->panic))
2903                 return IRQ_HANDLED;
2904 #endif
2905
2906         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2907
2908         return IRQ_HANDLED;
2909 }
2910
2911 /* end of slow path */
2912
2913 /* Statistics */
2914
2915 /****************************************************************************
2916 * Macros
2917 ****************************************************************************/
2918
2919 /* sum[hi:lo] += add[hi:lo] */
2920 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2921         do { \
2922                 s_lo += a_lo; \
2923                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2924         } while (0)
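/* Example: for s = 0x1_ffffffff and a = 0x0_00000001, s_lo wraps to 0,
 * the (s_lo < a_lo) test detects the carry, and s_hi becomes
 * 0x1 + 0x0 + 1 = 0x2, i.e. 0x2_00000000 -- 64-bit addition done on
 * split u32 halves without needing a u64 intermediate.
 */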
2925
2926 /* difference = minuend - subtrahend */
2927 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2928         do { \
2929                 if (m_lo < s_lo) { \
2930                         /* underflow */ \
2931                         d_hi = m_hi - s_hi; \
2932                         if (d_hi > 0) { \
2933                                 /* we can 'loan' 1 */ \
2934                                 d_hi--; \
2935                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2936                         } else { \
2937                                 /* m_hi <= s_hi */ \
2938                                 d_hi = 0; \
2939                                 d_lo = 0; \
2940                         } \
2941                 } else { \
2942                         /* m_lo >= s_lo */ \
2943                         if (m_hi < s_hi) { \
2944                                 d_hi = 0; \
2945                                 d_lo = 0; \
2946                         } else { \
2947                                 /* m_hi >= s_hi */ \
2948                                 d_hi = m_hi - s_hi; \
2949                                 d_lo = m_lo - s_lo; \
2950                         } \
2951                 } \
2952         } while (0)
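/* Example: m = 0x2_00000000, s = 0x1_ffffffff.  m_lo < s_lo takes the
 * borrow path: d_hi = 2 - 1 = 1, then d_hi-- and
 * d_lo = 0 + (0xffffffff - 0xffffffff) + 1 = 1, giving the expected
 * difference of 1.  When the subtrahend is the larger value the result
 * is clamped to 0 (hardware counters are not expected to run backwards).
 */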
2953
2954 #define UPDATE_STAT64(s, t) \
2955         do { \
2956                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2957                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2958                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2959                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2960                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2961                        pstats->mac_stx[1].t##_lo, diff.lo); \
2962         } while (0)
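/* UPDATE_STAT64 keeps two copies per counter: mac_stx[0] holds the raw
 * value from the last MAC readout and mac_stx[1] the running 64-bit
 * total, so each invocation folds only the delta since the previous
 * DMAE snapshot into the accumulated statistics.
 */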
2963
2964 #define UPDATE_STAT64_NIG(s, t) \
2965         do { \
2966                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2967                         diff.lo, new->s##_lo, old->s##_lo); \
2968                 ADD_64(estats->t##_hi, diff.hi, \
2969                        estats->t##_lo, diff.lo); \
2970         } while (0)
2971
2972 /* sum[hi:lo] += add */
2973 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2974         do { \
2975                 s_lo += a; \
2976                 s_hi += (s_lo < a) ? 1 : 0; \
2977         } while (0)
2978
2979 #define UPDATE_EXTEND_STAT(s) \
2980         do { \
2981                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2982                               pstats->mac_stx[1].s##_lo, \
2983                               new->s); \
2984         } while (0)
2985
2986 #define UPDATE_EXTEND_TSTAT(s, t) \
2987         do { \
2988                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2989                 old_tclient->s = le32_to_cpu(tclient->s); \
2990                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2991         } while (0)
2992
2993 #define UPDATE_EXTEND_XSTAT(s, t) \
2994         do { \
2995                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2996                 old_xclient->s = le32_to_cpu(xclient->s); \
2997                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2998         } while (0)
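/* The UPDATE_EXTEND_TSTAT/XSTAT helpers follow the same delta pattern
 * for the 32-bit storm counters: compute the difference against the
 * driver's saved copy, refresh that copy, and extend the delta into the
 * 64-bit totals.  Unsigned wraparound keeps the delta correct even if a
 * 32-bit firmware counter overflows between two updates.
 */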
2999
3000 /*
3001  * General service functions
3002  */
3003
3004 static inline long bnx2x_hilo(u32 *hiref)
3005 {
3006         u32 lo = *(hiref + 1);
3007 #if (BITS_PER_LONG == 64)
3008         u32 hi = *hiref;
3009
3010         return HILO_U64(hi, lo);
3011 #else
3012         return lo;
3013 #endif
3014 }
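/* On 32-bit kernels only the low dword is returned: these counters are
 * reported through unsigned long fields of struct net_device_stats,
 * which cannot hold more than 32 bits there anyway.
 */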
3015
3016 /*
3017  * Init service functions
3018  */
3019
3020 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3021 {
3022         if (!bp->stats_pending) {
3023                 struct eth_query_ramrod_data ramrod_data = {0};
3024                 int rc;
3025
3026                 ramrod_data.drv_counter = bp->stats_counter++;
3027                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3028                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3029
3030                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3031                                    ((u32 *)&ramrod_data)[1],
3032                                    ((u32 *)&ramrod_data)[0], 0);
3033                 if (rc == 0) {
3034                         /* stats ramrod has its own slot on the spq */
3035                         bp->spq_left++;
3036                         bp->stats_pending = 1;
3037                 }
3038         }
3039 }
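/* The 8-byte ramrod_data is handed to bnx2x_sp_post() as its two 32-bit
 * halves.  bnx2x_sp_post() consumes a slow path queue credit, which is
 * given back above since the statistics ramrod completes into its own
 * dedicated SPQ slot.
 */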
3040
3041 static void bnx2x_stats_init(struct bnx2x *bp)
3042 {
3043         int port = BP_PORT(bp);
3044
3045         bp->executer_idx = 0;
3046         bp->stats_counter = 0;
3047
3048         /* port stats */
3049         if (!BP_NOMCP(bp))
3050                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3051         else
3052                 bp->port.port_stx = 0;
3053         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3054
3055         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3056         bp->port.old_nig_stats.brb_discard =
3057                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3058         bp->port.old_nig_stats.brb_truncate =
3059                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3060         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3061                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3062         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3063                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3064
3065         /* function stats */
3066         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3067         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3068         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3069         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3070
3071         bp->stats_state = STATS_STATE_DISABLED;
3072         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3073                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3074 }
3075
3076 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3077 {
3078         struct dmae_command *dmae = &bp->stats_dmae;
3079         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3080
3081         *stats_comp = DMAE_COMP_VAL;
3082
3083         /* loader */
3084         if (bp->executer_idx) {
3085                 int loader_idx = PMF_DMAE_C(bp);
3086
3087                 memset(dmae, 0, sizeof(struct dmae_command));
3088
3089                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3090                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3091                                 DMAE_CMD_DST_RESET |
3092 #ifdef __BIG_ENDIAN
3093                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3094 #else
3095                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3096 #endif
3097                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3098                                                DMAE_CMD_PORT_0) |
3099                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3100                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3101                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3102                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3103                                      sizeof(struct dmae_command) *
3104                                      (loader_idx + 1)) >> 2;
3105                 dmae->dst_addr_hi = 0;
3106                 dmae->len = sizeof(struct dmae_command) >> 2;
3107                 if (CHIP_IS_E1(bp))
3108                         dmae->len--;
3109                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3110                 dmae->comp_addr_hi = 0;
3111                 dmae->comp_val = 1;
3112
3113                 *stats_comp = 0;
3114                 bnx2x_post_dmae(bp, dmae, loader_idx);
3115
3116         } else if (bp->func_stx) {
3117                 *stats_comp = 0;
3118                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3119         }
3120 }
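/* When commands were queued (executer_idx != 0) they are not started
 * directly: a small "loader" command is posted instead, which DMAEs the
 * first queued command from host memory into the command memory of the
 * next DMAE channel and completes by writing that channel's GO register
 * (dmae_reg_go_c), launching it.  The final command of the sequence
 * completes by writing DMAE_COMP_VAL into stats_comp, which
 * bnx2x_stats_comp() polls for.
 */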
3121
3122 static int bnx2x_stats_comp(struct bnx2x *bp)
3123 {
3124         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3125         int cnt = 10;
3126
3127         might_sleep();
3128         while (*stats_comp != DMAE_COMP_VAL) {
3129                 if (!cnt) {
3130                         BNX2X_ERR("timeout waiting for stats to finish\n");
3131                         break;
3132                 }
3133                 cnt--;
3134                 msleep(1);
3135         }
3136         return 1;
3137 }
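/* bnx2x_stats_comp() gives the DMAE engine up to ten msleep(1)
 * iterations (roughly 10 ms) to raise DMAE_COMP_VAL; on timeout it only
 * logs an error and still returns 1, so callers always proceed.
 */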
3138
3139 /*
3140  * Statistics service functions
3141  */
3142
3143 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3144 {
3145         struct dmae_command *dmae;
3146         u32 opcode;
3147         int loader_idx = PMF_DMAE_C(bp);
3148         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3149
3150         /* sanity */
3151         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3152                 BNX2X_ERR("BUG!\n");
3153                 return;
3154         }
3155
3156         bp->executer_idx = 0;
3157
3158         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3159                   DMAE_CMD_C_ENABLE |
3160                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3161 #ifdef __BIG_ENDIAN
3162                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3163 #else
3164                   DMAE_CMD_ENDIANITY_DW_SWAP |
3165 #endif
3166                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3167                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3168
3169         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3170         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3171         dmae->src_addr_lo = bp->port.port_stx >> 2;
3172         dmae->src_addr_hi = 0;
3173         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3174         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3175         dmae->len = DMAE_LEN32_RD_MAX;
3176         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3177         dmae->comp_addr_hi = 0;
3178         dmae->comp_val = 1;
3179
3180         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3181         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3182         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3183         dmae->src_addr_hi = 0;
3184         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3185                                    DMAE_LEN32_RD_MAX * 4);
3186         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3187                                    DMAE_LEN32_RD_MAX * 4);
3188         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3189         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3190         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3191         dmae->comp_val = DMAE_COMP_VAL;
3192
3193         *stats_comp = 0;
3194         bnx2x_hw_stats_post(bp);
3195         bnx2x_stats_comp(bp);
3196 }
3197
3198 static void bnx2x_port_stats_init(struct bnx2x *bp)
3199 {
3200         struct dmae_command *dmae;
3201         int port = BP_PORT(bp);
3202         int vn = BP_E1HVN(bp);
3203         u32 opcode;
3204         int loader_idx = PMF_DMAE_C(bp);
3205         u32 mac_addr;
3206         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3207
3208         /* sanity */
3209         if (!bp->link_vars.link_up || !bp->port.pmf) {
3210                 BNX2X_ERR("BUG!\n");
3211                 return;
3212         }
3213
3214         bp->executer_idx = 0;
3215
3216         /* MCP */
3217         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3218                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3219                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3220 #ifdef __BIG_ENDIAN
3221                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3222 #else
3223                   DMAE_CMD_ENDIANITY_DW_SWAP |
3224 #endif
3225                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3226                   (vn << DMAE_CMD_E1HVN_SHIFT));
3227
3228         if (bp->port.port_stx) {
3229
3230                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3231                 dmae->opcode = opcode;
3232                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3233                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3234                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3235                 dmae->dst_addr_hi = 0;
3236                 dmae->len = sizeof(struct host_port_stats) >> 2;
3237                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3238                 dmae->comp_addr_hi = 0;
3239                 dmae->comp_val = 1;
3240         }
3241
3242         if (bp->func_stx) {
3243
3244                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3245                 dmae->opcode = opcode;
3246                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3247                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3248                 dmae->dst_addr_lo = bp->func_stx >> 2;
3249                 dmae->dst_addr_hi = 0;
3250                 dmae->len = sizeof(struct host_func_stats) >> 2;
3251                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3252                 dmae->comp_addr_hi = 0;
3253                 dmae->comp_val = 1;
3254         }
3255
3256         /* MAC */
3257         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3258                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3259                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3260 #ifdef __BIG_ENDIAN
3261                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3262 #else
3263                   DMAE_CMD_ENDIANITY_DW_SWAP |
3264 #endif
3265                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3266                   (vn << DMAE_CMD_E1HVN_SHIFT));
3267
3268         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3269
3270                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3271                                    NIG_REG_INGRESS_BMAC0_MEM);
3272
3273                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3274                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3275                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3276                 dmae->opcode = opcode;
3277                 dmae->src_addr_lo = (mac_addr +
3278                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3279                 dmae->src_addr_hi = 0;
3280                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3281                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3282                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3283                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3284                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3285                 dmae->comp_addr_hi = 0;
3286                 dmae->comp_val = 1;
3287
3288                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3289                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3290                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3291                 dmae->opcode = opcode;
3292                 dmae->src_addr_lo = (mac_addr +
3293                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3294                 dmae->src_addr_hi = 0;
3295                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3296                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3297                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3298                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3299                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3300                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3301                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3302                 dmae->comp_addr_hi = 0;
3303                 dmae->comp_val = 1;
3304
3305         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3306
3307                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3308
3309                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3310                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3311                 dmae->opcode = opcode;
3312                 dmae->src_addr_lo = (mac_addr +
3313                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3314                 dmae->src_addr_hi = 0;
3315                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3316                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3317                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3318                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3319                 dmae->comp_addr_hi = 0;
3320                 dmae->comp_val = 1;
3321
3322                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3323                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3324                 dmae->opcode = opcode;
3325                 dmae->src_addr_lo = (mac_addr +
3326                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3327                 dmae->src_addr_hi = 0;
3328                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3329                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3330                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3331                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3332                 dmae->len = 1;
3333                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3334                 dmae->comp_addr_hi = 0;
3335                 dmae->comp_val = 1;
3336
3337                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3338                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3339                 dmae->opcode = opcode;
3340                 dmae->src_addr_lo = (mac_addr +
3341                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3342                 dmae->src_addr_hi = 0;
3343                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3344                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3345                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3346                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3347                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3348                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3349                 dmae->comp_addr_hi = 0;
3350                 dmae->comp_val = 1;
3351         }
3352
3353         /* NIG */
3354         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3355         dmae->opcode = opcode;
3356         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3357                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3358         dmae->src_addr_hi = 0;
3359         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3360         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3361         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3362         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3363         dmae->comp_addr_hi = 0;
3364         dmae->comp_val = 1;
3365
3366         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3367         dmae->opcode = opcode;
3368         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3369                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3370         dmae->src_addr_hi = 0;
3371         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3372                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3373         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3374                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3375         dmae->len = (2*sizeof(u32)) >> 2;
3376         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3377         dmae->comp_addr_hi = 0;
3378         dmae->comp_val = 1;
3379
3380         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3381         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3382                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3383                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3384 #ifdef __BIG_ENDIAN
3385                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3386 #else
3387                         DMAE_CMD_ENDIANITY_DW_SWAP |
3388 #endif
3389                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3390                         (vn << DMAE_CMD_E1HVN_SHIFT));
3391         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3392                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3393         dmae->src_addr_hi = 0;
3394         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3395                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3396         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3397                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3398         dmae->len = (2*sizeof(u32)) >> 2;
3399         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3400         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3401         dmae->comp_val = DMAE_COMP_VAL;
3402
3403         *stats_comp = 0;
3404 }
3405
3406 static void bnx2x_func_stats_init(struct bnx2x *bp)
3407 {
3408         struct dmae_command *dmae = &bp->stats_dmae;
3409         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3410
3411         /* sanity */
3412         if (!bp->func_stx) {
3413                 BNX2X_ERR("BUG!\n");
3414                 return;
3415         }
3416
3417         bp->executer_idx = 0;
3418         memset(dmae, 0, sizeof(struct dmae_command));
3419
3420         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3421                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3422                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3423 #ifdef __BIG_ENDIAN
3424                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3425 #else
3426                         DMAE_CMD_ENDIANITY_DW_SWAP |
3427 #endif
3428                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3429                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3430         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3431         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3432         dmae->dst_addr_lo = bp->func_stx >> 2;
3433         dmae->dst_addr_hi = 0;
3434         dmae->len = sizeof(struct host_func_stats) >> 2;
3435         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3436         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3437         dmae->comp_val = DMAE_COMP_VAL;
3438
3439         *stats_comp = 0;
3440 }
3441
3442 static void bnx2x_stats_start(struct bnx2x *bp)
3443 {
3444         if (bp->port.pmf)
3445                 bnx2x_port_stats_init(bp);
3446
3447         else if (bp->func_stx)
3448                 bnx2x_func_stats_init(bp);
3449
3450         bnx2x_hw_stats_post(bp);
3451         bnx2x_storm_stats_post(bp);
3452 }
3453
3454 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3455 {
3456         bnx2x_stats_comp(bp);
3457         bnx2x_stats_pmf_update(bp);
3458         bnx2x_stats_start(bp);
3459 }
3460
3461 static void bnx2x_stats_restart(struct bnx2x *bp)
3462 {
3463         bnx2x_stats_comp(bp);
3464         bnx2x_stats_start(bp);
3465 }
3466
3467 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3468 {
3469         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3470         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3471         struct regpair diff;
3472
3473         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3474         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3475         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3476         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3477         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3478         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3479         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3480         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3481         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3482         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3483         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3484         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3485         UPDATE_STAT64(tx_stat_gt127,
3486                                 tx_stat_etherstatspkts65octetsto127octets);
3487         UPDATE_STAT64(tx_stat_gt255,
3488                                 tx_stat_etherstatspkts128octetsto255octets);
3489         UPDATE_STAT64(tx_stat_gt511,
3490                                 tx_stat_etherstatspkts256octetsto511octets);
3491         UPDATE_STAT64(tx_stat_gt1023,
3492                                 tx_stat_etherstatspkts512octetsto1023octets);
3493         UPDATE_STAT64(tx_stat_gt1518,
3494                                 tx_stat_etherstatspkts1024octetsto1522octets);
3495         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3496         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3497         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3498         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3499         UPDATE_STAT64(tx_stat_gterr,
3500                                 tx_stat_dot3statsinternalmactransmiterrors);
3501         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3502 }
3503
3504 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3505 {
3506         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3507         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3508
3509         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3510         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3511         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3512         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3513         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3514         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3515         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3516         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3517         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3518         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3519         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3520         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3521         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3522         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3523         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3524         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3525         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3526         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3527         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3528         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3529         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3530         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3531         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3532         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3533         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3534         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3535         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3536         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3537         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3538         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3539         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3540 }
3541
3542 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3543 {
3544         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3545         struct nig_stats *old = &(bp->port.old_nig_stats);
3546         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3547         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3548         struct regpair diff;
3549
3550         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3551                 bnx2x_bmac_stats_update(bp);
3552
3553         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3554                 bnx2x_emac_stats_update(bp);
3555
3556         else { /* unreached */
3557                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3558                 return -1;
3559         }
3560
3561         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3562                       new->brb_discard - old->brb_discard);
3563         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3564                       new->brb_truncate - old->brb_truncate);
3565
3566         UPDATE_STAT64_NIG(egress_mac_pkt0,
3567                                         etherstatspkts1024octetsto1522octets);
3568         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3569
3570         memcpy(old, new, sizeof(struct nig_stats));
3571
3572         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3573                sizeof(struct mac_stx));
3574         estats->brb_drop_hi = pstats->brb_drop_hi;
3575         estats->brb_drop_lo = pstats->brb_drop_lo;
3576
3577         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3578
3579         return 0;
3580 }
3581
3582 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3583 {
3584         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3585         int cl_id = BP_CL_ID(bp);
3586         struct tstorm_per_port_stats *tport =
3587                                 &stats->tstorm_common.port_statistics;
3588         struct tstorm_per_client_stats *tclient =
3589                         &stats->tstorm_common.client_statistics[cl_id];
3590         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3591         struct xstorm_per_client_stats *xclient =
3592                         &stats->xstorm_common.client_statistics[cl_id];
3593         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3594         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3595         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3596         u32 diff;
3597
3598         /* are storm stats valid? */
3599         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3600                                                         bp->stats_counter) {
3601                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3602                    "  tstorm counter (%d) != stats_counter (%d)\n",
3603                    tclient->stats_counter, bp->stats_counter);
3604                 return -1;
3605         }
3606         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3607                                                         bp->stats_counter) {
3608                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3609                    "  xstorm counter (%d) != stats_counter (%d)\n",
3610                    xclient->stats_counter, bp->stats_counter);
3611                 return -2;
3612         }
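        /* The storms stamp each statistics update with the drv_counter
         * they were given in the query ramrod.  Since stats_counter was
         * post-incremented when the ramrod was posted, a stamp equal to
         * stats_counter - 1 (i.e. the value actually sent) marks fresh
         * data; anything else is stale and is skipped.
         */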
3613
3614         fstats->total_bytes_received_hi =
3615         fstats->valid_bytes_received_hi =
3616                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3617         fstats->total_bytes_received_lo =
3618         fstats->valid_bytes_received_lo =
3619                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3620
3621         estats->error_bytes_received_hi =
3622                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3623         estats->error_bytes_received_lo =
3624                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3625         ADD_64(estats->error_bytes_received_hi,
3626                estats->rx_stat_ifhcinbadoctets_hi,
3627                estats->error_bytes_received_lo,
3628                estats->rx_stat_ifhcinbadoctets_lo);
3629
3630         ADD_64(fstats->total_bytes_received_hi,
3631                estats->error_bytes_received_hi,
3632                fstats->total_bytes_received_lo,
3633                estats->error_bytes_received_lo);
3634
3635         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3636         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3637                                 total_multicast_packets_received);
3638         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3639                                 total_broadcast_packets_received);
3640
3641         fstats->total_bytes_transmitted_hi =
3642                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3643         fstats->total_bytes_transmitted_lo =
3644                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3645
3646         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3647                                 total_unicast_packets_transmitted);
3648         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3649                                 total_multicast_packets_transmitted);
3650         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3651                                 total_broadcast_packets_transmitted);
3652
3653         memcpy(estats, &(fstats->total_bytes_received_hi),
3654                sizeof(struct host_func_stats) - 2*sizeof(u32));
3655
3656         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3657         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3658         estats->brb_truncate_discard =
3659                                 le32_to_cpu(tport->brb_truncate_discard);
3660         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3661
3662         old_tclient->rcv_unicast_bytes.hi =
3663                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3664         old_tclient->rcv_unicast_bytes.lo =
3665                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3666         old_tclient->rcv_broadcast_bytes.hi =
3667                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3668         old_tclient->rcv_broadcast_bytes.lo =
3669                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3670         old_tclient->rcv_multicast_bytes.hi =
3671                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3672         old_tclient->rcv_multicast_bytes.lo =
3673                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3674         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3675
3676         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3677         old_tclient->packets_too_big_discard =
3678                                 le32_to_cpu(tclient->packets_too_big_discard);
3679         estats->no_buff_discard =
3680         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3681         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3682
3683         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3684         old_xclient->unicast_bytes_sent.hi =
3685                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3686         old_xclient->unicast_bytes_sent.lo =
3687                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3688         old_xclient->multicast_bytes_sent.hi =
3689                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3690         old_xclient->multicast_bytes_sent.lo =
3691                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3692         old_xclient->broadcast_bytes_sent.hi =
3693                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3694         old_xclient->broadcast_bytes_sent.lo =
3695                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3696
3697         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3698
3699         return 0;
3700 }
3701
3702 static void bnx2x_net_stats_update(struct bnx2x *bp)
3703 {
3704         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3705         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3706         struct net_device_stats *nstats = &bp->dev->stats;
3707
3708         nstats->rx_packets =
3709                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3710                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3711                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3712
3713         nstats->tx_packets =
3714                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3715                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3716                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3717
3718         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3719
3720         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3721
3722         nstats->rx_dropped = old_tclient->checksum_discard +
3723                              estats->mac_discard;
3724         nstats->tx_dropped = 0;
3725
3726         nstats->multicast =
3727                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3728
3729         nstats->collisions =
3730                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3731                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3732                         estats->tx_stat_dot3statslatecollisions_lo +
3733                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3734
3735         estats->jabber_packets_received =
3736                                 old_tclient->packets_too_big_discard +
3737                                 estats->rx_stat_dot3statsframestoolong_lo;
3738
3739         nstats->rx_length_errors =
3740                                 estats->rx_stat_etherstatsundersizepkts_lo +
3741                                 estats->jabber_packets_received;
3742         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3743         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3744         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3745         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3746         nstats->rx_missed_errors = estats->xxoverflow_discard;
3747
3748         nstats->rx_errors = nstats->rx_length_errors +
3749                             nstats->rx_over_errors +
3750                             nstats->rx_crc_errors +
3751                             nstats->rx_frame_errors +
3752                             nstats->rx_fifo_errors +
3753                             nstats->rx_missed_errors;
3754
3755         nstats->tx_aborted_errors =
3756                         estats->tx_stat_dot3statslatecollisions_lo +
3757                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3758         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3759         nstats->tx_fifo_errors = 0;
3760         nstats->tx_heartbeat_errors = 0;
3761         nstats->tx_window_errors = 0;
3762
3763         nstats->tx_errors = nstats->tx_aborted_errors +
3764                             nstats->tx_carrier_errors;
3765 }
3766
3767 static void bnx2x_stats_update(struct bnx2x *bp)
3768 {
3769         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3770         int update = 0;
3771
3772         if (*stats_comp != DMAE_COMP_VAL)
3773                 return;
3774
3775         if (bp->port.pmf)
3776                 update = (bnx2x_hw_stats_update(bp) == 0);
3777
3778         update |= (bnx2x_storm_stats_update(bp) == 0);
3779
3780         if (update)
3781                 bnx2x_net_stats_update(bp);
3782
3783         else {
3784                 if (bp->stats_pending) {
3785                         bp->stats_pending++;
3786                         if (bp->stats_pending == 3) {
3787                                 BNX2X_ERR("stats not updated for 3 polls\n");
3788                                 bnx2x_panic();
3789                                 return;
3790                         }
3791                 }
3792         }
3793
3794         if (bp->msglevel & NETIF_MSG_TIMER) {
3795                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3796                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3797                 struct net_device_stats *nstats = &bp->dev->stats;
3798                 int i;
3799
3800                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3801                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3802                                   "  tx pkt (%lx)\n",
3803                        bnx2x_tx_avail(bp->fp),
3804                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3805                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3806                                   "  rx pkt (%lx)\n",
3807                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3808                              bp->fp->rx_comp_cons),
3809                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3810                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3811                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3812                        estats->driver_xoff, estats->brb_drop_lo);
3813                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3814                         "packets_too_big_discard %u  no_buff_discard %u  "
3815                         "mac_discard %u  mac_filter_discard %u  "
3816                         "xxovrflow_discard %u  brb_truncate_discard %u  "
3817                         "ttl0_discard %u\n",
3818                        old_tclient->checksum_discard,
3819                        old_tclient->packets_too_big_discard,
3820                        old_tclient->no_buff_discard, estats->mac_discard,
3821                        estats->mac_filter_discard, estats->xxoverflow_discard,
3822                        estats->brb_truncate_discard,
3823                        old_tclient->ttl0_discard);
3824
3825                 for_each_queue(bp, i) {
3826                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3827                                bnx2x_fp(bp, i, tx_pkt),
3828                                bnx2x_fp(bp, i, rx_pkt),
3829                                bnx2x_fp(bp, i, rx_calls));
3830                 }
3831         }
3832
3833         bnx2x_hw_stats_post(bp);
3834         bnx2x_storm_stats_post(bp);
3835 }
3836
3837 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3838 {
3839         struct dmae_command *dmae;
3840         u32 opcode;
3841         int loader_idx = PMF_DMAE_C(bp);
3842         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3843
3844         bp->executer_idx = 0;
3845
3846         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3847                   DMAE_CMD_C_ENABLE |
3848                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3849 #ifdef __BIG_ENDIAN
3850                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3851 #else
3852                   DMAE_CMD_ENDIANITY_DW_SWAP |
3853 #endif
3854                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3855                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3856
3857         if (bp->port.port_stx) {
3858
3859                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3860                 if (bp->func_stx)
3861                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3862                 else
3863                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3864                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3865                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3866                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3867                 dmae->dst_addr_hi = 0;
3868                 dmae->len = sizeof(struct host_port_stats) >> 2;
3869                 if (bp->func_stx) {
3870                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3871                         dmae->comp_addr_hi = 0;
3872                         dmae->comp_val = 1;
3873                 } else {
3874                         dmae->comp_addr_lo =
3875                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3876                         dmae->comp_addr_hi =
3877                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3878                         dmae->comp_val = DMAE_COMP_VAL;
3879
3880                         *stats_comp = 0;
3881                 }
3882         }
3883
3884         if (bp->func_stx) {
3885
3886                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3887                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3888                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3889                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3890                 dmae->dst_addr_lo = bp->func_stx >> 2;
3891                 dmae->dst_addr_hi = 0;
3892                 dmae->len = sizeof(struct host_func_stats) >> 2;
3893                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3894                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3895                 dmae->comp_val = DMAE_COMP_VAL;
3896
3897                 *stats_comp = 0;
3898         }
3899 }
3900
3901 static void bnx2x_stats_stop(struct bnx2x *bp)
3902 {
3903         int update = 0;
3904
3905         bnx2x_stats_comp(bp);
3906
3907         if (bp->port.pmf)
3908                 update = (bnx2x_hw_stats_update(bp) == 0);
3909
3910         update |= (bnx2x_storm_stats_update(bp) == 0);
3911
3912         if (update) {
3913                 bnx2x_net_stats_update(bp);
3914
3915                 if (bp->port.pmf)
3916                         bnx2x_port_stats_stop(bp);
3917
3918                 bnx2x_hw_stats_post(bp);
3919                 bnx2x_stats_comp(bp);
3920         }
3921 }
3922
3923 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3924 {
3925 }
3926
3927 static const struct {
3928         void (*action)(struct bnx2x *bp);
3929         enum bnx2x_stats_state next_state;
3930 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3931 /* state        event   */
3932 {
3933 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3934 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3935 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3936 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3937 },
3938 {
3939 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3940 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3941 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3942 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3943 }
3944 };
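/* The statistics engine is driven as a state machine: the current state
 * and the incoming event select both an action and the next state.  For
 * example, a LINK_UP event in STATS_STATE_DISABLED runs
 * bnx2x_stats_start() and moves to STATS_STATE_ENABLED, while the same
 * event in STATS_STATE_ENABLED only restarts the collection.
 */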
3945
3946 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3947 {
3948         enum bnx2x_stats_state state = bp->stats_state;
3949
3950         bnx2x_stats_stm[state][event].action(bp);
3951         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3952
3953         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3954                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3955                    state, event, bp->stats_state);
3956 }
3957
3958 static void bnx2x_timer(unsigned long data)
3959 {
3960         struct bnx2x *bp = (struct bnx2x *) data;
3961
3962         if (!netif_running(bp->dev))
3963                 return;
3964
3965         if (atomic_read(&bp->intr_sem) != 0)
3966                 goto timer_restart;
3967
3968         if (poll) {
3969                 struct bnx2x_fastpath *fp = &bp->fp[0];
3970                 int rc;
3971
3972                 bnx2x_tx_int(fp, 1000);
3973                 rc = bnx2x_rx_int(fp, 1000);
3974         }
3975
3976         if (!BP_NOMCP(bp)) {
3977                 int func = BP_FUNC(bp);
3978                 u32 drv_pulse;
3979                 u32 mcp_pulse;
3980
3981                 ++bp->fw_drv_pulse_wr_seq;
3982                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3983                 /* TBD - add SYSTEM_TIME */
3984                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3985                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3986
3987                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3988                              MCP_PULSE_SEQ_MASK);
3989                 /* The delta between driver pulse and mcp response
3990                  * should be 1 (before mcp response) or 0 (after mcp response)
3991                  */
3992                 if ((drv_pulse != mcp_pulse) &&
3993                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3994                         /* someone lost a heartbeat... */
3995                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3996                                   drv_pulse, mcp_pulse);
3997                 }
3998         }
3999
4000         if ((bp->state == BNX2X_STATE_OPEN) ||
4001             (bp->state == BNX2X_STATE_DISABLED))
4002                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4003
4004 timer_restart:
4005         mod_timer(&bp->timer, jiffies + bp->current_interval);
4006 }
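
/* Annotation: the timer above doubles as the driver->MCP heartbeat.
 * Every tick writes an incrementing sequence number (modulo
 * DRV_PULSE_SEQ_MASK + 1) into shared memory and reads back the value
 * the MCP last echoed; a delta of 0 or 1 is healthy, anything else
 * means one side stopped pulsing and is logged via BNX2X_ERR().
 */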
4007
4008 /* end of Statistics */
4009
4010 /* nic init */
4011
4012 /*
4013  * nic init service functions
4014  */
4015
4016 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4017 {
4018         int port = BP_PORT(bp);
4019
4020         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4021                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4022                         sizeof(struct ustorm_status_block)/4);
4023         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4024                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4025                         sizeof(struct cstorm_status_block)/4);
4026 }
4027
4028 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4029                           dma_addr_t mapping, int sb_id)
4030 {
4031         int port = BP_PORT(bp);
4032         int func = BP_FUNC(bp);
4033         int index;
4034         u64 section;
4035
4036         /* USTORM */
4037         section = ((u64)mapping) + offsetof(struct host_status_block,
4038                                             u_status_block);
4039         sb->u_status_block.status_block_id = sb_id;
4040
4041         REG_WR(bp, BAR_USTRORM_INTMEM +
4042                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4043         REG_WR(bp, BAR_USTRORM_INTMEM +
4044                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4045                U64_HI(section));
4046         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4047                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4048
4049         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4050                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4051                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4052
4053         /* CSTORM */
4054         section = ((u64)mapping) + offsetof(struct host_status_block,
4055                                             c_status_block);
4056         sb->c_status_block.status_block_id = sb_id;
4057
4058         REG_WR(bp, BAR_CSTRORM_INTMEM +
4059                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4060         REG_WR(bp, BAR_CSTRORM_INTMEM +
4061                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4062                U64_HI(section));
4063         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4064                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4065
4066         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4067                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4068                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4069
4070         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4071 }
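
/* Annotation on the per-storm sequence above: for each storm the driver
 * 1) publishes the DMA address of the host status block section,
 * 2) binds the status block to this PCI function, and
 * 3) writes 1 to every HC_DISABLE index, so no index triggers an
 *    interrupt until bnx2x_update_coalesce() selectively re-enables
 *    the ones that have a coalescing timeout configured.
 * The final bnx2x_ack_sb() call asks the IGU to enable interrupt
 * generation for this sb_id.
 */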
4072
4073 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4074 {
4075         int func = BP_FUNC(bp);
4076
4077         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4078                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4079                         sizeof(struct ustorm_def_status_block)/4);
4080         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4081                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4082                         sizeof(struct cstorm_def_status_block)/4);
4083         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4084                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4085                         sizeof(struct xstorm_def_status_block)/4);
4086         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4087                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4088                         sizeof(struct tstorm_def_status_block)/4);
4089 }
4090
4091 static void bnx2x_init_def_sb(struct bnx2x *bp,
4092                               struct host_def_status_block *def_sb,
4093                               dma_addr_t mapping, int sb_id)
4094 {
4095         int port = BP_PORT(bp);
4096         int func = BP_FUNC(bp);
4097         int index, val, reg_offset;
4098         u64 section;
4099
4100         /* ATTN */
4101         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4102                                             atten_status_block);
4103         def_sb->atten_status_block.status_block_id = sb_id;
4104
4105         bp->attn_state = 0;
4106
4107         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4108                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4109
4110         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4111                 bp->attn_group[index].sig[0] = REG_RD(bp,
4112                                                      reg_offset + 0x10*index);
4113                 bp->attn_group[index].sig[1] = REG_RD(bp,
4114                                                reg_offset + 0x4 + 0x10*index);
4115                 bp->attn_group[index].sig[2] = REG_RD(bp,
4116                                                reg_offset + 0x8 + 0x10*index);
4117                 bp->attn_group[index].sig[3] = REG_RD(bp,
4118                                                reg_offset + 0xc + 0x10*index);
4119         }
4120
4121         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4122                              HC_REG_ATTN_MSG0_ADDR_L);
4123
4124         REG_WR(bp, reg_offset, U64_LO(section));
4125         REG_WR(bp, reg_offset + 4, U64_HI(section));
4126
4127         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4128
4129         val = REG_RD(bp, reg_offset);
4130         val |= sb_id;
4131         REG_WR(bp, reg_offset, val);
4132
4133         /* USTORM */
4134         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4135                                             u_def_status_block);
4136         def_sb->u_def_status_block.status_block_id = sb_id;
4137
4138         REG_WR(bp, BAR_USTRORM_INTMEM +
4139                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4140         REG_WR(bp, BAR_USTRORM_INTMEM +
4141                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4142                U64_HI(section));
4143         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4144                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4145
4146         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4147                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4148                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4149
4150         /* CSTORM */
4151         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4152                                             c_def_status_block);
4153         def_sb->c_def_status_block.status_block_id = sb_id;
4154
4155         REG_WR(bp, BAR_CSTRORM_INTMEM +
4156                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4157         REG_WR(bp, BAR_CSTRORM_INTMEM +
4158                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4159                U64_HI(section));
4160         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4161                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4162
4163         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4164                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4165                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4166
4167         /* TSTORM */
4168         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4169                                             t_def_status_block);
4170         def_sb->t_def_status_block.status_block_id = sb_id;
4171
4172         REG_WR(bp, BAR_TSTRORM_INTMEM +
4173                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4174         REG_WR(bp, BAR_TSTRORM_INTMEM +
4175                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4176                U64_HI(section));
4177         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4178                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4179
4180         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4181                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4182                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4183
4184         /* XSTORM */
4185         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4186                                             x_def_status_block);
4187         def_sb->x_def_status_block.status_block_id = sb_id;
4188
4189         REG_WR(bp, BAR_XSTRORM_INTMEM +
4190                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4191         REG_WR(bp, BAR_XSTRORM_INTMEM +
4192                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4193                U64_HI(section));
4194         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4195                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4196
4197         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4198                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4199                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4200
4201         bp->stats_pending = 0;
4202         bp->set_mac_pending = 0;
4203
4204         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4205 }
4206
4207 static void bnx2x_update_coalesce(struct bnx2x *bp)
4208 {
4209         int port = BP_PORT(bp);
4210         int i;
4211
4212         for_each_queue(bp, i) {
4213                 int sb_id = bp->fp[i].sb_id;
4214
4215                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4216                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4217                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4218                                                     U_SB_ETH_RX_CQ_INDEX),
4219                         bp->rx_ticks/12);
4220                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4221                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4222                                                      U_SB_ETH_RX_CQ_INDEX),
4223                          bp->rx_ticks ? 0 : 1);
4224                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4225                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4226                                                      U_SB_ETH_RX_BD_INDEX),
4227                          bp->rx_ticks ? 0 : 1);
4228
4229                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4230                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4231                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4232                                                     C_SB_ETH_TX_CQ_INDEX),
4233                         bp->tx_ticks/12);
4234                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4235                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4236                                                      C_SB_ETH_TX_CQ_INDEX),
4237                          bp->tx_ticks ? 0 : 1);
4238         }
4239 }
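
/* Annotation, assuming the HC timeout registers count in 12-usec units
 * (hence the division by 12): rx_ticks = 48 is programmed as a timeout
 * value of 4, while rx_ticks = 0 writes 1 to the matching HC_DISABLE
 * index instead, turning interrupt coalescing off for that index
 * entirely.
 */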
4240
4241 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4242                                        struct bnx2x_fastpath *fp, int last)
4243 {
4244         int i;
4245
4246         for (i = 0; i < last; i++) {
4247                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4248                 struct sk_buff *skb = rx_buf->skb;
4249
4250                 if (skb == NULL) {
4251                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4252                         continue;
4253                 }
4254
4255                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4256                         pci_unmap_single(bp->pdev,
4257                                          pci_unmap_addr(rx_buf, mapping),
4258                                          bp->rx_buf_size,
4259                                          PCI_DMA_FROMDEVICE);
4260
4261                 dev_kfree_skb(skb);
4262                 rx_buf->skb = NULL;
4263         }
4264 }
4265
4266 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4267 {
4268         int func = BP_FUNC(bp);
4269         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4270                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4271         u16 ring_prod, cqe_ring_prod;
4272         int i, j;
4273
4274         bp->rx_buf_size = bp->dev->mtu;
4275         bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4276                 BCM_RX_ETH_PAYLOAD_ALIGN;
4277
4278         if (bp->flags & TPA_ENABLE_FLAG) {
4279                 DP(NETIF_MSG_IFUP,
4280                    "rx_buf_size %d  effective_mtu %d\n",
4281                    bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4282
4283                 for_each_queue(bp, j) {
4284                         struct bnx2x_fastpath *fp = &bp->fp[j];
4285
4286                         for (i = 0; i < max_agg_queues; i++) {
4287                                 fp->tpa_pool[i].skb =
4288                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4289                                 if (!fp->tpa_pool[i].skb) {
4290                                         BNX2X_ERR("Failed to allocate TPA "
4291                                                   "skb pool for queue[%d] - "
4292                                                   "disabling TPA on this "
4293                                                   "queue!\n", j);
4294                                         bnx2x_free_tpa_pool(bp, fp, i);
4295                                         fp->disable_tpa = 1;
4296                                         break;
4297                                 }
4298                                 /* no DMA mapping for this bin yet */
4299                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4300                                                    mapping, 0);
4301                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4302                         }
4303                 }
4304         }
4305
4306         for_each_queue(bp, j) {
4307                 struct bnx2x_fastpath *fp = &bp->fp[j];
4308
4309                 fp->rx_bd_cons = 0;
4310                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4311                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4312
4313                 /* "next page" elements initialization */
4314                 /* SGE ring */
4315                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4316                         struct eth_rx_sge *sge;
4317
4318                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4319                         sge->addr_hi =
4320                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4321                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4322                         sge->addr_lo =
4323                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4324                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4325                 }
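
                /* Annotation, based on the index arithmetic above: the
                 * last two elements of each SGE page are reserved, and
                 * the one at RX_SGE_CNT * i - 2 becomes a pointer to the
                 * next page; (i % NUM_RX_SGE_PAGES) wraps the last page
                 * back to the first, closing the ring. The BD and CQ
                 * loops below chain their pages the same way (the CQ
                 * variant stores the pointer in the last CQE of each
                 * page, hence the "- 1").
                 */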
4326
4327                 bnx2x_init_sge_ring_bit_mask(fp);
4328
4329                 /* RX BD ring */
4330                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4331                         struct eth_rx_bd *rx_bd;
4332
4333                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4334                         rx_bd->addr_hi =
4335                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4336                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4337                         rx_bd->addr_lo =
4338                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4339                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4340                 }
4341
4342                 /* CQ ring */
4343                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4344                         struct eth_rx_cqe_next_page *nextpg;
4345
4346                         nextpg = (struct eth_rx_cqe_next_page *)
4347                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4348                         nextpg->addr_hi =
4349                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4350                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4351                         nextpg->addr_lo =
4352                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4353                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4354                 }
4355
4356                 /* Allocate SGEs and initialize the ring elements */
4357                 for (i = 0, ring_prod = 0;
4358                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4359
4360                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4361                                 BNX2X_ERR("was only able to allocate "
4362                                           "%d rx sges\n", i);
4363                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4364                                 /* Cleanup already allocated elements */
4365                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4366                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4367                                 fp->disable_tpa = 1;
4368                                 ring_prod = 0;
4369                                 break;
4370                         }
4371                         ring_prod = NEXT_SGE_IDX(ring_prod);
4372                 }
4373                 fp->rx_sge_prod = ring_prod;
4374
4375                 /* Allocate BDs and initialize BD ring */
4376                 fp->rx_comp_cons = 0;
4377                 cqe_ring_prod = ring_prod = 0;
4378                 for (i = 0; i < bp->rx_ring_size; i++) {
4379                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4380                                 BNX2X_ERR("was only able to allocate "
4381                                           "%d rx skbs\n", i);
4382                                 bp->eth_stats.rx_skb_alloc_failed++;
4383                                 break;
4384                         }
4385                         ring_prod = NEXT_RX_IDX(ring_prod);
4386                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4387                         WARN_ON(ring_prod <= i);
4388                 }
4389
4390                 fp->rx_bd_prod = ring_prod;
4391                 /* must not have more available CQEs than BDs */
4392                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4393                                        cqe_ring_prod);
4394                 fp->rx_pkt = fp->rx_calls = 0;
4395
4396                 /* Warning!
4397                  * this will generate an interrupt (to the TSTORM);
4398                  * it must only be done after the chip is initialized
4399                  */
4400                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4401                                      fp->rx_sge_prod);
4402                 if (j != 0)
4403                         continue;
4404
4405                 REG_WR(bp, BAR_USTRORM_INTMEM +
4406                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4407                        U64_LO(fp->rx_comp_mapping));
4408                 REG_WR(bp, BAR_USTRORM_INTMEM +
4409                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4410                        U64_HI(fp->rx_comp_mapping));
4411         }
4412 }
4413
4414 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4415 {
4416         int i, j;
4417
4418         for_each_queue(bp, j) {
4419                 struct bnx2x_fastpath *fp = &bp->fp[j];
4420
4421                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4422                         struct eth_tx_bd *tx_bd =
4423                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4424
4425                         tx_bd->addr_hi =
4426                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4427                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4428                         tx_bd->addr_lo =
4429                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4430                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4431                 }
4432
4433                 fp->tx_pkt_prod = 0;
4434                 fp->tx_pkt_cons = 0;
4435                 fp->tx_bd_prod = 0;
4436                 fp->tx_bd_cons = 0;
4437                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4438                 fp->tx_pkt = 0;
4439         }
4440 }
4441
4442 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4443 {
4444         int func = BP_FUNC(bp);
4445
4446         spin_lock_init(&bp->spq_lock);
4447
4448         bp->spq_left = MAX_SPQ_PENDING;
4449         bp->spq_prod_idx = 0;
4450         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4451         bp->spq_prod_bd = bp->spq;
4452         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4453
4454         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4455                U64_LO(bp->spq_mapping));
4456         REG_WR(bp,
4457                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4458                U64_HI(bp->spq_mapping));
4459
4460         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4461                bp->spq_prod_idx);
4462 }
4463
4464 static void bnx2x_init_context(struct bnx2x *bp)
4465 {
4466         int i;
4467
4468         for_each_queue(bp, i) {
4469                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4470                 struct bnx2x_fastpath *fp = &bp->fp[i];
4471                 u8 sb_id = FP_SB_ID(fp);
4472
4473                 context->xstorm_st_context.tx_bd_page_base_hi =
4474                                                 U64_HI(fp->tx_desc_mapping);
4475                 context->xstorm_st_context.tx_bd_page_base_lo =
4476                                                 U64_LO(fp->tx_desc_mapping);
4477                 context->xstorm_st_context.db_data_addr_hi =
4478                                                 U64_HI(fp->tx_prods_mapping);
4479                 context->xstorm_st_context.db_data_addr_lo =
4480                                                 U64_LO(fp->tx_prods_mapping);
4481                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4482                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4483
4484                 context->ustorm_st_context.common.sb_index_numbers =
4485                                                 BNX2X_RX_SB_INDEX_NUM;
4486                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4487                 context->ustorm_st_context.common.status_block_id = sb_id;
4488                 context->ustorm_st_context.common.flags =
4489                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4490                 context->ustorm_st_context.common.mc_alignment_size =
4491                         BCM_RX_ETH_PAYLOAD_ALIGN;
4492                 context->ustorm_st_context.common.bd_buff_size =
4493                                                 bp->rx_buf_size;
4494                 context->ustorm_st_context.common.bd_page_base_hi =
4495                                                 U64_HI(fp->rx_desc_mapping);
4496                 context->ustorm_st_context.common.bd_page_base_lo =
4497                                                 U64_LO(fp->rx_desc_mapping);
4498                 if (!fp->disable_tpa) {
4499                         context->ustorm_st_context.common.flags |=
4500                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4501                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4502                         context->ustorm_st_context.common.sge_buff_size =
4503                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4504                         context->ustorm_st_context.common.sge_page_base_hi =
4505                                                 U64_HI(fp->rx_sge_mapping);
4506                         context->ustorm_st_context.common.sge_page_base_lo =
4507                                                 U64_LO(fp->rx_sge_mapping);
4508                 }
4509
4510                 context->cstorm_st_context.sb_index_number =
4511                                                 C_SB_ETH_TX_CQ_INDEX;
4512                 context->cstorm_st_context.status_block_id = sb_id;
4513
4514                 context->xstorm_ag_context.cdu_reserved =
4515                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4516                                                CDU_REGION_NUMBER_XCM_AG,
4517                                                ETH_CONNECTION_TYPE);
4518                 context->ustorm_ag_context.cdu_usage =
4519                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4520                                                CDU_REGION_NUMBER_UCM_AG,
4521                                                ETH_CONNECTION_TYPE);
4522         }
4523 }
4524
4525 static void bnx2x_init_ind_table(struct bnx2x *bp)
4526 {
4527         int func = BP_FUNC(bp);
4528         int i;
4529
4530         if (!is_multi(bp))
4531                 return;
4532
4533         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4534         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4535                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4536                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4537                         BP_CL_ID(bp) + (i % bp->num_queues));
4538 }
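
/* Annotation: the indirection table maps an RSS hash result to a client
 * ID round-robin. With num_queues = 4 and a leading client ID of 0, for
 * example, the TSTORM_INDIRECTION_TABLE_SIZE entries are filled with the
 * repeating pattern 0, 1, 2, 3, 0, 1, ... so hashed flows spread evenly
 * across the enabled queues.
 */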
4539
4540 static void bnx2x_set_client_config(struct bnx2x *bp)
4541 {
4542         struct tstorm_eth_client_config tstorm_client = {0};
4543         int port = BP_PORT(bp);
4544         int i;
4545
4546         tstorm_client.mtu = bp->dev->mtu;
4547         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4548         tstorm_client.config_flags =
4549                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4550 #ifdef BCM_VLAN
4551         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4552                 tstorm_client.config_flags |=
4553                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4554                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4555         }
4556 #endif
4557
4558         if (bp->flags & TPA_ENABLE_FLAG) {
4559                 tstorm_client.max_sges_for_packet =
4560                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4561                 tstorm_client.max_sges_for_packet =
4562                         ((tstorm_client.max_sges_for_packet +
4563                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4564                         PAGES_PER_SGE_SHIFT;
4565
4566                 tstorm_client.config_flags |=
4567                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4568         }
4569
4570         for_each_queue(bp, i) {
4571                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4572                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4573                        ((u32 *)&tstorm_client)[0]);
4574                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4575                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4576                        ((u32 *)&tstorm_client)[1]);
4577         }
4578
4579         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4580            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4581 }
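
/* Annotation: max_sges_for_packet above is computed in two steps -- the
 * MTU is first rounded up to whole SGE pages, then that page count is
 * rounded up to a multiple of PAGES_PER_SGE and converted to an SGE
 * count. As a worked example, assuming SGE_PAGE_SIZE = 4096 and
 * PAGES_PER_SGE = 2: an MTU of 9000 aligns to 3 pages, rounds up to 4,
 * and 4 >> 1 gives 2 SGEs per aggregated packet.
 */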
4582
4583 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4584 {
4585         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4586         int mode = bp->rx_mode;
4587         int mask = (1 << BP_L_ID(bp));
4588         int func = BP_FUNC(bp);
4589         int i;
4590
4591         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4592
4593         switch (mode) {
4594         case BNX2X_RX_MODE_NONE: /* no Rx */
4595                 tstorm_mac_filter.ucast_drop_all = mask;
4596                 tstorm_mac_filter.mcast_drop_all = mask;
4597                 tstorm_mac_filter.bcast_drop_all = mask;
4598                 break;
4599         case BNX2X_RX_MODE_NORMAL:
4600                 tstorm_mac_filter.bcast_accept_all = mask;
4601                 break;
4602         case BNX2X_RX_MODE_ALLMULTI:
4603                 tstorm_mac_filter.mcast_accept_all = mask;
4604                 tstorm_mac_filter.bcast_accept_all = mask;
4605                 break;
4606         case BNX2X_RX_MODE_PROMISC:
4607                 tstorm_mac_filter.ucast_accept_all = mask;
4608                 tstorm_mac_filter.mcast_accept_all = mask;
4609                 tstorm_mac_filter.bcast_accept_all = mask;
4610                 break;
4611         default:
4612                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4613                 break;
4614         }
4615
4616         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4617                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4618                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4619                        ((u32 *)&tstorm_mac_filter)[i]);
4620
4621 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4622                    ((u32 *)&tstorm_mac_filter)[i]); */
4623         }
4624
4625         if (mode != BNX2X_RX_MODE_NONE)
4626                 bnx2x_set_client_config(bp);
4627 }
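
/* Annotation: each drop/accept field is a per-function bitmask rather
 * than a boolean -- mask = 1 << BP_L_ID(bp) sets only this function's
 * bit, so on a multi-function (E1H) device each function can program
 * its own rx mode without disturbing the others. BNX2X_RX_MODE_NONE is
 * the only mode that skips bnx2x_set_client_config().
 */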
4628
4629 static void bnx2x_init_internal_common(struct bnx2x *bp)
4630 {
4631         int i;
4632
4633         if (bp->flags & TPA_ENABLE_FLAG) {
4634                 struct tstorm_eth_tpa_exist tpa = {0};
4635
4636                 tpa.tpa_exist = 1;
4637
4638                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4639                        ((u32 *)&tpa)[0]);
4640                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4641                        ((u32 *)&tpa)[1]);
4642         }
4643
4644         /* Zero this manually as its initialization is
4645            currently missing in the initTool */
4646         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4647                 REG_WR(bp, BAR_USTRORM_INTMEM +
4648                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4649 }
4650
4651 static void bnx2x_init_internal_port(struct bnx2x *bp)
4652 {
4653         int port = BP_PORT(bp);
4654
4655         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4656         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4657         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4658         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4659 }
4660
4661 static void bnx2x_init_internal_func(struct bnx2x *bp)
4662 {
4663         struct tstorm_eth_function_common_config tstorm_config = {0};
4664         struct stats_indication_flags stats_flags = {0};
4665         int port = BP_PORT(bp);
4666         int func = BP_FUNC(bp);
4667         int i;
4668         u16 max_agg_size;
4669
4670         if (is_multi(bp)) {
4671                 tstorm_config.config_flags = MULTI_FLAGS;
4672                 tstorm_config.rss_result_mask = MULTI_MASK;
4673         }
4674
4675         tstorm_config.leading_client_id = BP_L_ID(bp);
4676
4677         REG_WR(bp, BAR_TSTRORM_INTMEM +
4678                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4679                (*(u32 *)&tstorm_config));
4680
4681         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4682         bnx2x_set_storm_rx_mode(bp);
4683
4684         /* reset xstorm per client statistics */
4685         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4686                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4687                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4688                        i*4, 0);
4689         }
4690         /* reset tstorm per client statistics */
4691         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4692                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4693                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4694                        i*4, 0);
4695         }
4696
4697         /* Init statistics related context */
4698         stats_flags.collect_eth = 1;
4699
4700         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4701                ((u32 *)&stats_flags)[0]);
4702         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4703                ((u32 *)&stats_flags)[1]);
4704
4705         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4706                ((u32 *)&stats_flags)[0]);
4707         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4708                ((u32 *)&stats_flags)[1]);
4709
4710         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4711                ((u32 *)&stats_flags)[0]);
4712         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4713                ((u32 *)&stats_flags)[1]);
4714
4715         REG_WR(bp, BAR_XSTRORM_INTMEM +
4716                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4717                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4718         REG_WR(bp, BAR_XSTRORM_INTMEM +
4719                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4720                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4721
4722         REG_WR(bp, BAR_TSTRORM_INTMEM +
4723                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4724                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4725         REG_WR(bp, BAR_TSTRORM_INTMEM +
4726                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4727                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4728
4729         if (CHIP_IS_E1H(bp)) {
4730                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4731                         IS_E1HMF(bp));
4732                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4733                         IS_E1HMF(bp));
4734                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4735                         IS_E1HMF(bp));
4736                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4737                         IS_E1HMF(bp));
4738
4739                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4740                          bp->e1hov);
4741         }
4742
4743         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
4744         max_agg_size =
4745                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4746                           SGE_PAGE_SIZE * PAGES_PER_SGE),
4747                     (u32)0xffff);
4748         for_each_queue(bp, i) {
4749                 struct bnx2x_fastpath *fp = &bp->fp[i];
4750
4751                 REG_WR(bp, BAR_USTRORM_INTMEM +
4752                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4753                        U64_LO(fp->rx_comp_mapping));
4754                 REG_WR(bp, BAR_USTRORM_INTMEM +
4755                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4756                        U64_HI(fp->rx_comp_mapping));
4757
4758                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4759                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4760                          max_agg_size);
4761         }
4762 }
4763
4764 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4765 {
4766         switch (load_code) {
4767         case FW_MSG_CODE_DRV_LOAD_COMMON:
4768                 bnx2x_init_internal_common(bp);
4769                 /* no break */
4770
4771         case FW_MSG_CODE_DRV_LOAD_PORT:
4772                 bnx2x_init_internal_port(bp);
4773                 /* no break */
4774
4775         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4776                 bnx2x_init_internal_func(bp);
4777                 break;
4778
4779         default:
4780                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4781                 break;
4782         }
4783 }
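
/* Annotation: the deliberate fall-through above makes the MCP load
 * codes cumulative:
 *
 *      DRV_LOAD_COMMON    ->  common + port + function init
 *      DRV_LOAD_PORT      ->           port + function init
 *      DRV_LOAD_FUNCTION  ->                  function init
 *
 * so each driver instance initializes exactly the scope the MCP
 * assigned to it and nothing more.
 */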
4784
4785 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4786 {
4787         int i;
4788
4789         for_each_queue(bp, i) {
4790                 struct bnx2x_fastpath *fp = &bp->fp[i];
4791
4792                 fp->bp = bp;
4793                 fp->state = BNX2X_FP_STATE_CLOSED;
4794                 fp->index = i;
4795                 fp->cl_id = BP_L_ID(bp) + i;
4796                 fp->sb_id = fp->cl_id;
4797                 DP(NETIF_MSG_IFUP,
4798                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4799                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4800                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4801                               FP_SB_ID(fp));
4802                 bnx2x_update_fpsb_idx(fp);
4803         }
4804
4805         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4806                           DEF_SB_ID);
4807         bnx2x_update_dsb_idx(bp);
4808         bnx2x_update_coalesce(bp);
4809         bnx2x_init_rx_rings(bp);
4810         bnx2x_init_tx_ring(bp);
4811         bnx2x_init_sp_ring(bp);
4812         bnx2x_init_context(bp);
4813         bnx2x_init_internal(bp, load_code);
4814         bnx2x_init_ind_table(bp);
4815         bnx2x_stats_init(bp);
4816
4817         /* At this point, we are ready for interrupts */
4818         atomic_set(&bp->intr_sem, 0);
4819
4820         /* flush all before enabling interrupts */
4821         mb();
4822         mmiowb();
4823
4824         bnx2x_int_enable(bp);
4825 }
4826
4827 /* end of nic init */
4828
4829 /*
4830  * gzip service functions
4831  */
4832
4833 static int bnx2x_gunzip_init(struct bnx2x *bp)
4834 {
4835         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4836                                               &bp->gunzip_mapping);
4837         if (bp->gunzip_buf == NULL)
4838                 goto gunzip_nomem1;
4839
4840         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4841         if (bp->strm == NULL)
4842                 goto gunzip_nomem2;
4843
4844         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4845                                       GFP_KERNEL);
4846         if (bp->strm->workspace == NULL)
4847                 goto gunzip_nomem3;
4848
4849         return 0;
4850
4851 gunzip_nomem3:
4852         kfree(bp->strm);
4853         bp->strm = NULL;
4854
4855 gunzip_nomem2:
4856         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4857                             bp->gunzip_mapping);
4858         bp->gunzip_buf = NULL;
4859
4860 gunzip_nomem1:
4861         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4862                " decompression\n", bp->dev->name);
4863         return -ENOMEM;
4864 }
4865
4866 static void bnx2x_gunzip_end(struct bnx2x *bp)
4867 {
4868         kfree(bp->strm->workspace);
4869
4870         kfree(bp->strm);
4871         bp->strm = NULL;
4872
4873         if (bp->gunzip_buf) {
4874                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4875                                     bp->gunzip_mapping);
4876                 bp->gunzip_buf = NULL;
4877         }
4878 }
4879
4880 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4881 {
4882         int n, rc;
4883
4884         /* check gzip header */
4885         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4886                 return -EINVAL;
4887
4888         n = 10;
4889
4890 #define FNAME                           0x8
4891
4892         if (zbuf[3] & FNAME)
4893                 while ((n < len) && (zbuf[n++] != 0));
4894
4895         bp->strm->next_in = zbuf + n;
4896         bp->strm->avail_in = len - n;
4897         bp->strm->next_out = bp->gunzip_buf;
4898         bp->strm->avail_out = FW_BUF_SIZE;
4899
4900         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4901         if (rc != Z_OK)
4902                 return rc;
4903
4904         rc = zlib_inflate(bp->strm, Z_FINISH);
4905         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4906                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4907                        bp->dev->name, bp->strm->msg);
4908
4909         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4910         if (bp->gunzip_outlen & 0x3)
4911                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4912                                     " gunzip_outlen (%d) not aligned\n",
4913                        bp->dev->name, bp->gunzip_outlen);
4914         bp->gunzip_outlen >>= 2;
4915
4916         zlib_inflateEnd(bp->strm);
4917
4918         if (rc == Z_STREAM_END)
4919                 return 0;
4920
4921         return rc;
4922 }
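
/* Annotation: the firmware image is a standard gzip file (RFC 1952),
 * i.e. a 10-byte fixed header -- magic bytes 0x1f 0x8b, a method byte
 * (Z_DEFLATED), a flags byte, mtime, xfl and os -- optionally followed
 * by a NUL-terminated file name when the FNAME flag is set. The code
 * above skips that header by hand and then passes -MAX_WBITS to
 * zlib_inflateInit2(), which tells zlib to decode a raw deflate stream
 * with no gzip/zlib framing.
 */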
4923
4924 /* nic load/unload */
4925
4926 /*
4927  * General service functions
4928  */
4929
4930 /* send a NIG loopback debug packet */
4931 static void bnx2x_lb_pckt(struct bnx2x *bp)
4932 {
4933         u32 wb_write[3];
4934
4935         /* Ethernet source and destination addresses */
4936         wb_write[0] = 0x55555555;
4937         wb_write[1] = 0x55555555;
4938         wb_write[2] = 0x20;             /* SOP */
4939         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4940
4941         /* NON-IP protocol */
4942         wb_write[0] = 0x09000000;
4943         wb_write[1] = 0x55555555;
4944         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4945         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4946 }
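
/* Annotation, inferred from the values above: each 3-word write to
 * NIG_REG_DEBUG_PACKET_LB carries 8 bytes of packet data plus a control
 * word in which bit 5 (0x20) marks start-of-packet and bit 4 (0x10)
 * marks end-of-packet. The two writes therefore emit one minimal
 * 16-byte frame, matching the 0x10-byte packet the memory test below
 * waits for.
 */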
4947
4948 /* some of the internal memories
4949  * are not directly readable from the driver;
4950  * to test them we send debug packets
4951  */
4952 static int bnx2x_int_mem_test(struct bnx2x *bp)
4953 {
4954         int factor;
4955         int count, i;
4956         u32 val = 0;
4957
4958         if (CHIP_REV_IS_FPGA(bp))
4959                 factor = 120;
4960         else if (CHIP_REV_IS_EMUL(bp))
4961                 factor = 200;
4962         else
4963                 factor = 1;
4964
4965         DP(NETIF_MSG_HW, "start part1\n");
4966
4967         /* Disable inputs of parser neighbor blocks */
4968         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4969         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4970         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4971         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4972
4973         /*  Write 0 to parser credits for CFC search request */
4974         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4975
4976         /* send Ethernet packet */
4977         bnx2x_lb_pckt(bp);
4978
4979         /* TODO: should the NIG statistics be reset here? */
4980         /* Wait until NIG register shows 1 packet of size 0x10 */
4981         count = 1000 * factor;
4982         while (count) {
4983
4984                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4985                 val = *bnx2x_sp(bp, wb_data[0]);
4986                 if (val == 0x10)
4987                         break;
4988
4989                 msleep(10);
4990                 count--;
4991         }
4992         if (val != 0x10) {
4993                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4994                 return -1;
4995         }
4996
4997         /* Wait until PRS register shows 1 packet */
4998         count = 1000 * factor;
4999         while (count) {
5000                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5001                 if (val == 1)
5002                         break;
5003
5004                 msleep(10);
5005                 count--;
5006         }
5007         if (val != 0x1) {
5008                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5009                 return -2;
5010         }
5011
5012         /* Reset and init BRB, PRS */
5013         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5014         msleep(50);
5015         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5016         msleep(50);
5017         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5018         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5019
5020         DP(NETIF_MSG_HW, "part2\n");
5021
5022         /* Disable inputs of parser neighbor blocks */
5023         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5024         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5025         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5026         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5027
5028         /* Write 0 to parser credits for CFC search request */
5029         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5030
5031         /* send 10 Ethernet packets */
5032         for (i = 0; i < 10; i++)
5033                 bnx2x_lb_pckt(bp);
5034
5035         /* Wait until NIG register shows 10 + 1
5036            packets of size 11*0x10 = 0xb0 */
5037         count = 1000 * factor;
5038         while (count) {
5039
5040                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5041                 val = *bnx2x_sp(bp, wb_data[0]);
5042                 if (val == 0xb0)
5043                         break;
5044
5045                 msleep(10);
5046                 count--;
5047         }
5048         if (val != 0xb0) {
5049                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5050                 return -3;
5051         }
5052
5053         /* the PRS register should show 2 packets by now */
5054         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5055         if (val != 2)
5056                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5057
5058         /* Write 1 to parser credits for CFC search request */
5059         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5060
5061         /* Wait for the PRS register to show 3 packets */
5062         msleep(10 * factor);
5063         /* the PRS register should show 3 packets by now */
5064         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5065         if (val != 3)
5066                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5067
5068         /* clear NIG EOP FIFO */
5069         for (i = 0; i < 11; i++)
5070                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5071         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5072         if (val != 1) {
5073                 BNX2X_ERR("clear of NIG failed\n");
5074                 return -4;
5075         }
5076
5077         /* Reset and init BRB, PRS, NIG */
5078         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5079         msleep(50);
5080         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5081         msleep(50);
5082         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5083         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5084 #ifndef BCM_ISCSI
5085         /* set NIC mode */
5086         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5087 #endif
5088
5089         /* Enable inputs of parser neighbor blocks */
5090         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5091         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5092         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5093         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5094
5095         DP(NETIF_MSG_HW, "done\n");
5096
5097         return 0; /* OK */
5098 }
5099
5100 static void enable_blocks_attention(struct bnx2x *bp)
5101 {
5102         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5103         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5104         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5105         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5106         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5107         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5108         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5109         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5110         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5111 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5112 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5113         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5114         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5115         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5116 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5117 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5118         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5119         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5120         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5121         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5122 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5123 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5124         if (CHIP_REV_IS_FPGA(bp))
5125                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5126         else
5127                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5128         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5129         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5130         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5131 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5132 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5133         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5134         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5135 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5136         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5137 }
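
/* Annotation: these are interrupt *mask* registers, so writing 0
 * unmasks (enables) every attention source in the block. The
 * commented-out SEM/MISC writes and the non-zero PXP2/PBF values are
 * the deliberate exceptions, leaving specific bits masked (e.g. PBF
 * bits 3 and 4, per the inline comment).
 */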
5138
5139
5140 static int bnx2x_init_common(struct bnx2x *bp)
5141 {
5142         u32 val, i;
5143
5144         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5145
5146         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5147         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5148
5149         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5150         if (CHIP_IS_E1H(bp))
5151                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5152
5153         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5154         msleep(30);
5155         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5156
5157         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5158         if (CHIP_IS_E1(bp)) {
5159                 /* enable HW interrupt from PXP on USDM overflow
5160                    bit 16 on INT_MASK_0 */
5161                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5162         }
5163
5164         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5165         bnx2x_init_pxp(bp);
5166
5167 #ifdef __BIG_ENDIAN
5168         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5169         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5170         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5171         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5172         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5173
5174 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5175         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5176         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5177         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5178         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5179 #endif
5180
5181         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5182 #ifdef BCM_ISCSI
5183         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5184         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5185         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5186 #endif
5187
5188         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5189                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5190
5191         /* let the HW do its magic ... */
5192         msleep(100);
5193         /* finish PXP init */
5194         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5195         if (val != 1) {
5196                 BNX2X_ERR("PXP2 CFG failed\n");
5197                 return -EBUSY;
5198         }
5199         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5200         if (val != 1) {
5201                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5202                 return -EBUSY;
5203         }
5204
5205         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5206         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5207
5208         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5209
5210         /* clean the DMAE memory */
5211         bp->dmae_ready = 1;
5212         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5213
5214         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5215         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5216         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5217         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5218
5219         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5220         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5221         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5222         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5223
5224         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5225         /* soft reset pulse */
5226         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5227         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5228
5229 #ifdef BCM_ISCSI
5230         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5231 #endif
5232
5233         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5234         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5235         if (!CHIP_REV_IS_SLOW(bp)) {
5236                 /* enable hw interrupt from doorbell Q */
5237                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5238         }
5239
5240         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5241         if (CHIP_REV_IS_SLOW(bp)) {
5242                 /* fix for emulation and FPGA (no pause frames) */
5243                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5244                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5245                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5246                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5247         }
5248
5249         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5250         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5251         /* set NIC mode */
5252         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5253         if (CHIP_IS_E1H(bp))
5254                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5255
5256         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5257         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5258         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5259         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5260
5261         if (CHIP_IS_E1H(bp)) {
5262                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5263                                 STORM_INTMEM_SIZE_E1H/2);
5264                 bnx2x_init_fill(bp,
5265                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5266                                 0, STORM_INTMEM_SIZE_E1H/2);
5267                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5268                                 STORM_INTMEM_SIZE_E1H/2);
5269                 bnx2x_init_fill(bp,
5270                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5271                                 0, STORM_INTMEM_SIZE_E1H/2);
5272                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5273                                 STORM_INTMEM_SIZE_E1H/2);
5274                 bnx2x_init_fill(bp,
5275                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5276                                 0, STORM_INTMEM_SIZE_E1H/2);
5277                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5278                                 STORM_INTMEM_SIZE_E1H/2);
5279                 bnx2x_init_fill(bp,
5280                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5281                                 0, STORM_INTMEM_SIZE_E1H/2);
5282         } else { /* E1 */
5283                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5284                                 STORM_INTMEM_SIZE_E1);
5285                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5286                                 STORM_INTMEM_SIZE_E1);
5287                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5288                                 STORM_INTMEM_SIZE_E1);
5289                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5290                                 STORM_INTMEM_SIZE_E1);
5291         }
5292
5293         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5294         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5295         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5296         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5297
5298         /* sync semi rtc */
5299         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5300                0x80000000);
5301         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5302                0x80000000);
5303
5304         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5305         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5306         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5307
5308         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5309         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5310                 REG_WR(bp, i, 0xc0cac01a);
5311                 /* TODO: replace with something meaningful */
5312         }
5313         if (CHIP_IS_E1H(bp))
5314                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5315         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5316
5317         if (sizeof(union cdu_context) != 1024)
5318                 /* we currently assume that a context is 1024 bytes */
5319                 printk(KERN_ALERT PFX "please adjust the size of"
5320                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5321
5322         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5323         val = (4 << 24) + (0 << 12) + 1024;
5324         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5325         if (CHIP_IS_E1(bp)) {
5326                 /* !!! fix pxp client credit until excel update */
5327                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5328                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5329         }
5330
5331         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5332         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5333
5334         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5335         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5336
5337         /* PXPCS COMMON comes here */
5338         /* Reset PCIE errors for debug */
5339         REG_WR(bp, 0x2814, 0xffffffff);
5340         REG_WR(bp, 0x3820, 0xffffffff);
5341
5342         /* EMAC0 COMMON comes here */
5343         /* EMAC1 COMMON comes here */
5344         /* DBU COMMON comes here */
5345         /* DBG COMMON comes here */
5346
5347         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5348         if (CHIP_IS_E1H(bp)) {
5349                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5350                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5351         }
5352
5353         if (CHIP_REV_IS_SLOW(bp))
5354                 msleep(200);
5355
5356         /* finish CFC init */
5357         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5358         if (val != 1) {
5359                 BNX2X_ERR("CFC LL_INIT failed\n");
5360                 return -EBUSY;
5361         }
5362         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5363         if (val != 1) {
5364                 BNX2X_ERR("CFC AC_INIT failed\n");
5365                 return -EBUSY;
5366         }
5367         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5368         if (val != 1) {
5369                 BNX2X_ERR("CFC CAM_INIT failed\n");
5370                 return -EBUSY;
5371         }
5372         REG_WR(bp, CFC_REG_DEBUG0, 0);
5373
5374         /* read NIG statistic
5375            to see if this is our first up since powerup */
5376         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5377         val = *bnx2x_sp(bp, wb_data[0]);
5378
5379         /* do internal memory self test */
5380         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5381                 BNX2X_ERR("internal mem self test failed\n");
5382                 return -EBUSY;
5383         }
5384
5385         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5386         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5387         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5388                 /* Fan failure is indicated by SPIO 5 */
5389                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5390                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5391
5392                 /* set to active low mode */
5393                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5394                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5395                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5396                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5397
5398                 /* enable interrupt to signal the IGU */
5399                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5400                 val |= (1 << MISC_REGISTERS_SPIO_5);
5401                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5402                 break;
5403
5404         default:
5405                 break;
5406         }
5407
5408         /* clear PXP2 attentions */
5409         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5410
5411         enable_blocks_attention(bp);
5412
5413         if (!BP_NOMCP(bp)) {
5414                 bnx2x_acquire_phy_lock(bp);
5415                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5416                 bnx2x_release_phy_lock(bp);
5417         } else
5418                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5419
5420         return 0;
5421 }
5422
5423 static int bnx2x_init_port(struct bnx2x *bp)
5424 {
5425         int port = BP_PORT(bp);
5426         u32 val;
#ifdef BCM_ISCSI
        /* assumed declarations so the BCM_ISCSI blocks below compile;
           i starts one below the first ILT line noted in the comments */
        int func = BP_FUNC(bp);
        int i = port ? 384 : 0;
        u32 wb_write[2];
#endif
5427
5428         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5429
5430         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5431
5432         /* Port PXP comes here */
5433         /* Port PXP2 comes here */
5434 #ifdef BCM_ISCSI
5435         /* Port0  1
5436          * Port1  385 */
5437         i++;
5438         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5439         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5440         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5441         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5442
5443         /* Port0  2
5444          * Port1  386 */
5445         i++;
5446         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5447         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5448         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5449         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5450
5451         /* Port0  3
5452          * Port1  387 */
5453         i++;
5454         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5455         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5456         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5457         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5458 #endif
5459         /* Port CMs come here */
5460
5461         /* Port QM comes here */
5462 #ifdef BCM_ISCSI
5463         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5464         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5465
5466         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5467                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5468 #endif
5469         /* Port DQ comes here */
5470         /* Port BRB1 comes here */
5471         /* Port PRS comes here */
5472         /* Port TSDM comes here */
5473         /* Port CSDM comes here */
5474         /* Port USDM comes here */
5475         /* Port XSDM comes here */
5476         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5477                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5478         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5479                              port ? USEM_PORT1_END : USEM_PORT0_END);
5480         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5481                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5482         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5483                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5484         /* Port UPB comes here */
5485         /* Port XPB comes here */
5486
5487         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5488                              port ? PBF_PORT1_END : PBF_PORT0_END);
5489
5490         /* configure PBF to work without PAUSE mtu 9000 */
5491         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5492
5493         /* update threshold */
5494         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5495         /* update init credit */
5496         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
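        /* Presumably 9040 covers a 9000-byte MTU plus overhead, expressed
           above in 16-byte credit units (9040/16 = 565); the +553 and -22
           credit adjustments are taken as given here. */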
5497
5498         /* probe changes */
5499         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5500         msleep(5);
5501         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5502
5503 #ifdef BCM_ISCSI
5504         /* tell the searcher where the T2 table is */
5505         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5506
5507         wb_write[0] = U64_LO(bp->t2_mapping);
5508         wb_write[1] = U64_HI(bp->t2_mapping);
5509         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5510         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5511         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5512         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5513
5514         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5515         /* Port SRCH comes here */
5516 #endif
5517         /* Port CDU comes here */
5518         /* Port CFC comes here */
5519
5520         if (CHIP_IS_E1(bp)) {
5521                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5522                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5523         }
5524         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5525                              port ? HC_PORT1_END : HC_PORT0_END);
5526
5527         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5528                                     MISC_AEU_PORT0_START,
5529                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5530         /* init aeu_mask_attn_func_0/1:
5531          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5532          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5533          *             bits 4-7 are used for "per vn group attention" */
5534         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5535                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5536
5537         /* Port PXPCS comes here */
5538         /* Port EMAC0 comes here */
5539         /* Port EMAC1 comes here */
5540         /* Port DBU comes here */
5541         /* Port DBG comes here */
5542         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5543                              port ? NIG_PORT1_END : NIG_PORT0_END);
5544
5545         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5546
5547         if (CHIP_IS_E1H(bp)) {
5548                 u32 wsum;
5549                 struct cmng_struct_per_port m_cmng_port;
5550                 int vn;
5551
5552                 /* 0x2 disable e1hov, 0x1 enable */
5553                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5554                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5555
5556                 /* Init RATE SHAPING and FAIRNESS contexts.
5557                    Initialize as if there is a 10G link. */
5558                 wsum = bnx2x_calc_vn_wsum(bp);
5559                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5560                 if (IS_E1HMF(bp))
5561                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5562                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5563                                         wsum, 10000, &m_cmng_port);
5564         }
5565
5566         /* Port MCP comes here */
5567         /* Port DMAE comes here */
5568
5569         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5570         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5571         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5572                 /* add SPIO 5 to group 0 */
5573                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5574                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5575                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5576                 break;
5577
5578         default:
5579                 break;
5580         }
5581
5582         bnx2x__link_reset(bp);
5583
5584         return 0;
5585 }
5586
5587 #define ILT_PER_FUNC            (768/2)
5588 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
5589 /* the phys address is shifted right 12 bits and has a
5590    1=valid bit added as the 53rd bit;
5591    then, since this is a wide register (TM),
5592    we split it into two 32-bit writes
5593  */
5594 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5595 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
5596 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
5597 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
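
/* Worked example (illustrative only, not from the original source):
 * for a DMA address of 0x0000001234567000, ONCHIP_ADDR1() yields
 * 0x01234567 (address bits 12-43) and ONCHIP_ADDR2() yields 0x00100000
 * (bits 44-63 are zero here, plus the valid bit at position 20), so the
 * two 32-bit writes together carry the 53-bit valid+address field.
 * Likewise PXP_ONE_ILT(5) = (5 << 10) | 5 = 0x1405, i.e. a range whose
 * first and last ILT line are both 5.
 */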
5598
5599 #define CNIC_ILT_LINES          0
5600
5601 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5602 {
5603         int reg;
5604
5605         if (CHIP_IS_E1H(bp))
5606                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5607         else /* E1 */
5608                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5609
5610         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5611 }
5612
5613 static int bnx2x_init_func(struct bnx2x *bp)
5614 {
5615         int port = BP_PORT(bp);
5616         int func = BP_FUNC(bp);
5617         int i;
5618
5619         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5620
5621         i = FUNC_ILT_BASE(func);
5622
5623         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5624         if (CHIP_IS_E1H(bp)) {
5625                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5626                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5627         } else /* E1 */
5628                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5629                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5630
5631
5632         if (CHIP_IS_E1H(bp)) {
5633                 for (i = 0; i < 9; i++)
5634                         bnx2x_init_block(bp,
5635                                          cm_start[func][i], cm_end[func][i]);
5636
5637                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5638                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5639         }
5640
5641         /* HC init per function */
5642         if (CHIP_IS_E1H(bp)) {
5643                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5644
5645                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5646                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5647         }
5648         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5649
5650         if (CHIP_IS_E1H(bp))
5651                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5652
5653         /* Reset PCIE errors for debug */
5654         REG_WR(bp, 0x2114, 0xffffffff);
5655         REG_WR(bp, 0x2120, 0xffffffff);
5656
5657         return 0;
5658 }
5659
5660 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5661 {
5662         int i, rc = 0;
5663
5664         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5665            BP_FUNC(bp), load_code);
5666
5667         bp->dmae_ready = 0;
5668         mutex_init(&bp->dmae_mutex);
5669         bnx2x_gunzip_init(bp);
5670
5671         switch (load_code) {
5672         case FW_MSG_CODE_DRV_LOAD_COMMON:
5673                 rc = bnx2x_init_common(bp);
5674                 if (rc)
5675                         goto init_hw_err;
5676                 /* no break */
5677
5678         case FW_MSG_CODE_DRV_LOAD_PORT:
5679                 bp->dmae_ready = 1;
5680                 rc = bnx2x_init_port(bp);
5681                 if (rc)
5682                         goto init_hw_err;
5683                 /* no break */
5684
5685         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5686                 bp->dmae_ready = 1;
5687                 rc = bnx2x_init_func(bp);
5688                 if (rc)
5689                         goto init_hw_err;
5690                 break;
5691
5692         default:
5693                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5694                 break;
5695         }
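        /* The deliberate fall-through above means a LOAD_COMMON response
           runs common, port and function init in sequence, while
           LOAD_FUNCTION runs only the per-function stage. */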
5696
5697         if (!BP_NOMCP(bp)) {
5698                 int func = BP_FUNC(bp);
5699
5700                 bp->fw_drv_pulse_wr_seq =
5701                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5702                                  DRV_PULSE_SEQ_MASK);
5703                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5704                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5705                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5706         } else
5707                 bp->func_stx = 0;
5708
5709         /* this needs to be done before gunzip end */
5710         bnx2x_zero_def_sb(bp);
5711         for_each_queue(bp, i)
5712                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5713
5714 init_hw_err:
5715         bnx2x_gunzip_end(bp);
5716
5717         return rc;
5718 }
5719
5720 /* send the MCP a request, block until there is a reply */
5721 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5722 {
5723         int func = BP_FUNC(bp);
5724         u32 seq = ++bp->fw_seq;
5725         u32 rc = 0;
5726         u32 cnt = 1;
5727         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5728
5729         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5730         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5731
5732         do {
5733                 /* let the FW do its magic ... */
5734                 msleep(delay);
5735
5736                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5737
5738                 /* Give the FW up to 2 seconds (200*10ms) */
5739         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5740
5741         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5742            cnt*delay, rc, seq);
5743
5744         /* is this a reply to our command? */
5745         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5746                 rc &= FW_MSG_CODE_MASK;
5747
5748         } else {
5749                 /* FW BUG! */
5750                 BNX2X_ERR("FW failed to respond!\n");
5751                 bnx2x_fw_dump(bp);
5752                 rc = 0;
5753         }
5754
5755         return rc;
5756 }
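
/* Usage sketch (illustrative): the callers below pair each request with
 * the sequence number embedded in drv_mb_header, e.g.
 *
 *      u32 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *      if (!load_code)
 *              return -EBUSY;  (no reply within ~2s - see bnx2x_nic_load)
 *
 * so a stale value left in fw_mb_header is never mistaken for the
 * answer to a new command.
 */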
5757
5758 static void bnx2x_free_mem(struct bnx2x *bp)
5759 {
5760
5761 #define BNX2X_PCI_FREE(x, y, size) \
5762         do { \
5763                 if (x) { \
5764                         pci_free_consistent(bp->pdev, size, x, y); \
5765                         x = NULL; \
5766                         y = 0; \
5767                 } \
5768         } while (0)
5769
5770 #define BNX2X_FREE(x) \
5771         do { \
5772                 if (x) { \
5773                         vfree(x); \
5774                         x = NULL; \
5775                 } \
5776         } while (0)
5777
5778         int i;
5779
5780         /* fastpath */
5781         for_each_queue(bp, i) {
5782
5783                 /* Status blocks */
5784                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5785                                bnx2x_fp(bp, i, status_blk_mapping),
5786                                sizeof(struct host_status_block) +
5787                                sizeof(struct eth_tx_db_data));
5788
5789                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5790                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5791                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5792                                bnx2x_fp(bp, i, tx_desc_mapping),
5793                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5794
5795                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5796                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5797                                bnx2x_fp(bp, i, rx_desc_mapping),
5798                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5799
5800                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5801                                bnx2x_fp(bp, i, rx_comp_mapping),
5802                                sizeof(struct eth_fast_path_rx_cqe) *
5803                                NUM_RCQ_BD);
5804
5805                 /* SGE ring */
5806                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5807                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5808                                bnx2x_fp(bp, i, rx_sge_mapping),
5809                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5810         }
5811         /* end of fastpath */
5812
5813         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5814                        sizeof(struct host_def_status_block));
5815
5816         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5817                        sizeof(struct bnx2x_slowpath));
5818
5819 #ifdef BCM_ISCSI
5820         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5821         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5822         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5823         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5824 #endif
5825         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5826
5827 #undef BNX2X_PCI_FREE
5828 #undef BNX2X_FREE
5829 }
5830
5831 static int bnx2x_alloc_mem(struct bnx2x *bp)
5832 {
5833
5834 #define BNX2X_PCI_ALLOC(x, y, size) \
5835         do { \
5836                 x = pci_alloc_consistent(bp->pdev, size, y); \
5837                 if (x == NULL) \
5838                         goto alloc_mem_err; \
5839                 memset(x, 0, size); \
5840         } while (0)
5841
5842 #define BNX2X_ALLOC(x, size) \
5843         do { \
5844                 x = vmalloc(size); \
5845                 if (x == NULL) \
5846                         goto alloc_mem_err; \
5847                 memset(x, 0, size); \
5848         } while (0)
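
/* Unwind sketch (illustrative): each allocation below jumps to
 * alloc_mem_err on failure, where bnx2x_free_mem() releases everything
 * allocated so far; since the free macros in bnx2x_free_mem() tolerate
 * NULL pointers, a partially completed allocation is safe to unwind
 * this way.
 */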
5849
5850         int i;
5851
5852         /* fastpath */
5853         for_each_queue(bp, i) {
5854                 bnx2x_fp(bp, i, bp) = bp;
5855
5856                 /* Status blocks */
5857                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5858                                 &bnx2x_fp(bp, i, status_blk_mapping),
5859                                 sizeof(struct host_status_block) +
5860                                 sizeof(struct eth_tx_db_data));
5861
5862                 bnx2x_fp(bp, i, hw_tx_prods) =
5863                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5864
5865                 bnx2x_fp(bp, i, tx_prods_mapping) =
5866                                 bnx2x_fp(bp, i, status_blk_mapping) +
5867                                 sizeof(struct host_status_block);
5868
5869                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5870                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5871                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5872                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5873                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5874                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5875
5876                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5877                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5878                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5879                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5880                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5881
5882                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5883                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5884                                 sizeof(struct eth_fast_path_rx_cqe) *
5885                                 NUM_RCQ_BD);
5886
5887                 /* SGE ring */
5888                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5889                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5890                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5891                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5892                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5893         }
5894         /* end of fastpath */
5895
5896         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5897                         sizeof(struct host_def_status_block));
5898
5899         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5900                         sizeof(struct bnx2x_slowpath));
5901
5902 #ifdef BCM_ISCSI
5903         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5904
5905         /* Initialize T1 */
5906         for (i = 0; i < 64*1024; i += 64) {
5907                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5908                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5909         }
5910
5911         /* allocate the searcher T2 table;
5912            we allocate 1/4 of the T1 allocation for T2
5913            (which is not entered into the ILT) */
5914         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5915
5916         /* Initialize T2 */
5917         for (i = 0; i < 16*1024; i += 64)
5918                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5919
5920         /* now fixup the last line in the block to point to the next block */
5921         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5922
5923         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5924         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5925
5926         /* QM queues (128*MAX_CONN) */
5927         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5928 #endif
5929
5930         /* Slow path ring */
5931         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5932
5933         return 0;
5934
5935 alloc_mem_err:
5936         bnx2x_free_mem(bp);
5937         return -ENOMEM;
5938
5939 #undef BNX2X_PCI_ALLOC
5940 #undef BNX2X_ALLOC
5941 }
5942
5943 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5944 {
5945         int i;
5946
5947         for_each_queue(bp, i) {
5948                 struct bnx2x_fastpath *fp = &bp->fp[i];
5949
5950                 u16 bd_cons = fp->tx_bd_cons;
5951                 u16 sw_prod = fp->tx_pkt_prod;
5952                 u16 sw_cons = fp->tx_pkt_cons;
5953
5954                 while (sw_cons != sw_prod) {
5955                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5956                         sw_cons++;
5957                 }
5958         }
5959 }
5960
5961 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5962 {
5963         int i, j;
5964
5965         for_each_queue(bp, j) {
5966                 struct bnx2x_fastpath *fp = &bp->fp[j];
5967
5968                 for (i = 0; i < NUM_RX_BD; i++) {
5969                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5970                         struct sk_buff *skb = rx_buf->skb;
5971
5972                         if (skb == NULL)
5973                                 continue;
5974
5975                         pci_unmap_single(bp->pdev,
5976                                          pci_unmap_addr(rx_buf, mapping),
5977                                          bp->rx_buf_size,
5978                                          PCI_DMA_FROMDEVICE);
5979
5980                         rx_buf->skb = NULL;
5981                         dev_kfree_skb(skb);
5982                 }
5983                 if (!fp->disable_tpa)
5984                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5985                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
5986                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
5987         }
5988 }
5989
5990 static void bnx2x_free_skbs(struct bnx2x *bp)
5991 {
5992         bnx2x_free_tx_skbs(bp);
5993         bnx2x_free_rx_skbs(bp);
5994 }
5995
5996 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5997 {
5998         int i, offset = 1;
5999
6000         free_irq(bp->msix_table[0].vector, bp->dev);
6001         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6002            bp->msix_table[0].vector);
6003
6004         for_each_queue(bp, i) {
6005                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6006                    "state %x\n", i, bp->msix_table[i + offset].vector,
6007                    bnx2x_fp(bp, i, state));
6008
6009                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
6010                         BNX2X_ERR("IRQ of fp #%d being freed while "
6011                                   "state != closed\n", i);
6012
6013                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6014         }
6015 }
6016
6017 static void bnx2x_free_irq(struct bnx2x *bp)
6018 {
6019         if (bp->flags & USING_MSIX_FLAG) {
6020                 bnx2x_free_msix_irqs(bp);
6021                 pci_disable_msix(bp->pdev);
6022                 bp->flags &= ~USING_MSIX_FLAG;
6023
6024         } else
6025                 free_irq(bp->pdev->irq, bp->dev);
6026 }
6027
6028 static int bnx2x_enable_msix(struct bnx2x *bp)
6029 {
6030         int i, rc, offset;
6031
6032         bp->msix_table[0].entry = 0;
6033         offset = 1;
6034         DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6035
6036         for_each_queue(bp, i) {
6037                 int igu_vec = offset + i + BP_L_ID(bp);
6038
6039                 bp->msix_table[i + offset].entry = igu_vec;
6040                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6041                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6042         }
6043
6044         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6045                              bp->num_queues + offset);
6046         if (rc) {
6047                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6048                 return -1;
6049         }
6050         bp->flags |= USING_MSIX_FLAG;
6051
6052         return 0;
6053 }
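
/* Resulting vector layout (illustrative, for num_queues == 2):
 *      msix_table[0] - slowpath status block
 *      msix_table[1] - fastpath queue 0 (IGU vector 1 + BP_L_ID)
 *      msix_table[2] - fastpath queue 1 (IGU vector 2 + BP_L_ID)
 */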
6054
6055 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6056 {
6057         int i, rc, offset = 1;
6058
6059         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6060                          bp->dev->name, bp->dev);
6061         if (rc) {
6062                 BNX2X_ERR("request sp irq failed\n");
6063                 return -EBUSY;
6064         }
6065
6066         for_each_queue(bp, i) {
6067                 rc = request_irq(bp->msix_table[i + offset].vector,
6068                                  bnx2x_msix_fp_int, 0,
6069                                  bp->dev->name, &bp->fp[i]);
6070                 if (rc) {
6071                         BNX2X_ERR("request fp #%d irq failed  rc -%d\n",
6072                                   i + offset, -rc);
6073                         bnx2x_free_msix_irqs(bp);
6074                         return -EBUSY;
6075                 }
6076
6077                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6078         }
6079
6080         return 0;
6081 }
6082
6083 static int bnx2x_req_irq(struct bnx2x *bp)
6084 {
6085         int rc;
6086
6087         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6088                          bp->dev->name, bp->dev);
6089         if (!rc)
6090                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6091
6092         return rc;
6093 }
6094
6095 static void bnx2x_napi_enable(struct bnx2x *bp)
6096 {
6097         int i;
6098
6099         for_each_queue(bp, i)
6100                 napi_enable(&bnx2x_fp(bp, i, napi));
6101 }
6102
6103 static void bnx2x_napi_disable(struct bnx2x *bp)
6104 {
6105         int i;
6106
6107         for_each_queue(bp, i)
6108                 napi_disable(&bnx2x_fp(bp, i, napi));
6109 }
6110
6111 static void bnx2x_netif_start(struct bnx2x *bp)
6112 {
6113         if (atomic_dec_and_test(&bp->intr_sem)) {
6114                 if (netif_running(bp->dev)) {
6115                         if (bp->state == BNX2X_STATE_OPEN)
6116                                 netif_wake_queue(bp->dev);
6117                         bnx2x_napi_enable(bp);
6118                         bnx2x_int_enable(bp);
6119                 }
6120         }
6121 }
6122
6123 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6124 {
6125         bnx2x_int_disable_sync(bp, disable_hw);
6126         if (netif_running(bp->dev)) {
6127                 bnx2x_napi_disable(bp);
6128                 netif_tx_disable(bp->dev);
6129                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6130         }
6131 }
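
/* Note (assumption based on the code above): intr_sem acts as a
 * disable count - bnx2x_netif_start() only re-enables NAPI and
 * interrupts once atomic_dec_and_test() brings it to zero, pairing
 * with the increment done in bnx2x_int_disable_sync().
 */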
6132
6133 /*
6134  * Init service functions
6135  */
6136
6137 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6138 {
6139         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6140         int port = BP_PORT(bp);
6141
6142         /* CAM allocation
6143          * unicasts 0-31:port0 32-63:port1
6144          * multicast 64-127:port0 128-191:port1
6145          */
6146         config->hdr.length_6b = 2;
6147         config->hdr.offset = port ? 32 : 0;
6148         config->hdr.client_id = BP_CL_ID(bp);
6149         config->hdr.reserved1 = 0;
6150
6151         /* primary MAC */
6152         config->config_table[0].cam_entry.msb_mac_addr =
6153                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6154         config->config_table[0].cam_entry.middle_mac_addr =
6155                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6156         config->config_table[0].cam_entry.lsb_mac_addr =
6157                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
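        /* Example (illustrative): for MAC 00:10:18:ab:cd:ef, dev_addr[]
           holds {00,10,18,ab,cd,ef}; on a little-endian host swab16()
           turns the three loads into 0x0010, 0x18ab and 0xcdef - the
           big-endian halfword order the CAM entry expects. */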
6158         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6159         if (set)
6160                 config->config_table[0].target_table_entry.flags = 0;
6161         else
6162                 CAM_INVALIDATE(config->config_table[0]);
6163         config->config_table[0].target_table_entry.client_id = 0;
6164         config->config_table[0].target_table_entry.vlan_id = 0;
6165
6166         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6167            (set ? "setting" : "clearing"),
6168            config->config_table[0].cam_entry.msb_mac_addr,
6169            config->config_table[0].cam_entry.middle_mac_addr,
6170            config->config_table[0].cam_entry.lsb_mac_addr);
6171
6172         /* broadcast */
6173         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6174         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6175         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6176         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6177         if (set)
6178                 config->config_table[1].target_table_entry.flags =
6179                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6180         else
6181                 CAM_INVALIDATE(config->config_table[1]);
6182         config->config_table[1].target_table_entry.client_id = 0;
6183         config->config_table[1].target_table_entry.vlan_id = 0;
6184
6185         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6186                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6187                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6188 }
6189
6190 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6191 {
6192         struct mac_configuration_cmd_e1h *config =
6193                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6194
6195         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6196                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6197                 return;
6198         }
6199
6200         /* CAM allocation for E1H
6201          * unicasts: by func number
6202          * multicast: 20+FUNC*20, 20 each
6203          */
6204         config->hdr.length_6b = 1;
6205         config->hdr.offset = BP_FUNC(bp);
6206         config->hdr.client_id = BP_CL_ID(bp);
6207         config->hdr.reserved1 = 0;
6208
6209         /* primary MAC */
6210         config->config_table[0].msb_mac_addr =
6211                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6212         config->config_table[0].middle_mac_addr =
6213                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6214         config->config_table[0].lsb_mac_addr =
6215                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6216         config->config_table[0].client_id = BP_L_ID(bp);
6217         config->config_table[0].vlan_id = 0;
6218         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6219         if (set)
6220                 config->config_table[0].flags = BP_PORT(bp);
6221         else
6222                 config->config_table[0].flags =
6223                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6224
6225         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6226            (set ? "setting" : "clearing"),
6227            config->config_table[0].msb_mac_addr,
6228            config->config_table[0].middle_mac_addr,
6229            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6230
6231         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6232                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6233                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6234 }
6235
6236 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6237                              int *state_p, int poll)
6238 {
6239         /* can take a while if any port is running */
6240         int cnt = 500;
6241
6242         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6243            poll ? "polling" : "waiting", state, idx);
6244
6245         might_sleep();
6246         while (cnt--) {
6247                 if (poll) {
6248                         bnx2x_rx_int(bp->fp, 10);
6249                         /* if the index is different from 0,
6250                          * the reply for some commands will
6251                          * be on the non-default queue
6252                          */
6253                         if (idx)
6254                                 bnx2x_rx_int(&bp->fp[idx], 10);
6255                 }
6256
6257                 mb(); /* state is changed by bnx2x_sp_event() */
6258                 if (*state_p == state)
6259                         return 0;
6260
6261                 msleep(1);
6262         }
6263
6264         /* timeout! */
6265         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6266                   poll ? "polling" : "waiting", state, idx);
6267 #ifdef BNX2X_STOP_ON_ERROR
6268         bnx2x_panic();
6269 #endif
6270
6271         return -EBUSY;
6272 }
6273
6274 static int bnx2x_setup_leading(struct bnx2x *bp)
6275 {
6276         int rc;
6277
6278         /* reset IGU state */
6279         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6280
6281         /* SETUP ramrod */
6282         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6283
6284         /* Wait for completion */
6285         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6286
6287         return rc;
6288 }
6289
6290 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6291 {
6292         /* reset IGU state */
6293         bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6294
6295         /* SETUP ramrod */
6296         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6297         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6298
6299         /* Wait for completion */
6300         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6301                                  &(bp->fp[index].state), 0);
6302 }
6303
6304 static int bnx2x_poll(struct napi_struct *napi, int budget);
6305 static void bnx2x_set_rx_mode(struct net_device *dev);
6306
6307 /* must be called with rtnl_lock */
6308 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6309 {
6310         u32 load_code;
6311         int i, rc;
6312 #ifdef BNX2X_STOP_ON_ERROR
6313         if (unlikely(bp->panic))
6314                 return -EPERM;
6315 #endif
6316
6317         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6318
6319         /* Send LOAD_REQUEST command to the MCP.
6320            It returns the type of LOAD command:
6321            if this is the first port to be initialized,
6322            the common blocks should be initialized; otherwise not
6323         */
6324         if (!BP_NOMCP(bp)) {
6325                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6326                 if (!load_code) {
6327                         BNX2X_ERR("MCP response failure, aborting\n");
6328                         return -EBUSY;
6329                 }
6330                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6331                         return -EBUSY; /* other port in diagnostic mode */
6332
6333         } else {
6334                 int port = BP_PORT(bp);
6335
6336                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6337                    load_count[0], load_count[1], load_count[2]);
6338                 load_count[0]++;
6339                 load_count[1 + port]++;
6340                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6341                    load_count[0], load_count[1], load_count[2]);
6342                 if (load_count[0] == 1)
6343                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6344                 else if (load_count[1 + port] == 1)
6345                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6346                 else
6347                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6348         }
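
        /* Example (illustrative): with no MCP, the first function to load
           raises load_count to {1,1,0} and picks LOAD_COMMON; the next
           function, on the other port, sees {2,1,1} and picks LOAD_PORT;
           any later function on an already started port gets
           LOAD_FUNCTION. */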
6349
6350         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6351             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6352                 bp->port.pmf = 1;
6353         else
6354                 bp->port.pmf = 0;
6355         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6356
6357         /* if we can't use MSI-X we only need one fp,
6358          * so try to enable MSI-X with the requested number of fp's
6359          * and fall back to INT#A with one fp
6360          */
6361         if (use_inta) {
6362                 bp->num_queues = 1;
6363
6364         } else {
6365                 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6366                         /* user requested number */
6367                         bp->num_queues = use_multi;
6368
6369                 else if (use_multi)
6370                         bp->num_queues = min_t(u32, num_online_cpus(),
6371                                                BP_MAX_QUEUES(bp));
6372                 else
6373                         bp->num_queues = 1;
6374
6375                 if (bnx2x_enable_msix(bp)) {
6376                         /* failed to enable MSI-X */
6377                         bp->num_queues = 1;
6378                         if (use_multi)
6379                                 BNX2X_ERR("Multi requested but failed"
6380                                           " to enable MSI-X\n");
6381                 }
6382         }
6383         DP(NETIF_MSG_IFUP,
6384            "set number of queues to %d\n", bp->num_queues);
6385
6386         if (bnx2x_alloc_mem(bp))
6387                 return -ENOMEM;
6388
6389         for_each_queue(bp, i)
6390                 bnx2x_fp(bp, i, disable_tpa) =
6391                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6392
6393         if (bp->flags & USING_MSIX_FLAG) {
6394                 rc = bnx2x_req_msix_irqs(bp);
6395                 if (rc) {
6396                         pci_disable_msix(bp->pdev);
6397                         goto load_error;
6398                 }
6399         } else {
6400                 bnx2x_ack_int(bp);
6401                 rc = bnx2x_req_irq(bp);
6402                 if (rc) {
6403                         BNX2X_ERR("IRQ request failed, aborting\n");
6404                         goto load_error;
6405                 }
6406         }
6407
6408         for_each_queue(bp, i)
6409                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6410                                bnx2x_poll, 128);
6411
6412         /* Initialize HW */
6413         rc = bnx2x_init_hw(bp, load_code);
6414         if (rc) {
6415                 BNX2X_ERR("HW init failed, aborting\n");
6416                 goto load_int_disable;
6417         }
6418
6419         /* Setup NIC internals and enable interrupts */
6420         bnx2x_nic_init(bp, load_code);
6421
6422         /* Send LOAD_DONE command to MCP */
6423         if (!BP_NOMCP(bp)) {
6424                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6425                 if (!load_code) {
6426                         BNX2X_ERR("MCP response failure, aborting\n");
6427                         rc = -EBUSY;
6428                         goto load_rings_free;
6429                 }
6430         }
6431
6432         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6433
6434         rc = bnx2x_setup_leading(bp);
6435         if (rc) {
6436                 BNX2X_ERR("Setup leading failed!\n");
6437                 goto load_netif_stop;
6438         }
6439
6440         if (CHIP_IS_E1H(bp))
6441                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6442                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6443                         bp->state = BNX2X_STATE_DISABLED;
6444                 }
6445
6446         if (bp->state == BNX2X_STATE_OPEN)
6447                 for_each_nondefault_queue(bp, i) {
6448                         rc = bnx2x_setup_multi(bp, i);
6449                         if (rc)
6450                                 goto load_netif_stop;
6451                 }
6452
6453         if (CHIP_IS_E1(bp))
6454                 bnx2x_set_mac_addr_e1(bp, 1);
6455         else
6456                 bnx2x_set_mac_addr_e1h(bp, 1);
6457
6458         if (bp->port.pmf)
6459                 bnx2x_initial_phy_init(bp);
6460
6461         /* Start fast path */
6462         switch (load_mode) {
6463         case LOAD_NORMAL:
6464                 /* Tx queue should only be re-enabled */
6465                 netif_wake_queue(bp->dev);
6466                 bnx2x_set_rx_mode(bp->dev);
6467                 break;
6468
6469         case LOAD_OPEN:
6470                 netif_start_queue(bp->dev);
6471                 bnx2x_set_rx_mode(bp->dev);
6472                 if (bp->flags & USING_MSIX_FLAG)
6473                         printk(KERN_INFO PFX "%s: using MSI-X\n",
6474                                bp->dev->name);
6475                 break;
6476
6477         case LOAD_DIAG:
6478                 bnx2x_set_rx_mode(bp->dev);
6479                 bp->state = BNX2X_STATE_DIAG;
6480                 break;
6481
6482         default:
6483                 break;
6484         }
6485
6486         if (!bp->port.pmf)
6487                 bnx2x__link_status_update(bp);
6488
6489         /* start the timer */
6490         mod_timer(&bp->timer, jiffies + bp->current_interval);
6491
6492
6493         return 0;
6494
6495 load_netif_stop:
6496         bnx2x_napi_disable(bp);
6497 load_rings_free:
6498         /* Free SKBs, SGEs, TPA pool and driver internals */
6499         bnx2x_free_skbs(bp);
6500         for_each_queue(bp, i)
6501                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6502 load_int_disable:
6503         bnx2x_int_disable_sync(bp, 1);
6504         /* Release IRQs */
6505         bnx2x_free_irq(bp);
6506 load_error:
6507         bnx2x_free_mem(bp);
6508         bp->port.pmf = 0;
6509
6510         /* TBD we really need to reset the chip
6511            if we want to recover from this */
6512         return rc;
6513 }
6514
6515 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6516 {
6517         int rc;
6518
6519         /* halt the connection */
6520         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6521         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6522
6523         /* Wait for completion */
6524         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6525                                &(bp->fp[index].state), 1);
6526         if (rc) /* timeout */
6527                 return rc;
6528
6529         /* delete cfc entry */
6530         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6531
6532         /* Wait for completion */
6533         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6534                                &(bp->fp[index].state), 1);
6535         return rc;
6536 }
6537
6538 static int bnx2x_stop_leading(struct bnx2x *bp)
6539 {
6540         u16 dsb_sp_prod_idx;
6541         /* if the other port is handling traffic,
6542            this can take a lot of time */
6543         int cnt = 500;
6544         int rc;
6545
6546         might_sleep();
6547
6548         /* Send HALT ramrod */
6549         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6550         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6551
6552         /* Wait for completion */
6553         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6554                                &(bp->fp[0].state), 1);
6555         if (rc) /* timeout */
6556                 return rc;
6557
6558         dsb_sp_prod_idx = *bp->dsb_sp_prod;
6559
6560         /* Send PORT_DELETE ramrod */
6561         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6562
6563         /* Wait for the completion to arrive on the default status block;
6564            we are going to reset the chip anyway,
6565            so there is not much to do if this times out
6566          */
6567         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6568                 if (!cnt) {
6569                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6570                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6571                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
6572 #ifdef BNX2X_STOP_ON_ERROR
6573                         bnx2x_panic();
6574 #else
6575                         rc = -EBUSY;
6576 #endif
6577                         break;
6578                 }
6579                 cnt--;
6580                 msleep(1);
6581         }
6582         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6583         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6584
6585         return rc;
6586 }
6587
6588 static void bnx2x_reset_func(struct bnx2x *bp)
6589 {
6590         int port = BP_PORT(bp);
6591         int func = BP_FUNC(bp);
6592         int base, i;
6593
6594         /* Configure IGU */
6595         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6596         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6597
6598         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6599
6600         /* Clear ILT */
6601         base = FUNC_ILT_BASE(func);
6602         for (i = base; i < base + ILT_PER_FUNC; i++)
6603                 bnx2x_ilt_wr(bp, i, 0);
6604 }
6605
6606 static void bnx2x_reset_port(struct bnx2x *bp)
6607 {
6608         int port = BP_PORT(bp);
6609         u32 val;
6610
6611         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6612
6613         /* Do not receive packets into the BRB */
6614         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6615         /* Do not direct receive packets that are not for the MCP to the BRB */
6616         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6617                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6618
6619         /* Configure AEU */
6620         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6621
6622         msleep(100);
6623         /* Check for BRB port occupancy */
6624         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6625         if (val)
6626                 DP(NETIF_MSG_IFDOWN,
6627                    "BRB1 is not empty  %d blocks are occupied\n", val);
6628
6629         /* TODO: Close Doorbell port? */
6630 }
6631
6632 static void bnx2x_reset_common(struct bnx2x *bp)
6633 {
6634         /* reset_common */
6635         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6636                0xd3ffff7f);
6637         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6638 }
6639
6640 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6641 {
6642         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6643            BP_FUNC(bp), reset_code);
6644
6645         switch (reset_code) {
6646         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6647                 bnx2x_reset_port(bp);
6648                 bnx2x_reset_func(bp);
6649                 bnx2x_reset_common(bp);
6650                 break;
6651
6652         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6653                 bnx2x_reset_port(bp);
6654                 bnx2x_reset_func(bp);
6655                 break;
6656
6657         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6658                 bnx2x_reset_func(bp);
6659                 break;
6660
6661         default:
6662                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6663                 break;
6664         }
6665 }
6666
6667 /* must be called with rtnl_lock */
6668 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6669 {
6670         int port = BP_PORT(bp);
6671         u32 reset_code = 0;
6672         int i, cnt, rc;
6673
6674         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6675
6676         bp->rx_mode = BNX2X_RX_MODE_NONE;
6677         bnx2x_set_storm_rx_mode(bp);
6678
6679         bnx2x_netif_stop(bp, 1);
6680         if (!netif_running(bp->dev))
6681                 bnx2x_napi_disable(bp);
6682         del_timer_sync(&bp->timer);
6683         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6684                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6685         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6686
6687         /* Release IRQs */
6688         bnx2x_free_irq(bp);
6689
6690         /* Wait until tx fast path tasks complete */
6691         for_each_queue(bp, i) {
6692                 struct bnx2x_fastpath *fp = &bp->fp[i];
6693
6694                 cnt = 1000;
6695                 smp_rmb();
6696                 while (BNX2X_HAS_TX_WORK(fp)) {
6697
6698                         bnx2x_tx_int(fp, 1000);
6699                         if (!cnt) {
6700                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6701                                           i);
6702 #ifdef BNX2X_STOP_ON_ERROR
6703                                 bnx2x_panic();
6704                                 return -EBUSY;
6705 #else
6706                                 break;
6707 #endif
6708                         }
6709                         cnt--;
6710                         msleep(1);
6711                         smp_rmb();
6712                 }
6713         }
6714         /* Give HW time to discard old tx messages */
6715         msleep(1);
6716
6717         if (CHIP_IS_E1(bp)) {
6718                 struct mac_configuration_cmd *config =
6719                                                 bnx2x_sp(bp, mcast_config);
6720
6721                 bnx2x_set_mac_addr_e1(bp, 0);
6722
6723                 for (i = 0; i < config->hdr.length_6b; i++)
6724                         CAM_INVALIDATE(config->config_table[i]);
6725
6726                 config->hdr.length_6b = i;
6727                 if (CHIP_REV_IS_SLOW(bp))
6728                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6729                 else
6730                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6731                 config->hdr.client_id = BP_CL_ID(bp);
6732                 config->hdr.reserved1 = 0;
6733
6734                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6735                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6736                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6737
6738         } else { /* E1H */
6739                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6740
6741                 bnx2x_set_mac_addr_e1h(bp, 0);
6742
6743                 for (i = 0; i < MC_HASH_SIZE; i++)
6744                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6745         }
6746
6747         if (unload_mode == UNLOAD_NORMAL)
6748                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6749
6750         else if (bp->flags & NO_WOL_FLAG) {
6751                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6752                 if (CHIP_IS_E1H(bp))
6753                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6754
6755         } else if (bp->wol) {
6756                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6757                 u8 *mac_addr = bp->dev->dev_addr;
6758                 u32 val;
6759                 /* The mac address is written to entries 1-4 to
6760                    preserve entry 0 which is used by the PMF */
6761                 u8 entry = (BP_E1HVN(bp) + 1)*8;
6762
6763                 val = (mac_addr[0] << 8) | mac_addr[1];
6764                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6765
6766                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6767                       (mac_addr[4] << 8) | mac_addr[5];
6768                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6769
6770                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6771
6772         } else
6773                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6774
6775         /* Close the multi and leading connections;
6776            completions for the ramrods are collected synchronously */
6777         for_each_nondefault_queue(bp, i)
6778                 if (bnx2x_stop_multi(bp, i))
6779                         goto unload_error;
6780
6781         rc = bnx2x_stop_leading(bp);
6782         if (rc) {
6783                 BNX2X_ERR("Stop leading failed!\n");
6784 #ifdef BNX2X_STOP_ON_ERROR
6785                 return -EBUSY;
6786 #else
6787                 goto unload_error;
6788 #endif
6789         }
6790
6791 unload_error:
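             /* With no MCP the driver mimics the management firmware's
              * reference counting in the static load_count[] array
              * (common, port0, port1) to derive the unload scope itself.
              */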
6792         if (!BP_NOMCP(bp))
6793                 reset_code = bnx2x_fw_command(bp, reset_code);
6794         else {
6795                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6796                    load_count[0], load_count[1], load_count[2]);
6797                 load_count[0]--;
6798                 load_count[1 + port]--;
6799                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6800                    load_count[0], load_count[1], load_count[2]);
6801                 if (load_count[0] == 0)
6802                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6803                 else if (load_count[1 + port] == 0)
6804                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6805                 else
6806                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6807         }
6808
6809         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6810             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6811                 bnx2x__link_reset(bp);
6812
6813         /* Reset the chip */
6814         bnx2x_reset_chip(bp, reset_code);
6815
6816         /* Report UNLOAD_DONE to MCP */
6817         if (!BP_NOMCP(bp))
6818                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6819         bp->port.pmf = 0;
6820
6821         /* Free SKBs, SGEs, TPA pool and driver internals */
6822         bnx2x_free_skbs(bp);
6823         for_each_queue(bp, i)
6824                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6825         bnx2x_free_mem(bp);
6826
6827         bp->state = BNX2X_STATE_CLOSED;
6828
6829         netif_carrier_off(bp->dev);
6830
6831         return 0;
6832 }
6833
6834 static void bnx2x_reset_task(struct work_struct *work)
6835 {
6836         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6837
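             /* Runs in process context from the bnx2x workqueue; taking
              * the rtnl lock serializes the unload/load cycle against
              * concurrent open/close of the device.
              */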
6838 #ifdef BNX2X_STOP_ON_ERROR
6839         BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
6840                   " so the reset was not done to allow a debug dump;\n"
6841          KERN_ERR " you will need to reboot when done\n");
6842         return;
6843 #endif
6844
6845         rtnl_lock();
6846
6847         if (!netif_running(bp->dev))
6848                 goto reset_task_exit;
6849
6850         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6851         bnx2x_nic_load(bp, LOAD_NORMAL);
6852
6853 reset_task_exit:
6854         rtnl_unlock();
6855 }
6856
6857 /* end of nic load/unload */
6858
6859 /* ethtool_ops */
6860
6861 /*
6862  * Init service functions
6863  */
6864
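     /* A pre-boot (PXE/UNDI) driver may have left the chip initialized.
      * Detect this via the doorbell CID offset it programs (0x7) and, if
      * so, perform an orderly unload and chip reset so that this driver
      * starts from clean hardware.
      */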
6865 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6866 {
6867         u32 val;
6868
6869         /* Check if there is any driver already loaded */
6870         val = REG_RD(bp, MISC_REG_UNPREPARED);
6871         if (val == 0x1) {
6872                 /* Check if it is the UNDI driver:
6873                  * UNDI initializes the CID offset for the normal
6874                  * doorbell to 0x7 */
6875                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6876                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6877                 if (val == 0x7) {
6878                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6879                         /* save our func */
6880                         int func = BP_FUNC(bp);
6881                         u32 swap_en;
6882                         u32 swap_val;
6883
6884                         /* clear the UNDI indication */
6885                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6886
6887                         BNX2X_DEV_INFO("UNDI is active! resetting device\n");
6888
6889                         /* try to unload UNDI on port 0 */
6890                         bp->func = 0;
6891                         bp->fw_seq =
6892                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6893                                 DRV_MSG_SEQ_NUMBER_MASK);
6894                         reset_code = bnx2x_fw_command(bp, reset_code);
6895
6896                         /* if UNDI is loaded on the other port */
6897                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6898
6899                                 /* send "DONE" for previous unload */
6900                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6901
6902                                 /* unload UNDI on port 1 */
6903                                 bp->func = 1;
6904                                 bp->fw_seq =
6905                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6906                                         DRV_MSG_SEQ_NUMBER_MASK);
6907                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6908
6909                                 bnx2x_fw_command(bp, reset_code);
6910                         }
6911
6912                         /* now it's safe to release the lock */
6913                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6914
6915                         REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6916                                     HC_REG_CONFIG_0), 0x1000);
6917
6918                         /* close input traffic and wait for it */
6919                         /* Do not receive packets into the BRB */
6920                         REG_WR(bp,
6921                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6922                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6923                         /* Do not direct received packets that are not
6924                          * for the MCP to the BRB */
6925                         REG_WR(bp,
6926                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6927                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6928                         /* clear AEU */
6929                         REG_WR(bp,
6930                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6931                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6932                         msleep(10);
6933
6934                         /* save NIG port swap info */
6935                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6936                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6937                         /* reset device */
6938                         REG_WR(bp,
6939                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6940                                0xd3ffffff);
6941                         REG_WR(bp,
6942                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6943                                0x1403);
6944                         /* take the NIG out of reset and restore swap values */
6945                         REG_WR(bp,
6946                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6947                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
6948                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6949                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6950
6951                         /* send unload done to the MCP */
6952                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6953
6954                         /* restore our func and fw_seq */
6955                         bp->func = func;
6956                         bp->fw_seq =
6957                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6958                                 DRV_MSG_SEQ_NUMBER_MASK);
6959
6960                 } else
6961                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6962         }
6963 }
6964
6965 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6966 {
6967         u32 val, val2, val3, val4, id;
6968         u16 pmc;
6969
6970         /* Get the chip revision id and number. */
6971         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6972         val = REG_RD(bp, MISC_REG_CHIP_NUM);
6973         id = ((val & 0xffff) << 16);
6974         val = REG_RD(bp, MISC_REG_CHIP_REV);
6975         id |= ((val & 0xf) << 12);
6976         val = REG_RD(bp, MISC_REG_CHIP_METAL);
6977         id |= ((val & 0xff) << 4);
6978         val = REG_RD(bp, MISC_REG_BOND_ID);
6979         id |= (val & 0xf);
6980         bp->common.chip_id = id;
6981         bp->link_params.chip_id = bp->common.chip_id;
6982         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6983
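             /* The flash size is encoded as a shift count applied to a
              * 1Mbit (128KB) base: flash_size = NVRAM_1MB_SIZE << cfg4.
              */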
6984         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6985         bp->common.flash_size = (NVRAM_1MB_SIZE <<
6986                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
6987         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6988                        bp->common.flash_size, bp->common.flash_size);
6989
6990         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6991         bp->link_params.shmem_base = bp->common.shmem_base;
6992         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6993
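             /* The shared memory block lives in the MCP scratchpad
              * (0xA0000-0xC0000); a base outside that window means the
              * MCP never initialized it, so run without management FW.
              */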
6994         if (!bp->common.shmem_base ||
6995             (bp->common.shmem_base < 0xA0000) ||
6996             (bp->common.shmem_base >= 0xC0000)) {
6997                 BNX2X_DEV_INFO("MCP not active\n");
6998                 bp->flags |= NO_MCP_FLAG;
6999                 return;
7000         }
7001
7002         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7003         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7004                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7005                 BNX2X_ERR("BAD MCP validity signature\n");
7006
7007         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7008         bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7009
7010         BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
7011                        bp->common.hw_config, bp->common.board);
7012
7013         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7014                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7015                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7016
7017         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7018         bp->common.bc_ver = val;
7019         BNX2X_DEV_INFO("bc_ver %X\n", val);
7020         if (val < BNX2X_BC_VER) {
7021                 /* For now only warn;
7022                  * later we might need to enforce this */
7023                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7024                           " please upgrade BC\n", BNX2X_BC_VER, val);
7025         }
7026
7027         if (BP_E1HVN(bp) == 0) {
7028                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7029                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7030         } else {
7031                 /* no WOL capability for E1HVN != 0 */
7032                 bp->flags |= NO_WOL_FLAG;
7033         }
7034         BNX2X_DEV_INFO("%sWoL capable\n",
7035                        (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7036
7037         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7038         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7039         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7040         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7041
7042         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7043                val, val2, val3, val4);
7044 }
7045
7046 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7047                                                     u32 switch_cfg)
7048 {
7049         int port = BP_PORT(bp);
7050         u32 ext_phy_type;
7051
7052         switch (switch_cfg) {
7053         case SWITCH_CFG_1G:
7054                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7055
7056                 ext_phy_type =
7057                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7058                 switch (ext_phy_type) {
7059                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7060                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7061                                        ext_phy_type);
7062
7063                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7064                                                SUPPORTED_10baseT_Full |
7065                                                SUPPORTED_100baseT_Half |
7066                                                SUPPORTED_100baseT_Full |
7067                                                SUPPORTED_1000baseT_Full |
7068                                                SUPPORTED_2500baseX_Full |
7069                                                SUPPORTED_TP |
7070                                                SUPPORTED_FIBRE |
7071                                                SUPPORTED_Autoneg |
7072                                                SUPPORTED_Pause |
7073                                                SUPPORTED_Asym_Pause);
7074                         break;
7075
7076                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7077                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7078                                        ext_phy_type);
7079
7080                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7081                                                SUPPORTED_10baseT_Full |
7082                                                SUPPORTED_100baseT_Half |
7083                                                SUPPORTED_100baseT_Full |
7084                                                SUPPORTED_1000baseT_Full |
7085                                                SUPPORTED_TP |
7086                                                SUPPORTED_FIBRE |
7087                                                SUPPORTED_Autoneg |
7088                                                SUPPORTED_Pause |
7089                                                SUPPORTED_Asym_Pause);
7090                         break;
7091
7092                 default:
7093                         BNX2X_ERR("NVRAM config error. "
7094                                   "BAD SerDes ext_phy_config 0x%x\n",
7095                                   bp->link_params.ext_phy_config);
7096                         return;
7097                 }
7098
7099                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7100                                            port*0x10);
7101                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7102                 break;
7103
7104         case SWITCH_CFG_10G:
7105                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7106
7107                 ext_phy_type =
7108                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7109                 switch (ext_phy_type) {
7110                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7111                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7112                                        ext_phy_type);
7113
7114                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7115                                                SUPPORTED_10baseT_Full |
7116                                                SUPPORTED_100baseT_Half |
7117                                                SUPPORTED_100baseT_Full |
7118                                                SUPPORTED_1000baseT_Full |
7119                                                SUPPORTED_2500baseX_Full |
7120                                                SUPPORTED_10000baseT_Full |
7121                                                SUPPORTED_TP |
7122                                                SUPPORTED_FIBRE |
7123                                                SUPPORTED_Autoneg |
7124                                                SUPPORTED_Pause |
7125                                                SUPPORTED_Asym_Pause);
7126                         break;
7127
7128                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7129                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7130                                        ext_phy_type);
7131
7132                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7133                                                SUPPORTED_FIBRE |
7134                                                SUPPORTED_Pause |
7135                                                SUPPORTED_Asym_Pause);
7136                         break;
7137
7138                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7139                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7140                                        ext_phy_type);
7141
7142                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7143                                                SUPPORTED_1000baseT_Full |
7144                                                SUPPORTED_FIBRE |
7145                                                SUPPORTED_Pause |
7146                                                SUPPORTED_Asym_Pause);
7147                         break;
7148
7149                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7150                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7151                                        ext_phy_type);
7152
7153                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7154                                                SUPPORTED_1000baseT_Full |
7155                                                SUPPORTED_FIBRE |
7156                                                SUPPORTED_Autoneg |
7157                                                SUPPORTED_Pause |
7158                                                SUPPORTED_Asym_Pause);
7159                         break;
7160
7161                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7162                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7163                                        ext_phy_type);
7164
7165                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7166                                                SUPPORTED_2500baseX_Full |
7167                                                SUPPORTED_1000baseT_Full |
7168                                                SUPPORTED_FIBRE |
7169                                                SUPPORTED_Autoneg |
7170                                                SUPPORTED_Pause |
7171                                                SUPPORTED_Asym_Pause);
7172                         break;
7173
7174                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7175                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7176                                        ext_phy_type);
7177
7178                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7179                                                SUPPORTED_TP |
7180                                                SUPPORTED_Autoneg |
7181                                                SUPPORTED_Pause |
7182                                                SUPPORTED_Asym_Pause);
7183                         break;
7184
7185                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7186                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7187                                   bp->link_params.ext_phy_config);
7188                         break;
7189
7190                 default:
7191                         BNX2X_ERR("NVRAM config error. "
7192                                   "BAD XGXS ext_phy_config 0x%x\n",
7193                                   bp->link_params.ext_phy_config);
7194                         return;
7195                 }
7196
7197                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7198                                            port*0x18);
7199                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7200
7201                 break;
7202
7203         default:
7204                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7205                           bp->port.link_config);
7206                 return;
7207         }
7208         bp->link_params.phy_addr = bp->port.phy_addr;
7209
7210         /* mask what we support according to speed_cap_mask */
7211         if (!(bp->link_params.speed_cap_mask &
7212                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7213                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7214
7215         if (!(bp->link_params.speed_cap_mask &
7216                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7217                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7218
7219         if (!(bp->link_params.speed_cap_mask &
7220                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7221                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7222
7223         if (!(bp->link_params.speed_cap_mask &
7224                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7225                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7226
7227         if (!(bp->link_params.speed_cap_mask &
7228                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7229                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7230                                         SUPPORTED_1000baseT_Full);
7231
7232         if (!(bp->link_params.speed_cap_mask &
7233                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7234                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7235
7236         if (!(bp->link_params.speed_cap_mask &
7237                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7238                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7239
7240         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7241 }
7242
7243 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7244 {
7245         bp->link_params.req_duplex = DUPLEX_FULL;
7246
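             /* Translate the NVRAM link_config into the requested
              * speed/duplex and the advertised mode mask, validating each
              * forced speed against the supported mask computed above.
              */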
7247         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7248         case PORT_FEATURE_LINK_SPEED_AUTO:
7249                 if (bp->port.supported & SUPPORTED_Autoneg) {
7250                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7251                         bp->port.advertising = bp->port.supported;
7252                 } else {
7253                         u32 ext_phy_type =
7254                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7255
7256                         if ((ext_phy_type ==
7257                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7258                             (ext_phy_type ==
7259                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7260                                 /* force 10G, no AN */
7261                                 bp->link_params.req_line_speed = SPEED_10000;
7262                                 bp->port.advertising =
7263                                                 (ADVERTISED_10000baseT_Full |
7264                                                  ADVERTISED_FIBRE);
7265                                 break;
7266                         }
7267                         BNX2X_ERR("NVRAM config error. "
7268                                   "Invalid link_config 0x%x"
7269                                   "  Autoneg not supported\n",
7270                                   bp->port.link_config);
7271                         return;
7272                 }
7273                 break;
7274
7275         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7276                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7277                         bp->link_params.req_line_speed = SPEED_10;
7278                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7279                                                 ADVERTISED_TP);
7280                 } else {
7281                         BNX2X_ERR("NVRAM config error. "
7282                                   "Invalid link_config 0x%x"
7283                                   "  speed_cap_mask 0x%x\n",
7284                                   bp->port.link_config,
7285                                   bp->link_params.speed_cap_mask);
7286                         return;
7287                 }
7288                 break;
7289
7290         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7291                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7292                         bp->link_params.req_line_speed = SPEED_10;
7293                         bp->link_params.req_duplex = DUPLEX_HALF;
7294                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7295                                                 ADVERTISED_TP);
7296                 } else {
7297                         BNX2X_ERR("NVRAM config error. "
7298                                   "Invalid link_config 0x%x"
7299                                   "  speed_cap_mask 0x%x\n",
7300                                   bp->port.link_config,
7301                                   bp->link_params.speed_cap_mask);
7302                         return;
7303                 }
7304                 break;
7305
7306         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7307                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7308                         bp->link_params.req_line_speed = SPEED_100;
7309                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7310                                                 ADVERTISED_TP);
7311                 } else {
7312                         BNX2X_ERR("NVRAM config error. "
7313                                   "Invalid link_config 0x%x"
7314                                   "  speed_cap_mask 0x%x\n",
7315                                   bp->port.link_config,
7316                                   bp->link_params.speed_cap_mask);
7317                         return;
7318                 }
7319                 break;
7320
7321         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7322                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7323                         bp->link_params.req_line_speed = SPEED_100;
7324                         bp->link_params.req_duplex = DUPLEX_HALF;
7325                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7326                                                 ADVERTISED_TP);
7327                 } else {
7328                         BNX2X_ERR("NVRAM config error. "
7329                                   "Invalid link_config 0x%x"
7330                                   "  speed_cap_mask 0x%x\n",
7331                                   bp->port.link_config,
7332                                   bp->link_params.speed_cap_mask);
7333                         return;
7334                 }
7335                 break;
7336
7337         case PORT_FEATURE_LINK_SPEED_1G:
7338                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7339                         bp->link_params.req_line_speed = SPEED_1000;
7340                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7341                                                 ADVERTISED_TP);
7342                 } else {
7343                         BNX2X_ERR("NVRAM config error. "
7344                                   "Invalid link_config 0x%x"
7345                                   "  speed_cap_mask 0x%x\n",
7346                                   bp->port.link_config,
7347                                   bp->link_params.speed_cap_mask);
7348                         return;
7349                 }
7350                 break;
7351
7352         case PORT_FEATURE_LINK_SPEED_2_5G:
7353                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7354                         bp->link_params.req_line_speed = SPEED_2500;
7355                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7356                                                 ADVERTISED_TP);
7357                 } else {
7358                         BNX2X_ERR("NVRAM config error. "
7359                                   "Invalid link_config 0x%x"
7360                                   "  speed_cap_mask 0x%x\n",
7361                                   bp->port.link_config,
7362                                   bp->link_params.speed_cap_mask);
7363                         return;
7364                 }
7365                 break;
7366
7367         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7368         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7369         case PORT_FEATURE_LINK_SPEED_10G_KR:
7370                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7371                         bp->link_params.req_line_speed = SPEED_10000;
7372                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7373                                                 ADVERTISED_FIBRE);
7374                 } else {
7375                         BNX2X_ERR("NVRAM config error. "
7376                                   "Invalid link_config 0x%x"
7377                                   "  speed_cap_mask 0x%x\n",
7378                                   bp->port.link_config,
7379                                   bp->link_params.speed_cap_mask);
7380                         return;
7381                 }
7382                 break;
7383
7384         default:
7385                 BNX2X_ERR("NVRAM config error. "
7386                           "BAD link speed link_config 0x%x\n",
7387                           bp->port.link_config);
7388                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7389                 bp->port.advertising = bp->port.supported;
7390                 break;
7391         }
7392
7393         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7394                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7395         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7396             !(bp->port.supported & SUPPORTED_Autoneg))
7397                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7398
7399         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7400                        "  advertising 0x%x\n",
7401                        bp->link_params.req_line_speed,
7402                        bp->link_params.req_duplex,
7403                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7404 }
7405
7406 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7407 {
7408         int port = BP_PORT(bp);
7409         u32 val, val2;
7410
7411         bp->link_params.bp = bp;
7412         bp->link_params.port = port;
7413
7414         bp->link_params.serdes_config =
7415                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7416         bp->link_params.lane_config =
7417                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7418         bp->link_params.ext_phy_config =
7419                 SHMEM_RD(bp,
7420                          dev_info.port_hw_config[port].external_phy_config);
7421         bp->link_params.speed_cap_mask =
7422                 SHMEM_RD(bp,
7423                          dev_info.port_hw_config[port].speed_capability_mask);
7424
7425         bp->port.link_config =
7426                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7427
7428         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7429              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7430                        "  link_config 0x%08x\n",
7431                        bp->link_params.serdes_config,
7432                        bp->link_params.lane_config,
7433                        bp->link_params.ext_phy_config,
7434                        bp->link_params.speed_cap_mask, bp->port.link_config);
7435
7436         bp->link_params.switch_cfg = (bp->port.link_config &
7437                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7438         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7439
7440         bnx2x_link_settings_requested(bp);
7441
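             /* The MAC address is kept in shmem as two 32-bit words: the
              * low 16 bits of mac_upper hold bytes 0-1 and mac_lower
              * holds bytes 2-5, most significant byte first.
              */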
7442         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7443         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7444         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7445         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7446         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7447         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7448         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7449         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7450         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7451         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7452 }
7453
7454 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7455 {
7456         int func = BP_FUNC(bp);
7457         u32 val, val2;
7458         int rc = 0;
7459
7460         bnx2x_get_common_hwinfo(bp);
7461
7462         bp->e1hov = 0;
7463         bp->e1hmf = 0;
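             /* On E1H a physical port may be sliced into several PCI
              * functions told apart on the wire by an outer VLAN tag
              * (E1HOV); a valid tag in the MF configuration means the
              * chip runs in multi-function mode.
              */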
7464         if (CHIP_IS_E1H(bp)) {
7465                 bp->mf_config =
7466                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7467
7468                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7469                        FUNC_MF_CFG_E1HOV_TAG_MASK);
7470                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7471
7472                         bp->e1hov = val;
7473                         bp->e1hmf = 1;
7474                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7475                                        "(0x%04x)\n",
7476                                        func, bp->e1hov, bp->e1hov);
7477                 } else {
7478                         BNX2X_DEV_INFO("Single function mode\n");
7479                         if (BP_E1HVN(bp)) {
7480                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7481                                           "  aborting\n", func);
7482                                 rc = -EPERM;
7483                         }
7484                 }
7485         }
7486
7487         if (!BP_NOMCP(bp)) {
7488                 bnx2x_get_port_hwinfo(bp);
7489
7490                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7491                               DRV_MSG_SEQ_NUMBER_MASK);
7492                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7493         }
7494
7495         if (IS_E1HMF(bp)) {
7496                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7497                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7498                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7499                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7500                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7501                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7502                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7503                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7504                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7505                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7506                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7507                                ETH_ALEN);
7508                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7509                                ETH_ALEN);
7510                 }
7511
7512                 return rc;
7513         }
7514
7515         if (BP_NOMCP(bp)) {
7516                 /* only supposed to happen on emulation/FPGA */
7517                 BNX2X_ERR("warning: random MAC workaround active\n");
7518                 random_ether_addr(bp->dev->dev_addr);
7519                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7520         }
7521
7522         return rc;
7523 }
7524
7525 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7526 {
7527         int func = BP_FUNC(bp);
7528         int rc;
7529
7530         /* Disable interrupt handling until HW is initialized */
7531         atomic_set(&bp->intr_sem, 1);
7532
7533         mutex_init(&bp->port.phy_mutex);
7534
7535         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7536         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7537
7538         rc = bnx2x_get_hwinfo(bp);
7539
7540         /* need to reset chip if undi was active */
7541         if (!BP_NOMCP(bp))
7542                 bnx2x_undi_unload(bp);
7543
7544         if (CHIP_REV_IS_FPGA(bp))
7545                 printk(KERN_ERR PFX "FPGA detected\n");
7546
7547         if (BP_NOMCP(bp) && (func == 0))
7548                 printk(KERN_ERR PFX
7549                        "MCP disabled, must load devices in order!\n");
7550
7551         /* Set TPA flags */
7552         if (disable_tpa) {
7553                 bp->flags &= ~TPA_ENABLE_FLAG;
7554                 bp->dev->features &= ~NETIF_F_LRO;
7555         } else {
7556                 bp->flags |= TPA_ENABLE_FLAG;
7557                 bp->dev->features |= NETIF_F_LRO;
7558         }
7559
7561         bp->tx_ring_size = MAX_TX_AVAIL;
7562         bp->rx_ring_size = MAX_RX_AVAIL;
7563
7564         bp->rx_csum = 1;
7565         bp->rx_offset = 0;
7566
7567         bp->tx_ticks = 50;
7568         bp->rx_ticks = 25;
7569
7570         bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7571         bp->current_interval = (poll ? poll : bp->timer_interval);
7572
7573         init_timer(&bp->timer);
7574         bp->timer.expires = jiffies + bp->current_interval;
7575         bp->timer.data = (unsigned long) bp;
7576         bp->timer.function = bnx2x_timer;
7577
7578         return rc;
7579 }
7580
7581 /*
7582  * ethtool service functions
7583  */
7584
7585 /* All ethtool functions called with rtnl_lock */
7586
7587 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7588 {
7589         struct bnx2x *bp = netdev_priv(dev);
7590
7591         cmd->supported = bp->port.supported;
7592         cmd->advertising = bp->port.advertising;
7593
7594         if (netif_carrier_ok(dev)) {
7595                 cmd->speed = bp->link_vars.line_speed;
7596                 cmd->duplex = bp->link_vars.duplex;
7597         } else {
7598                 cmd->speed = bp->link_params.req_line_speed;
7599                 cmd->duplex = bp->link_params.req_duplex;
7600         }
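             /* In multi-function mode the reported speed is capped at the
              * function's configured maximum bandwidth, kept in mf_config
              * in units of 100Mbps.
              */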
7601         if (IS_E1HMF(bp)) {
7602                 u16 vn_max_rate;
7603
7604                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7605                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7606                 if (vn_max_rate < cmd->speed)
7607                         cmd->speed = vn_max_rate;
7608         }
7609
7610         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7611                 u32 ext_phy_type =
7612                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7613
7614                 switch (ext_phy_type) {
7615                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7616                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7617                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7618                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7619                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7620                         cmd->port = PORT_FIBRE;
7621                         break;
7622
7623                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7624                         cmd->port = PORT_TP;
7625                         break;
7626
7627                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7628                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7629                                   bp->link_params.ext_phy_config);
7630                         break;
7631
7632                 default:
7633                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7634                            bp->link_params.ext_phy_config);
7635                         break;
7636                 }
7637         } else
7638                 cmd->port = PORT_TP;
7639
7640         cmd->phy_address = bp->port.phy_addr;
7641         cmd->transceiver = XCVR_INTERNAL;
7642
7643         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7644                 cmd->autoneg = AUTONEG_ENABLE;
7645         else
7646                 cmd->autoneg = AUTONEG_DISABLE;
7647
7648         cmd->maxtxpkt = 0;
7649         cmd->maxrxpkt = 0;
7650
7651         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7652            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7653            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7654            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7655            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7656            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7657            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7658
7659         return 0;
7660 }
7661
7662 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7663 {
7664         struct bnx2x *bp = netdev_priv(dev);
7665         u32 advertising;
7666
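             /* In multi-function mode the link is owned by the PMF, so a
              * per-function request is accepted but deliberately ignored.
              */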
7667         if (IS_E1HMF(bp))
7668                 return 0;
7669
7670         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7671            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7672            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7673            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7674            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7675            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7676            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7677
7678         if (cmd->autoneg == AUTONEG_ENABLE) {
7679                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7680                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7681                         return -EINVAL;
7682                 }
7683
7684                 /* advertise the requested speed and duplex if supported */
7685                 cmd->advertising &= bp->port.supported;
7686
7687                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7688                 bp->link_params.req_duplex = DUPLEX_FULL;
7689                 bp->port.advertising |= (ADVERTISED_Autoneg |
7690                                          cmd->advertising);
7691
7692         } else { /* forced speed */
7693                 /* advertise the requested speed and duplex if supported */
7694                 switch (cmd->speed) {
7695                 case SPEED_10:
7696                         if (cmd->duplex == DUPLEX_FULL) {
7697                                 if (!(bp->port.supported &
7698                                       SUPPORTED_10baseT_Full)) {
7699                                         DP(NETIF_MSG_LINK,
7700                                            "10M full not supported\n");
7701                                         return -EINVAL;
7702                                 }
7703
7704                                 advertising = (ADVERTISED_10baseT_Full |
7705                                                ADVERTISED_TP);
7706                         } else {
7707                                 if (!(bp->port.supported &
7708                                       SUPPORTED_10baseT_Half)) {
7709                                         DP(NETIF_MSG_LINK,
7710                                            "10M half not supported\n");
7711                                         return -EINVAL;
7712                                 }
7713
7714                                 advertising = (ADVERTISED_10baseT_Half |
7715                                                ADVERTISED_TP);
7716                         }
7717                         break;
7718
7719                 case SPEED_100:
7720                         if (cmd->duplex == DUPLEX_FULL) {
7721                                 if (!(bp->port.supported &
7722                                                 SUPPORTED_100baseT_Full)) {
7723                                         DP(NETIF_MSG_LINK,
7724                                            "100M full not supported\n");
7725                                         return -EINVAL;
7726                                 }
7727
7728                                 advertising = (ADVERTISED_100baseT_Full |
7729                                                ADVERTISED_TP);
7730                         } else {
7731                                 if (!(bp->port.supported &
7732                                                 SUPPORTED_100baseT_Half)) {
7733                                         DP(NETIF_MSG_LINK,
7734                                            "100M half not supported\n");
7735                                         return -EINVAL;
7736                                 }
7737
7738                                 advertising = (ADVERTISED_100baseT_Half |
7739                                                ADVERTISED_TP);
7740                         }
7741                         break;
7742
7743                 case SPEED_1000:
7744                         if (cmd->duplex != DUPLEX_FULL) {
7745                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
7746                                 return -EINVAL;
7747                         }
7748
7749                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7750                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
7751                                 return -EINVAL;
7752                         }
7753
7754                         advertising = (ADVERTISED_1000baseT_Full |
7755                                        ADVERTISED_TP);
7756                         break;
7757
7758                 case SPEED_2500:
7759                         if (cmd->duplex != DUPLEX_FULL) {
7760                                 DP(NETIF_MSG_LINK,
7761                                    "2.5G half not supported\n");
7762                                 return -EINVAL;
7763                         }
7764
7765                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7766                                 DP(NETIF_MSG_LINK,
7767                                    "2.5G full not supported\n");
7768                                 return -EINVAL;
7769                         }
7770
7771                         advertising = (ADVERTISED_2500baseX_Full |
7772                                        ADVERTISED_TP);
7773                         break;
7774
7775                 case SPEED_10000:
7776                         if (cmd->duplex != DUPLEX_FULL) {
7777                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
7778                                 return -EINVAL;
7779                         }
7780
7781                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7782                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
7783                                 return -EINVAL;
7784                         }
7785
7786                         advertising = (ADVERTISED_10000baseT_Full |
7787                                        ADVERTISED_FIBRE);
7788                         break;
7789
7790                 default:
7791                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
7792                         return -EINVAL;
7793                 }
7794
7795                 bp->link_params.req_line_speed = cmd->speed;
7796                 bp->link_params.req_duplex = cmd->duplex;
7797                 bp->port.advertising = advertising;
7798         }
7799
7800         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7801            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7802            bp->link_params.req_line_speed, bp->link_params.req_duplex,
7803            bp->port.advertising);
7804
7805         if (netif_running(dev)) {
7806                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7807                 bnx2x_link_set(bp);
7808         }
7809
7810         return 0;
7811 }
7812
7813 #define PHY_FW_VER_LEN                  10
7814
7815 static void bnx2x_get_drvinfo(struct net_device *dev,
7816                               struct ethtool_drvinfo *info)
7817 {
7818         struct bnx2x *bp = netdev_priv(dev);
7819         u8 phy_fw_ver[PHY_FW_VER_LEN];
7820
7821         strcpy(info->driver, DRV_MODULE_NAME);
7822         strcpy(info->version, DRV_MODULE_VERSION);
7823
7824         phy_fw_ver[0] = '\0';
7825         if (bp->port.pmf) {
7826                 bnx2x_acquire_phy_lock(bp);
7827                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7828                                              (bp->state != BNX2X_STATE_CLOSED),
7829                                              phy_fw_ver, PHY_FW_VER_LEN);
7830                 bnx2x_release_phy_lock(bp);
7831         }
7832
7833         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7834                  (bp->common.bc_ver & 0xff0000) >> 16,
7835                  (bp->common.bc_ver & 0xff00) >> 8,
7836                  (bp->common.bc_ver & 0xff),
7837                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7838         strcpy(info->bus_info, pci_name(bp->pdev));
7839         info->n_stats = BNX2X_NUM_STATS;
7840         info->testinfo_len = BNX2X_NUM_TESTS;
7841         info->eedump_len = bp->common.flash_size;
7842         info->regdump_len = 0;
7843 }
7844
7845 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7846 {
7847         struct bnx2x *bp = netdev_priv(dev);
7848
7849         if (bp->flags & NO_WOL_FLAG) {
7850                 wol->supported = 0;
7851                 wol->wolopts = 0;
7852         } else {
7853                 wol->supported = WAKE_MAGIC;
7854                 if (bp->wol)
7855                         wol->wolopts = WAKE_MAGIC;
7856                 else
7857                         wol->wolopts = 0;
7858         }
7859         memset(&wol->sopass, 0, sizeof(wol->sopass));
7860 }
7861
7862 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7863 {
7864         struct bnx2x *bp = netdev_priv(dev);
7865
7866         if (wol->wolopts & ~WAKE_MAGIC)
7867                 return -EINVAL;
7868
7869         if (wol->wolopts & WAKE_MAGIC) {
7870                 if (bp->flags & NO_WOL_FLAG)
7871                         return -EINVAL;
7872
7873                 bp->wol = 1;
7874         } else
7875                 bp->wol = 0;
7876
7877         return 0;
7878 }
7879
7880 static u32 bnx2x_get_msglevel(struct net_device *dev)
7881 {
7882         struct bnx2x *bp = netdev_priv(dev);
7883
7884         return bp->msglevel;
7885 }
7886
7887 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7888 {
7889         struct bnx2x *bp = netdev_priv(dev);
7890
7891         if (capable(CAP_NET_ADMIN))
7892                 bp->msglevel = level;
7893 }
7894
7895 static int bnx2x_nway_reset(struct net_device *dev)
7896 {
7897         struct bnx2x *bp = netdev_priv(dev);
7898
7899         if (!bp->port.pmf)
7900                 return 0;
7901
7902         if (netif_running(dev)) {
7903                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7904                 bnx2x_link_set(bp);
7905         }
7906
7907         return 0;
7908 }
7909
7910 static int bnx2x_get_eeprom_len(struct net_device *dev)
7911 {
7912         struct bnx2x *bp = netdev_priv(dev);
7913
7914         return bp->common.flash_size;
7915 }
7916
7917 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7918 {
7919         int port = BP_PORT(bp);
7920         int count, i;
7921         u32 val = 0;
7922
7923         /* adjust timeout for emulation/FPGA */
7924         count = NVRAM_TIMEOUT_COUNT;
7925         if (CHIP_REV_IS_SLOW(bp))
7926                 count *= 100;
7927
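             /* NVRAM is shared between both ports and the MCP; access is
              * arbitrated by a per-port request/grant handshake in the
              * SW_ARB register, polled below in 5us steps.
              */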
7928         /* request access to nvram interface */
7929         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7930                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7931
7932         for (i = 0; i < count*10; i++) {
7933                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7934                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7935                         break;
7936
7937                 udelay(5);
7938         }
7939
7940         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7941                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7942                 return -EBUSY;
7943         }
7944
7945         return 0;
7946 }
7947
7948 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7949 {
7950         int port = BP_PORT(bp);
7951         int count, i;
7952         u32 val = 0;
7953
7954         /* adjust timeout for emulation/FPGA */
7955         count = NVRAM_TIMEOUT_COUNT;
7956         if (CHIP_REV_IS_SLOW(bp))
7957                 count *= 100;
7958
7959         /* relinquish nvram interface */
7960         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7961                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7962
7963         for (i = 0; i < count*10; i++) {
7964                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7965                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7966                         break;
7967
7968                 udelay(5);
7969         }
7970
7971         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7972                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7973                 return -EBUSY;
7974         }
7975
7976         return 0;
7977 }
7978
7979 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7980 {
7981         u32 val;
7982
7983         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7984
7985         /* enable both bits, even on read */
7986         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7987                (val | MCPR_NVM_ACCESS_ENABLE_EN |
7988                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
7989 }
7990
7991 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7992 {
7993         u32 val;
7994
7995         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7996
7997         /* disable both bits, even after read */
7998         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7999                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8000                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8001 }
8002
8003 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8004                                   u32 cmd_flags)
8005 {
8006         int count, i, rc;
8007         u32 val;
8008
8009         /* build the command word */
8010         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8011
8012         /* need to clear DONE bit separately */
8013         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8014
8015         /* address of the NVRAM to read from */
8016         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8017                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8018
8019         /* issue a read command */
8020         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8021
8022         /* adjust timeout for emulation/FPGA */
8023         count = NVRAM_TIMEOUT_COUNT;
8024         if (CHIP_REV_IS_SLOW(bp))
8025                 count *= 100;
8026
8027         /* wait for completion */
8028         *ret_val = 0;
8029         rc = -EBUSY;
8030         for (i = 0; i < count; i++) {
8031                 udelay(5);
8032                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8033
8034                 if (val & MCPR_NVM_COMMAND_DONE) {
8035                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8036                         /* we read nvram data in cpu order,
8037                          * but ethtool sees it as an array of bytes;
8038                          * converting to big-endian does the work */
8039                         val = cpu_to_be32(val);
8040                         *ret_val = val;
8041                         rc = 0;
8042                         break;
8043                 }
8044         }
8045
8046         return rc;
8047 }
8048
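/* Read a dword-aligned range from NVRAM into ret_buf.  The transfer
 * is bracketed with the FIRST/LAST command flags and runs with the
 * NVRAM lock and access window held.
 */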
8049 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8050                             int buf_size)
8051 {
8052         int rc;
8053         u32 cmd_flags;
8054         u32 val;
8055
8056         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8057                 DP(BNX2X_MSG_NVM,
8058                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8059                    offset, buf_size);
8060                 return -EINVAL;
8061         }
8062
8063         if (offset + buf_size > bp->common.flash_size) {
8064                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8065                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8066                    offset, buf_size, bp->common.flash_size);
8067                 return -EINVAL;
8068         }
8069
8070         /* request access to nvram interface */
8071         rc = bnx2x_acquire_nvram_lock(bp);
8072         if (rc)
8073                 return rc;
8074
8075         /* enable access to nvram interface */
8076         bnx2x_enable_nvram_access(bp);
8077
8078         /* read the first word(s) */
8079         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8080         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8081                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8082                 memcpy(ret_buf, &val, 4);
8083
8084                 /* advance to the next dword */
8085                 offset += sizeof(u32);
8086                 ret_buf += sizeof(u32);
8087                 buf_size -= sizeof(u32);
8088                 cmd_flags = 0;
8089         }
8090
8091         if (rc == 0) {
8092                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8093                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8094                 memcpy(ret_buf, &val, 4);
8095         }
8096
8097         /* disable access to nvram interface */
8098         bnx2x_disable_nvram_access(bp);
8099         bnx2x_release_nvram_lock(bp);
8100
8101         return rc;
8102 }
8103
8104 static int bnx2x_get_eeprom(struct net_device *dev,
8105                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8106 {
8107         struct bnx2x *bp = netdev_priv(dev);
8108         int rc;
8109
8110         if (!netif_running(dev))
8111                 return -EAGAIN;
8112
8113         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8114            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8115            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8116            eeprom->len, eeprom->len);
8117
8118         /* parameters already validated in ethtool_get_eeprom */
8119
8120         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8121
8122         return rc;
8123 }
8124
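/* Write a single dword to NVRAM: program the data and the address,
 * issue a DOIT+WR command and poll for the DONE bit.
 */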
8125 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8126                                    u32 cmd_flags)
8127 {
8128         int count, i, rc;
8129
8130         /* build the command word */
8131         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8132
8133         /* need to clear DONE bit separately */
8134         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8135
8136         /* write the data */
8137         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8138
8139         /* address of the NVRAM to write to */
8140         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8141                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8142
8143         /* issue the write command */
8144         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8145
8146         /* adjust timeout for emulation/FPGA */
8147         count = NVRAM_TIMEOUT_COUNT;
8148         if (CHIP_REV_IS_SLOW(bp))
8149                 count *= 100;
8150
8151         /* wait for completion */
8152         rc = -EBUSY;
8153         for (i = 0; i < count; i++) {
8154                 udelay(5);
8155                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8156                 if (val & MCPR_NVM_COMMAND_DONE) {
8157                         rc = 0;
8158                         break;
8159                 }
8160         }
8161
8162         return rc;
8163 }
8164
8165 #define BYTE_OFFSET(offset)             (8 * ((offset) & 0x03))
8166
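/* Write a single byte to NVRAM: read the containing aligned dword,
 * patch the target byte and write the dword back.
 */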
8167 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8168                               int buf_size)
8169 {
8170         int rc;
8171         u32 cmd_flags;
8172         u32 align_offset;
8173         u32 val;
8174
8175         if (offset + buf_size > bp->common.flash_size) {
8176                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8177                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8178                    offset, buf_size, bp->common.flash_size);
8179                 return -EINVAL;
8180         }
8181
8182         /* request access to nvram interface */
8183         rc = bnx2x_acquire_nvram_lock(bp);
8184         if (rc)
8185                 return rc;
8186
8187         /* enable access to nvram interface */
8188         bnx2x_enable_nvram_access(bp);
8189
8190         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8191         align_offset = (offset & ~0x03);
8192         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8193
8194         if (rc == 0) {
8195                 val &= ~(0xff << BYTE_OFFSET(offset));
8196                 val |= (*data_buf << BYTE_OFFSET(offset));
8197
8198                 /* nvram data is returned as an array of bytes;
8199                  * convert it back to cpu order */
8200                 val = be32_to_cpu(val);
8201
8202                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8203                                              cmd_flags);
8204         }
8205
8206         /* disable access to nvram interface */
8207         bnx2x_disable_nvram_access(bp);
8208         bnx2x_release_nvram_lock(bp);
8209
8210         return rc;
8211 }
8212
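/* Write a dword-aligned buffer to NVRAM.  The FIRST/LAST command
 * flags are raised around NVRAM page boundaries; single-byte
 * requests (as issued by ethtool) go through bnx2x_nvram_write1().
 */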
8213 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8214                              int buf_size)
8215 {
8216         int rc;
8217         u32 cmd_flags;
8218         u32 val;
8219         u32 written_so_far;
8220
8221         if (buf_size == 1)      /* ethtool */
8222                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8223
8224         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8225                 DP(BNX2X_MSG_NVM,
8226                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8227                    offset, buf_size);
8228                 return -EINVAL;
8229         }
8230
8231         if (offset + buf_size > bp->common.flash_size) {
8232                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8233                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8234                    offset, buf_size, bp->common.flash_size);
8235                 return -EINVAL;
8236         }
8237
8238         /* request access to nvram interface */
8239         rc = bnx2x_acquire_nvram_lock(bp);
8240         if (rc)
8241                 return rc;
8242
8243         /* enable access to nvram interface */
8244         bnx2x_enable_nvram_access(bp);
8245
8246         written_so_far = 0;
8247         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8248         while ((written_so_far < buf_size) && (rc == 0)) {
8249                 if (written_so_far == (buf_size - sizeof(u32)))
8250                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8251                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8252                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8253                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8254                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8255
8256                 memcpy(&val, data_buf, 4);
8257
8258                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8259
8260                 /* advance to the next dword */
8261                 offset += sizeof(u32);
8262                 data_buf += sizeof(u32);
8263                 written_so_far += sizeof(u32);
8264                 cmd_flags = 0;
8265         }
8266
8267         /* disable access to nvram interface */
8268         bnx2x_disable_nvram_access(bp);
8269         bnx2x_release_nvram_lock(bp);
8270
8271         return rc;
8272 }
8273
8274 static int bnx2x_set_eeprom(struct net_device *dev,
8275                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8276 {
8277         struct bnx2x *bp = netdev_priv(dev);
8278         int rc;
8279
8280         if (!netif_running(dev))
8281                 return -EAGAIN;
8282
8283         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8284            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8285            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8286            eeprom->len, eeprom->len);
8287
8288         /* parameters already validated in ethtool_set_eeprom */
8289
8290         /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8291         if (eeprom->magic == 0x00504859)
8292                 if (bp->port.pmf) {
8293
8294                         bnx2x_acquire_phy_lock(bp);
8295                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8296                                              bp->link_params.ext_phy_config,
8297                                              (bp->state != BNX2X_STATE_CLOSED),
8298                                              eebuf, eeprom->len);
8299                         if ((bp->state == BNX2X_STATE_OPEN) ||
8300                             (bp->state == BNX2X_STATE_DISABLED)) {
8301                                 rc |= bnx2x_link_reset(&bp->link_params,
8302                                                        &bp->link_vars);
8303                                 rc |= bnx2x_phy_init(&bp->link_params,
8304                                                      &bp->link_vars);
8305                         }
8306                         bnx2x_release_phy_lock(bp);
8307
8308                 } else /* Only the PMF can access the PHY */
8309                         return -EINVAL;
8310         else
8311                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8312
8313         return rc;
8314 }
8315
8316 static int bnx2x_get_coalesce(struct net_device *dev,
8317                               struct ethtool_coalesce *coal)
8318 {
8319         struct bnx2x *bp = netdev_priv(dev);
8320
8321         memset(coal, 0, sizeof(struct ethtool_coalesce));
8322
8323         coal->rx_coalesce_usecs = bp->rx_ticks;
8324         coal->tx_coalesce_usecs = bp->tx_ticks;
8325
8326         return 0;
8327 }
8328
8329 static int bnx2x_set_coalesce(struct net_device *dev,
8330                               struct ethtool_coalesce *coal)
8331 {
8332         struct bnx2x *bp = netdev_priv(dev);
8333
8334         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8335         if (bp->rx_ticks > 3000)
8336                 bp->rx_ticks = 3000;
8337
8338         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8339         if (bp->tx_ticks > 3000)
8340                 bp->tx_ticks = 3000;
8341
8342         if (netif_running(dev))
8343                 bnx2x_update_coalesce(bp);
8344
8345         return 0;
8346 }
8347
8348 static void bnx2x_get_ringparam(struct net_device *dev,
8349                                 struct ethtool_ringparam *ering)
8350 {
8351         struct bnx2x *bp = netdev_priv(dev);
8352
8353         ering->rx_max_pending = MAX_RX_AVAIL;
8354         ering->rx_mini_max_pending = 0;
8355         ering->rx_jumbo_max_pending = 0;
8356
8357         ering->rx_pending = bp->rx_ring_size;
8358         ering->rx_mini_pending = 0;
8359         ering->rx_jumbo_pending = 0;
8360
8361         ering->tx_max_pending = MAX_TX_AVAIL;
8362         ering->tx_pending = bp->tx_ring_size;
8363 }
8364
8365 static int bnx2x_set_ringparam(struct net_device *dev,
8366                                struct ethtool_ringparam *ering)
8367 {
8368         struct bnx2x *bp = netdev_priv(dev);
8369         int rc = 0;
8370
8371         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8372             (ering->tx_pending > MAX_TX_AVAIL) ||
8373             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8374                 return -EINVAL;
8375
8376         bp->rx_ring_size = ering->rx_pending;
8377         bp->tx_ring_size = ering->tx_pending;
8378
8379         if (netif_running(dev)) {
8380                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8381                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8382         }
8383
8384         return rc;
8385 }
8386
8387 static void bnx2x_get_pauseparam(struct net_device *dev,
8388                                  struct ethtool_pauseparam *epause)
8389 {
8390         struct bnx2x *bp = netdev_priv(dev);
8391
8392         epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8393                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8394
8395         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8396                             BNX2X_FLOW_CTRL_RX);
8397         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8398                             BNX2X_FLOW_CTRL_TX);
8399
8400         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8401            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8402            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8403 }
8404
8405 static int bnx2x_set_pauseparam(struct net_device *dev,
8406                                 struct ethtool_pauseparam *epause)
8407 {
8408         struct bnx2x *bp = netdev_priv(dev);
8409
8410         if (IS_E1HMF(bp))
8411                 return 0;
8412
8413         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8414            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8415            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8416
8417         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8418
8419         if (epause->rx_pause)
8420                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8421
8422         if (epause->tx_pause)
8423                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8424
8425         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8426                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8427
8428         if (epause->autoneg) {
8429                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8430                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
8431                         return -EINVAL;
8432                 }
8433
8434                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8435                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8436         }
8437
8438         DP(NETIF_MSG_LINK,
8439            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8440
8441         if (netif_running(dev)) {
8442                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8443                 bnx2x_link_set(bp);
8444         }
8445
8446         return 0;
8447 }
8448
8449 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8450 {
8451         struct bnx2x *bp = netdev_priv(dev);
8452         int changed = 0;
8453         int rc = 0;
8454
8455         /* TPA requires Rx CSUM offloading */
8456         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8457                 if (!(dev->features & NETIF_F_LRO)) {
8458                         dev->features |= NETIF_F_LRO;
8459                         bp->flags |= TPA_ENABLE_FLAG;
8460                         changed = 1;
8461                 }
8462
8463         } else if (dev->features & NETIF_F_LRO) {
8464                 dev->features &= ~NETIF_F_LRO;
8465                 bp->flags &= ~TPA_ENABLE_FLAG;
8466                 changed = 1;
8467         }
8468
8469         if (changed && netif_running(dev)) {
8470                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8471                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8472         }
8473
8474         return rc;
8475 }
8476
8477 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8478 {
8479         struct bnx2x *bp = netdev_priv(dev);
8480
8481         return bp->rx_csum;
8482 }
8483
8484 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8485 {
8486         struct bnx2x *bp = netdev_priv(dev);
8487         int rc = 0;
8488
8489         bp->rx_csum = data;
8490
8491         /* Disable TPA when Rx CSUM is disabled; otherwise all
8492            TPA'ed packets will be discarded due to a wrong TCP CSUM */
8493         if (!data) {
8494                 u32 flags = ethtool_op_get_flags(dev);
8495
8496                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8497         }
8498
8499         return rc;
8500 }
8501
8502 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8503 {
8504         if (data) {
8505                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8506                 dev->features |= NETIF_F_TSO6;
8507         } else {
8508                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8509                 dev->features &= ~NETIF_F_TSO6;
8510         }
8511
8512         return 0;
8513 }
8514
8515 static const struct {
8516         char string[ETH_GSTRING_LEN];
8517 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8518         { "register_test (offline)" },
8519         { "memory_test (offline)" },
8520         { "loopback_test (offline)" },
8521         { "nvram_test (online)" },
8522         { "interrupt_test (online)" },
8523         { "link_test (online)" },
8524         { "idle check (online)" },
8525         { "MC errors (online)" }
8526 };
8527
8528 static int bnx2x_self_test_count(struct net_device *dev)
8529 {
8530         return BNX2X_NUM_TESTS;
8531 }
8532
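/* Walk reg_tbl[], writing 0x00000000 and then 0xffffffff to each
 * register (offset1 is the per-port stride) and verifying that the
 * value reads back correctly under the given mask.  The original
 * value is restored after each check.
 */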
8533 static int bnx2x_test_registers(struct bnx2x *bp)
8534 {
8535         int idx, i, rc = -ENODEV;
8536         u32 wr_val = 0;
8537         int port = BP_PORT(bp);
8538         static const struct {
8539                 u32  offset0;
8540                 u32  offset1;
8541                 u32  mask;
8542         } reg_tbl[] = {
8543 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8544                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8545                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
8546                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8547                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8548                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8549                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8550                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8551                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8552                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8553 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8554                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
8555                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8556                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8557                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8558                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8559                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8560                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8561                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8562                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8563 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8564                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8565                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8566                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8567                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8568                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8569                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8570                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8571                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8572                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8573 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8574                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8575                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8576                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8577                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8578                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8579                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8580                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8581
8582                 { 0xffffffff, 0, 0x00000000 }
8583         };
8584
8585         if (!netif_running(bp->dev))
8586                 return rc;
8587
8588         /* Run the test twice:
8589            first by writing 0x00000000, then by writing 0xffffffff */
8590         for (idx = 0; idx < 2; idx++) {
8591
8592                 switch (idx) {
8593                 case 0:
8594                         wr_val = 0;
8595                         break;
8596                 case 1:
8597                         wr_val = 0xffffffff;
8598                         break;
8599                 }
8600
8601                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8602                         u32 offset, mask, save_val, val;
8603
8604                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8605                         mask = reg_tbl[i].mask;
8606
8607                         save_val = REG_RD(bp, offset);
8608
8609                         REG_WR(bp, offset, wr_val);
8610                         val = REG_RD(bp, offset);
8611
8612                         /* Restore the original register's value */
8613                         REG_WR(bp, offset, save_val);
8614
8615                         /* verify the value is as expected */
8616                         if ((val & mask) != (wr_val & mask))
8617                                 goto test_reg_exit;
8618                 }
8619         }
8620
8621         rc = 0;
8622
8623 test_reg_exit:
8624         return rc;
8625 }
8626
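/* Read through the internal memories listed in mem_tbl[] and then
 * verify that no unexpected parity errors were latched, masking out
 * the bits that are expected to be set on E1/E1H respectively.
 */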
8627 static int bnx2x_test_memory(struct bnx2x *bp)
8628 {
8629         int i, j, rc = -ENODEV;
8630         u32 val;
8631         static const struct {
8632                 u32 offset;
8633                 int size;
8634         } mem_tbl[] = {
8635                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8636                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8637                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8638                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8639                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8640                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8641                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8642
8643                 { 0xffffffff, 0 }
8644         };
8645         static const struct {
8646                 char *name;
8647                 u32 offset;
8648                 u32 e1_mask;
8649                 u32 e1h_mask;
8650         } prty_tbl[] = {
8651                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
8652                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
8653                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
8654                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
8655                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
8656                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
8657
8658                 { NULL, 0xffffffff, 0, 0 }
8659         };
8660
8661         if (!netif_running(bp->dev))
8662                 return rc;
8663
8664         /* Go through all the memories */
8665         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8666                 for (j = 0; j < mem_tbl[i].size; j++)
8667                         REG_RD(bp, mem_tbl[i].offset + j*4);
8668
8669         /* Check the parity status */
8670         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8671                 val = REG_RD(bp, prty_tbl[i].offset);
8672                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8673                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8674                         DP(NETIF_MSG_HW,
8675                            "%s is 0x%x\n", prty_tbl[i].name, val);
8676                         goto test_mem_exit;
8677                 }
8678         }
8679
8680         rc = 0;
8681
8682 test_mem_exit:
8683         return rc;
8684 }
8685
8686 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8687 {
8688         int cnt = 1000;
8689
8690         if (link_up)
8691                 while (bnx2x_link_test(bp) && cnt--)
8692                         msleep(10);
8693 }
8694
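/* Send a single 1514-byte frame to ourselves on fp[0] in MAC or PHY
 * loopback and verify it is received intact: consumer indices, CQE
 * flags, packet length and payload are all checked.
 */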
8695 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8696 {
8697         unsigned int pkt_size, num_pkts, i;
8698         struct sk_buff *skb;
8699         unsigned char *packet;
8700         struct bnx2x_fastpath *fp = &bp->fp[0];
8701         u16 tx_start_idx, tx_idx;
8702         u16 rx_start_idx, rx_idx;
8703         u16 pkt_prod;
8704         struct sw_tx_bd *tx_buf;
8705         struct eth_tx_bd *tx_bd;
8706         dma_addr_t mapping;
8707         union eth_rx_cqe *cqe;
8708         u8 cqe_fp_flags;
8709         struct sw_rx_bd *rx_buf;
8710         u16 len;
8711         int rc = -ENODEV;
8712
8713         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8714                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8715                 bnx2x_acquire_phy_lock(bp);
8716                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8717                 bnx2x_release_phy_lock(bp);
8718
8719         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8720                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8721                 bnx2x_acquire_phy_lock(bp);
8722                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8723                 bnx2x_release_phy_lock(bp);
8724                 /* wait until link state is restored */
8725                 bnx2x_wait_for_link(bp, link_up);
8726
8727         } else
8728                 return -EINVAL;
8729
8730         pkt_size = 1514;
8731         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8732         if (!skb) {
8733                 rc = -ENOMEM;
8734                 goto test_loopback_exit;
8735         }
8736         packet = skb_put(skb, pkt_size);
8737         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8738         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8739         for (i = ETH_HLEN; i < pkt_size; i++)
8740                 packet[i] = (unsigned char) (i & 0xff);
8741
8742         num_pkts = 0;
8743         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8744         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8745
8746         pkt_prod = fp->tx_pkt_prod++;
8747         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8748         tx_buf->first_bd = fp->tx_bd_prod;
8749         tx_buf->skb = skb;
8750
8751         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8752         mapping = pci_map_single(bp->pdev, skb->data,
8753                                  skb_headlen(skb), PCI_DMA_TODEVICE);
8754         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8755         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8756         tx_bd->nbd = cpu_to_le16(1);
8757         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8758         tx_bd->vlan = cpu_to_le16(pkt_prod);
8759         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8760                                        ETH_TX_BD_FLAGS_END_BD);
8761         tx_bd->general_data = ((UNICAST_ADDRESS <<
8762                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8763
8764         wmb();
8765
8766         fp->hw_tx_prods->bds_prod =
8767                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8768         mb(); /* FW restriction: must not reorder writing nbd and packets */
8769         fp->hw_tx_prods->packets_prod =
8770                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8771         DOORBELL(bp, FP_IDX(fp), 0);
8772
8773         mmiowb();
8774
8775         num_pkts++;
8776         fp->tx_bd_prod++;
8777         bp->dev->trans_start = jiffies;
8778
8779         udelay(100);
8780
8781         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8782         if (tx_idx != tx_start_idx + num_pkts)
8783                 goto test_loopback_exit;
8784
8785         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8786         if (rx_idx != rx_start_idx + num_pkts)
8787                 goto test_loopback_exit;
8788
8789         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8790         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8791         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8792                 goto test_loopback_rx_exit;
8793
8794         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8795         if (len != pkt_size)
8796                 goto test_loopback_rx_exit;
8797
8798         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8799         skb = rx_buf->skb;
8800         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8801         for (i = ETH_HLEN; i < pkt_size; i++)
8802                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8803                         goto test_loopback_rx_exit;
8804
8805         rc = 0;
8806
8807 test_loopback_rx_exit:
8808
8809         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8810         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8811         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8812         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8813
8814         /* Update producers */
8815         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8816                              fp->rx_sge_prod);
8817
8818 test_loopback_exit:
8819         bp->link_params.loopback_mode = LOOPBACK_NONE;
8820
8821         return rc;
8822 }
8823
8824 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8825 {
8826         int rc = 0;
8827
8828         if (!netif_running(bp->dev))
8829                 return BNX2X_LOOPBACK_FAILED;
8830
8831         bnx2x_netif_stop(bp, 1);
8832
8833         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8834                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8835                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8836         }
8837
8838         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8839                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8840                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8841         }
8842
8843         bnx2x_netif_start(bp);
8844
8845         return rc;
8846 }
8847
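/* CRC-32 residue: running ether_crc_le() over a block that ends with
 * its own little-endian CRC-32 always yields this constant.
 */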
8848 #define CRC32_RESIDUAL                  0xdebb20e3
8849
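/* Verify the NVRAM bootstrap magic and the embedded CRC-32 of every
 * region listed in nvram_tbl[].
 */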
8850 static int bnx2x_test_nvram(struct bnx2x *bp)
8851 {
8852         static const struct {
8853                 int offset;
8854                 int size;
8855         } nvram_tbl[] = {
8856                 {     0,  0x14 }, /* bootstrap */
8857                 {  0x14,  0xec }, /* dir */
8858                 { 0x100, 0x350 }, /* manuf_info */
8859                 { 0x450,  0xf0 }, /* feature_info */
8860                 { 0x640,  0x64 }, /* upgrade_key_info */
8861                 { 0x6a4,  0x64 },
8862                 { 0x708,  0x70 }, /* manuf_key_info */
8863                 { 0x778,  0x70 },
8864                 {     0,     0 }
8865         };
8866         u32 buf[0x350 / 4];
8867         u8 *data = (u8 *)buf;
8868         int i, rc;
8869         u32 magic, csum;
8870
8871         rc = bnx2x_nvram_read(bp, 0, data, 4);
8872         if (rc) {
8873                 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8874                 goto test_nvram_exit;
8875         }
8876
8877         magic = be32_to_cpu(buf[0]);
8878         if (magic != 0x669955aa) {
8879                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8880                 rc = -ENODEV;
8881                 goto test_nvram_exit;
8882         }
8883
8884         for (i = 0; nvram_tbl[i].size; i++) {
8885
8886                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8887                                       nvram_tbl[i].size);
8888                 if (rc) {
8889                         DP(NETIF_MSG_PROBE,
8890                            "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8891                         goto test_nvram_exit;
8892                 }
8893
8894                 csum = ether_crc_le(nvram_tbl[i].size, data);
8895                 if (csum != CRC32_RESIDUAL) {
8896                         DP(NETIF_MSG_PROBE,
8897                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8898                         rc = -ENODEV;
8899                         goto test_nvram_exit;
8900                 }
8901         }
8902
8903 test_nvram_exit:
8904         return rc;
8905 }
8906
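/* Interrupt test: post a benign SET_MAC ramrod on the slowpath and
 * wait briefly for its completion, which is delivered by interrupt.
 */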
8907 static int bnx2x_test_intr(struct bnx2x *bp)
8908 {
8909         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8910         int i, rc;
8911
8912         if (!netif_running(bp->dev))
8913                 return -ENODEV;
8914
8915         config->hdr.length_6b = 0;
8916         if (CHIP_IS_E1(bp))
8917                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
8918         else
8919                 config->hdr.offset = BP_FUNC(bp);
8920         config->hdr.client_id = BP_CL_ID(bp);
8921         config->hdr.reserved1 = 0;
8922
8923         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8924                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8925                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8926         if (rc == 0) {
8927                 bp->set_mac_pending++;
8928                 for (i = 0; i < 10; i++) {
8929                         if (!bp->set_mac_pending)
8930                                 break;
8931                         msleep_interruptible(10);
8932                 }
8933                 if (i == 10)
8934                         rc = -ENODEV;
8935         }
8936
8937         return rc;
8938 }
8939
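/* ethtool self-test entry point.  Offline tests (registers, memory,
 * loopback) reload the NIC in diagnostic mode and are not supported
 * in E1H multi-function mode; the online tests run as-is.
 */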
8940 static void bnx2x_self_test(struct net_device *dev,
8941                             struct ethtool_test *etest, u64 *buf)
8942 {
8943         struct bnx2x *bp = netdev_priv(dev);
8944
8945         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8946
8947         if (!netif_running(dev))
8948                 return;
8949
8950         /* offline tests are not supported in MF mode */
8951         if (IS_E1HMF(bp))
8952                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8953
8954         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8955                 u8 link_up;
8956
8957                 link_up = bp->link_vars.link_up;
8958                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8959                 bnx2x_nic_load(bp, LOAD_DIAG);
8960                 /* wait until link state is restored */
8961                 bnx2x_wait_for_link(bp, link_up);
8962
8963                 if (bnx2x_test_registers(bp) != 0) {
8964                         buf[0] = 1;
8965                         etest->flags |= ETH_TEST_FL_FAILED;
8966                 }
8967                 if (bnx2x_test_memory(bp) != 0) {
8968                         buf[1] = 1;
8969                         etest->flags |= ETH_TEST_FL_FAILED;
8970                 }
8971                 buf[2] = bnx2x_test_loopback(bp, link_up);
8972                 if (buf[2] != 0)
8973                         etest->flags |= ETH_TEST_FL_FAILED;
8974
8975                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8976                 bnx2x_nic_load(bp, LOAD_NORMAL);
8977                 /* wait until link state is restored */
8978                 bnx2x_wait_for_link(bp, link_up);
8979         }
8980         if (bnx2x_test_nvram(bp) != 0) {
8981                 buf[3] = 1;
8982                 etest->flags |= ETH_TEST_FL_FAILED;
8983         }
8984         if (bnx2x_test_intr(bp) != 0) {
8985                 buf[4] = 1;
8986                 etest->flags |= ETH_TEST_FL_FAILED;
8987         }
8988         if (bp->port.pmf)
8989                 if (bnx2x_link_test(bp) != 0) {
8990                         buf[5] = 1;
8991                         etest->flags |= ETH_TEST_FL_FAILED;
8992                 }
8993         buf[7] = bnx2x_mc_assert(bp);
8994         if (buf[7] != 0)
8995                 etest->flags |= ETH_TEST_FL_FAILED;
8996
8997 #ifdef BNX2X_EXTRA_DEBUG
8998         bnx2x_panic_dump(bp);
8999 #endif
9000 }
9001
9002 static const struct {
9003         long offset;
9004         int size;
9005         u32 flags;
9006 #define STATS_FLAGS_PORT                1
9007 #define STATS_FLAGS_FUNC                2
9008         u8 string[ETH_GSTRING_LEN];
9009 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9010 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9011                                 8, STATS_FLAGS_FUNC, "rx_bytes" },
9012         { STATS_OFFSET32(error_bytes_received_hi),
9013                                 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9014         { STATS_OFFSET32(total_bytes_transmitted_hi),
9015                                 8, STATS_FLAGS_FUNC, "tx_bytes" },
9016         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9017                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9018         { STATS_OFFSET32(total_unicast_packets_received_hi),
9019                                 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
9020         { STATS_OFFSET32(total_multicast_packets_received_hi),
9021                                 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
9022         { STATS_OFFSET32(total_broadcast_packets_received_hi),
9023                                 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9024         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9025                                 8, STATS_FLAGS_FUNC, "tx_packets" },
9026         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9027                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9028 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9029                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9030         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9031                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9032         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9033                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9034         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9035                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9036         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9037                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9038         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9039                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9040         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9041                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9042         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9043                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9044         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9045                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9046         { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9047                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9048 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9049                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9050         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9051                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9052         { STATS_OFFSET32(jabber_packets_received),
9053                                 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9054         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9055                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9056         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9057                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9058         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9059                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9060         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9061                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9062         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9063                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9064         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9065                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9066         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9067                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9068 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9069                                 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9070         { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9071                                 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9072         { STATS_OFFSET32(tx_stat_outxonsent_hi),
9073                                 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9074         { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9075                                 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9076         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9077                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9078         { STATS_OFFSET32(mac_filter_discard),
9079                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9080         { STATS_OFFSET32(no_buff_discard),
9081                                 4, STATS_FLAGS_FUNC, "rx_discards" },
9082         { STATS_OFFSET32(xxoverflow_discard),
9083                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9084         { STATS_OFFSET32(brb_drop_hi),
9085                                 8, STATS_FLAGS_PORT, "brb_discard" },
9086         { STATS_OFFSET32(brb_truncate_hi),
9087                                 8, STATS_FLAGS_PORT, "brb_truncate" },
9088 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9089                                 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9090         { STATS_OFFSET32(rx_skb_alloc_failed),
9091                                 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9092 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9093                                 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9094 };
9095
9096 #define IS_NOT_E1HMF_STAT(bp, i) \
9097                 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9098
9099 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9100 {
9101         struct bnx2x *bp = netdev_priv(dev);
9102         int i, j;
9103
9104         switch (stringset) {
9105         case ETH_SS_STATS:
9106                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9107                         if (IS_NOT_E1HMF_STAT(bp, i))
9108                                 continue;
9109                         strcpy(buf + j*ETH_GSTRING_LEN,
9110                                bnx2x_stats_arr[i].string);
9111                         j++;
9112                 }
9113                 break;
9114
9115         case ETH_SS_TEST:
9116                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9117                 break;
9118         }
9119 }
9120
9121 static int bnx2x_get_stats_count(struct net_device *dev)
9122 {
9123         struct bnx2x *bp = netdev_priv(dev);
9124         int i, num_stats = 0;
9125
9126         for (i = 0; i < BNX2X_NUM_STATS; i++) {
9127                 if (IS_NOT_E1HMF_STAT(bp, i))
9128                         continue;
9129                 num_stats++;
9130         }
9131         return num_stats;
9132 }
9133
9134 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9135                                     struct ethtool_stats *stats, u64 *buf)
9136 {
9137         struct bnx2x *bp = netdev_priv(dev);
9138         u32 *hw_stats = (u32 *)&bp->eth_stats;
9139         int i, j;
9140
9141         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9142                 if (IS_NOT_E1HMF_STAT(bp, i))
9143                         continue;
9144
9145                 if (bnx2x_stats_arr[i].size == 0) {
9146                         /* skip this counter */
9147                         buf[j] = 0;
9148                         j++;
9149                         continue;
9150                 }
9151                 if (bnx2x_stats_arr[i].size == 4) {
9152                         /* 4-byte counter */
9153                         buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9154                         j++;
9155                         continue;
9156                 }
9157                 /* 8-byte counter */
9158                 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9159                                   *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9160                 j++;
9161         }
9162 }
9163
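/* ethtool LED blink: toggle the port LED every 500ms for 'data'
 * seconds (default 2) to physically identify the adapter, then
 * restore the LED to reflect the current link state.
 */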
9164 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9165 {
9166         struct bnx2x *bp = netdev_priv(dev);
9167         int port = BP_PORT(bp);
9168         int i;
9169
9170         if (!netif_running(dev))
9171                 return 0;
9172
9173         if (!bp->port.pmf)
9174                 return 0;
9175
9176         if (data == 0)
9177                 data = 2;
9178
9179         for (i = 0; i < (data * 2); i++) {
9180                 if ((i % 2) == 0)
9181                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9182                                       bp->link_params.hw_led_mode,
9183                                       bp->link_params.chip_id);
9184                 else
9185                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9186                                       bp->link_params.hw_led_mode,
9187                                       bp->link_params.chip_id);
9188
9189                 msleep_interruptible(500);
9190                 if (signal_pending(current))
9191                         break;
9192         }
9193
9194         if (bp->link_vars.link_up)
9195                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9196                               bp->link_vars.line_speed,
9197                               bp->link_params.hw_led_mode,
9198                               bp->link_params.chip_id);
9199
9200         return 0;
9201 }
9202
9203 static struct ethtool_ops bnx2x_ethtool_ops = {
9204         .get_settings           = bnx2x_get_settings,
9205         .set_settings           = bnx2x_set_settings,
9206         .get_drvinfo            = bnx2x_get_drvinfo,
9207         .get_wol                = bnx2x_get_wol,
9208         .set_wol                = bnx2x_set_wol,
9209         .get_msglevel           = bnx2x_get_msglevel,
9210         .set_msglevel           = bnx2x_set_msglevel,
9211         .nway_reset             = bnx2x_nway_reset,
9212         .get_link               = ethtool_op_get_link,
9213         .get_eeprom_len         = bnx2x_get_eeprom_len,
9214         .get_eeprom             = bnx2x_get_eeprom,
9215         .set_eeprom             = bnx2x_set_eeprom,
9216         .get_coalesce           = bnx2x_get_coalesce,
9217         .set_coalesce           = bnx2x_set_coalesce,
9218         .get_ringparam          = bnx2x_get_ringparam,
9219         .set_ringparam          = bnx2x_set_ringparam,
9220         .get_pauseparam         = bnx2x_get_pauseparam,
9221         .set_pauseparam         = bnx2x_set_pauseparam,
9222         .get_rx_csum            = bnx2x_get_rx_csum,
9223         .set_rx_csum            = bnx2x_set_rx_csum,
9224         .get_tx_csum            = ethtool_op_get_tx_csum,
9225         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9226         .set_flags              = bnx2x_set_flags,
9227         .get_flags              = ethtool_op_get_flags,
9228         .get_sg                 = ethtool_op_get_sg,
9229         .set_sg                 = ethtool_op_set_sg,
9230         .get_tso                = ethtool_op_get_tso,
9231         .set_tso                = bnx2x_set_tso,
9232         .self_test_count        = bnx2x_self_test_count,
9233         .self_test              = bnx2x_self_test,
9234         .get_strings            = bnx2x_get_strings,
9235         .phys_id                = bnx2x_phys_id,
9236         .get_stats_count        = bnx2x_get_stats_count,
9237         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9238 };
9239
9240 /* end of ethtool_ops */
9241
9242 /****************************************************************************
9243 * General service functions
9244 ****************************************************************************/
9245
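/* Program the PCI PM control register to move the device between
 * D0 and D3hot; PME is enabled in D3hot when WoL is configured.
 */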
9246 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9247 {
9248         u16 pmcsr;
9249
9250         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9251
9252         switch (state) {
9253         case PCI_D0:
9254                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9255                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9256                                        PCI_PM_CTRL_PME_STATUS));
9257
9258                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9259                         /* delay required during transition out of D3hot */
9260                         msleep(20);
9261                 break;
9262
9263         case PCI_D3hot:
9264                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9265                 pmcsr |= 3;
9266
9267                 if (bp->wol)
9268                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9269
9270                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9271                                       pmcsr);
9272
9273                 /* No more memory access after this point until
9274                  * the device is brought back to D0.
9275                  */
9276                 break;
9277
9278         default:
9279                 return -EINVAL;
9280         }
9281         return 0;
9282 }
9283
9284 /*
9285  * net_device service functions
9286  */
9287
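/* NAPI poll handler: service TX completions and up to 'budget' RX
 * packets on this fastpath, then re-enable the IGU interrupt once
 * all pending work has been consumed.
 */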
9288 static int bnx2x_poll(struct napi_struct *napi, int budget)
9289 {
9290         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9291                                                  napi);
9292         struct bnx2x *bp = fp->bp;
9293         int work_done = 0;
9294         u16 rx_cons_sb;
9295
9296 #ifdef BNX2X_STOP_ON_ERROR
9297         if (unlikely(bp->panic))
9298                 goto poll_panic;
9299 #endif
9300
9301         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9302         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9303         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9304
9305         bnx2x_update_fpsb_idx(fp);
9306
9307         if (BNX2X_HAS_TX_WORK(fp))
9308                 bnx2x_tx_int(fp, budget);
9309
9310         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9311         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9312                 rx_cons_sb++;
9313         if (BNX2X_HAS_RX_WORK(fp))
9314                 work_done = bnx2x_rx_int(fp, budget);
9315
9316         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9317         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9318         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9319                 rx_cons_sb++;
9320
9321         /* must not complete if we consumed full budget */
9322         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9323
9324 #ifdef BNX2X_STOP_ON_ERROR
9325 poll_panic:
9326 #endif
9327                 netif_rx_complete(napi);
9328
9329                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9330                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9331                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9332                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9333         }
9334         return work_done;
9335 }
9336
9337
9338 /* We split the first BD into headers and data BDs
9339  * to ease the pain of our fellow microcode engineers;
9340  * we use one mapping for both BDs.
9341  * So far this has only been observed to happen
9342  * in Other Operating Systems(TM).
9343  */
9344 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9345                                    struct bnx2x_fastpath *fp,
9346                                    struct eth_tx_bd **tx_bd, u16 hlen,
9347                                    u16 bd_prod, int nbd)
9348 {
9349         struct eth_tx_bd *h_tx_bd = *tx_bd;
9350         struct eth_tx_bd *d_tx_bd;
9351         dma_addr_t mapping;
9352         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9353
9354         /* first fix first BD */
9355         h_tx_bd->nbd = cpu_to_le16(nbd);
9356         h_tx_bd->nbytes = cpu_to_le16(hlen);
9357
9358         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9359            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9360            h_tx_bd->addr_lo, h_tx_bd->nbd);
9361
9362         /* now get a new data BD
9363          * (after the pbd) and fill it */
9364         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9365         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9366
9367         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9368                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9369
9370         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9371         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9372         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9373         d_tx_bd->vlan = 0;
9374         /* this marks the BD as one that has no individual mapping
9375          * the FW ignores this flag in a BD not marked start
9376          */
9377         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9378         DP(NETIF_MSG_TX_QUEUED,
9379            "TSO split data size is %d (%x:%x)\n",
9380            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9381
9382         /* update tx_bd for marking the last BD flag */
9383         *tx_bd = d_tx_bd;
9384
9385         return bd_prod;
9386 }
9387
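/* Fix up a checksum that the stack computed starting 'fix' bytes
 * before (fix > 0) or after (fix < 0) the transport header, and
 * return it byte-swapped as the parsing BD expects.
 */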
9388 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9389 {
9390         if (fix > 0)
9391                 csum = (u16) ~csum_fold(csum_sub(csum,
9392                                 csum_partial(t_header - fix, fix, 0)));
9393
9394         else if (fix < 0)
9395                 csum = (u16) ~csum_fold(csum_add(csum,
9396                                 csum_partial(t_header, -fix, 0)));
9397
9398         return swab16(csum);
9399 }
9400
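/* Classify an skb into XMIT_* flags (plain, CSUM v4/v6, TCP CSUM,
 * GSO v4/v6) that drive how the transmit BDs are built.
 */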
9401 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9402 {
9403         u32 rc;
9404
9405         if (skb->ip_summed != CHECKSUM_PARTIAL)
9406                 rc = XMIT_PLAIN;
9407
9408         else {
9409                 if (skb->protocol == htons(ETH_P_IPV6)) {
9410                         rc = XMIT_CSUM_V6;
9411                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9412                                 rc |= XMIT_CSUM_TCP;
9413
9414                 } else {
9415                         rc = XMIT_CSUM_V4;
9416                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9417                                 rc |= XMIT_CSUM_TCP;
9418                 }
9419         }
9420
9421         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9422                 rc |= XMIT_GSO_V4;
9423
9424         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9425                 rc |= XMIT_GSO_V6;
9426
9427         return rc;
9428 }
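/* Editorial note: the return value is a bitmask.  For example, a
 * CHECKSUM_PARTIAL IPv4/TCP frame yields XMIT_CSUM_V4 | XMIT_CSUM_TCP, and
 * with GSO it additionally carries XMIT_GSO_V4.  XMIT_PLAIN is the zero
 * value, which is why the plain "if (xmit_type)" test in bnx2x_start_xmit()
 * is enough to decide whether a parsing BD is needed at all.
 */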
9429
9430 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9431 /* check if packet requires linearization (packet is too fragmented) */
9432 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9433                              u32 xmit_type)
9434 {
9435         int to_copy = 0;
9436         int hlen = 0;
9437         int first_bd_sz = 0;
9438
9439         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9440         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9441
9442                 if (xmit_type & XMIT_GSO) {
9443                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9444                         /* Check if LSO packet needs to be copied:
9445                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9446                         int wnd_size = MAX_FETCH_BD - 3;
9447                         /* Number of windows to check */
9448                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9449                         int wnd_idx = 0;
9450                         int frag_idx = 0;
9451                         u32 wnd_sum = 0;
9452
9453                         /* Headers length */
9454                         hlen = (int)(skb_transport_header(skb) - skb->data) +
9455                                 tcp_hdrlen(skb);
9456
9457                         /* Amount of data (w/o headers) on the linear part of the SKB */
9458                         first_bd_sz = skb_headlen(skb) - hlen;
9459
9460                         wnd_sum  = first_bd_sz;
9461
9462                         /* Calculate the first sum - it's special */
9463                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9464                                 wnd_sum +=
9465                                         skb_shinfo(skb)->frags[frag_idx].size;
9466
9467                         /* If there was data on linear skb data - check it */
9468                         if (first_bd_sz > 0) {
9469                                 if (unlikely(wnd_sum < lso_mss)) {
9470                                         to_copy = 1;
9471                                         goto exit_lbl;
9472                                 }
9473
9474                                 wnd_sum -= first_bd_sz;
9475                         }
9476
9477                         /* Others are easier: run through the frag list and
9478                            check all windows */
9479                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9480                                 wnd_sum +=
9481                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9482
9483                                 if (unlikely(wnd_sum < lso_mss)) {
9484                                         to_copy = 1;
9485                                         break;
9486                                 }
9487                                 wnd_sum -=
9488                                         skb_shinfo(skb)->frags[wnd_idx].size;
9489                         }
9490
9491                 } else {
9492                         /* a non-LSO packet that is too fragmented
9493                            must always be linearized */
9494                         to_copy = 1;
9495                 }
9496         }
9497
9498 exit_lbl:
9499         if (unlikely(to_copy))
9500                 DP(NETIF_MSG_TX_QUEUED,
9501                    "Linearization IS REQUIRED for %s packet. "
9502                    "num_frags %d  hlen %d  first_bd_sz %d\n",
9503                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9504                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9505
9506         return to_copy;
9507 }
9508 #endif
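/* Editorial example of the window check above (a sketch assuming
 * MAX_FETCH_BD == 13, i.e. wnd_size == 10): every window of wnd_size
 * consecutive data BDs must cover at least lso_mss bytes, otherwise a
 * single MSS could span more BDs than the FW can fetch.  With ten
 * 100-byte frags and lso_mss == 1460, a window sums to 1000 < 1460,
 * to_copy is set, and bnx2x_start_xmit() linearizes the skb before
 * queuing it.
 */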
9509
9510 /* called with netif_tx_lock
9511  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9512  * netif_wake_queue()
9513  */
9514 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9515 {
9516         struct bnx2x *bp = netdev_priv(dev);
9517         struct bnx2x_fastpath *fp;
9518         struct sw_tx_bd *tx_buf;
9519         struct eth_tx_bd *tx_bd;
9520         struct eth_tx_parse_bd *pbd = NULL;
9521         u16 pkt_prod, bd_prod;
9522         int nbd, fp_index;
9523         dma_addr_t mapping;
9524         u32 xmit_type = bnx2x_xmit_type(bp, skb);
9525         int vlan_off = (bp->e1hov ? 4 : 0);
9526         int i;
9527         u8 hlen = 0;
9528
9529 #ifdef BNX2X_STOP_ON_ERROR
9530         if (unlikely(bp->panic))
9531                 return NETDEV_TX_BUSY;
9532 #endif
9533
9534         fp_index = (smp_processor_id() % bp->num_queues);
9535         fp = &bp->fp[fp_index];
9536
9537         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9538                 bp->eth_stats.driver_xoff++;
9539                 netif_stop_queue(dev);
9540                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9541                 return NETDEV_TX_BUSY;
9542         }
9543
9544         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9545            "  gso type %x  xmit_type %x\n",
9546            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9547            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9548
9549 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9550         /* First, check if we need to linearize the skb
9551            (due to FW restrictions) */
9552         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9553                 /* Statistics of linearization */
9554                 bp->lin_cnt++;
9555                 if (skb_linearize(skb) != 0) {
9556                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9557                            "silently dropping this SKB\n");
9558                         dev_kfree_skb_any(skb);
9559                         return NETDEV_TX_OK;
9560                 }
9561         }
9562 #endif
9563
9564         /*
9565          * Please read carefully. First we use one BD which we mark as start,
9566          * then for TSO or xsum we have a parsing info BD,
9567          * and only then we have the rest of the TSO BDs.
9568          * (don't forget to mark the last one as last,
9569          * and to unmap only AFTER you write to the BD ...)
9570          * And above all, all pbd sizes are in words - NOT DWORDS!
9571          */
9572
9573         pkt_prod = fp->tx_pkt_prod++;
9574         bd_prod = TX_BD(fp->tx_bd_prod);
9575
9576         /* get a tx_buf and first BD */
9577         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9578         tx_bd = &fp->tx_desc_ring[bd_prod];
9579
9580         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9581         tx_bd->general_data = (UNICAST_ADDRESS <<
9582                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9583         /* header nbd */
9584         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9585
9586         /* remember the first BD of the packet */
9587         tx_buf->first_bd = fp->tx_bd_prod;
9588         tx_buf->skb = skb;
9589
9590         DP(NETIF_MSG_TX_QUEUED,
9591            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9592            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9593
9594 #ifdef BCM_VLAN
9595         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9596             (bp->flags & HW_VLAN_TX_FLAG)) {
9597                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9598                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9599                 vlan_off += 4;
9600         } else
9601 #endif
9602                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9603
9604         if (xmit_type) {
9605                 /* turn on parsing and get a BD */
9606                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9607                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9608
9609                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9610         }
9611
9612         if (xmit_type & XMIT_CSUM) {
9613                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9614
9615                 /* for now NS flag is not used in Linux */
9616                 pbd->global_data = (hlen |
9617                                     ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9618                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9619
9620                 pbd->ip_hlen = (skb_transport_header(skb) -
9621                                 skb_network_header(skb)) / 2;
9622
9623                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9624
9625                 pbd->total_hlen = cpu_to_le16(hlen);
9626                 hlen = hlen*2 - vlan_off;
9627
9628                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9629
9630                 if (xmit_type & XMIT_CSUM_V4)
9631                         tx_bd->bd_flags.as_bitfield |=
9632                                                 ETH_TX_BD_FLAGS_IP_CSUM;
9633                 else
9634                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9635
9636                 if (xmit_type & XMIT_CSUM_TCP) {
9637                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9638
9639                 } else {
9640                         s8 fix = SKB_CS_OFF(skb); /* signed! */
9641
9642                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9643                         pbd->cs_offset = fix / 2;
9644
9645                         DP(NETIF_MSG_TX_QUEUED,
9646                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
9647                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9648                            SKB_CS(skb));
9649
9650                         /* HW bug: fix up the CSUM */
9651                         pbd->tcp_pseudo_csum =
9652                                 bnx2x_csum_fix(skb_transport_header(skb),
9653                                                SKB_CS(skb), fix);
9654
9655                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9656                            pbd->tcp_pseudo_csum);
9657                 }
9658         }
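        /* Editorial example of the word arithmetic above (a sketch assuming
         * vlan_off == 0 and 20-byte IP/TCP headers): the L2 header
         * contributes 14/2 = 7 words, ip_hlen 20/2 = 10 and tcp_hdrlen
         * 20/2 = 10, so total_hlen == 27 words; hlen*2 - vlan_off then
         * converts back to the 54 bytes later used for the TSO split. */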
9659
9660         mapping = pci_map_single(bp->pdev, skb->data,
9661                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9662
9663         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9664         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9665         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9666         tx_bd->nbd = cpu_to_le16(nbd);
9667         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9668
9669         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9670            "  nbytes %d  flags %x  vlan %x\n",
9671            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9672            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9673            le16_to_cpu(tx_bd->vlan));
9674
9675         if (xmit_type & XMIT_GSO) {
9676
9677                 DP(NETIF_MSG_TX_QUEUED,
9678                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
9679                    skb->len, hlen, skb_headlen(skb),
9680                    skb_shinfo(skb)->gso_size);
9681
9682                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9683
9684                 if (unlikely(skb_headlen(skb) > hlen))
9685                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9686                                                  bd_prod, ++nbd);
9687
9688                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9689                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9690                 pbd->tcp_flags = pbd_tcp_flags(skb);
9691
9692                 if (xmit_type & XMIT_GSO_V4) {
9693                         pbd->ip_id = swab16(ip_hdr(skb)->id);
9694                         pbd->tcp_pseudo_csum =
9695                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9696                                                           ip_hdr(skb)->daddr,
9697                                                           0, IPPROTO_TCP, 0));
9698
9699                 } else
9700                         pbd->tcp_pseudo_csum =
9701                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9702                                                         &ipv6_hdr(skb)->daddr,
9703                                                         0, IPPROTO_TCP, 0));
9704
9705                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9706         }
9707
9708         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9709                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9710
9711                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9712                 tx_bd = &fp->tx_desc_ring[bd_prod];
9713
9714                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9715                                        frag->size, PCI_DMA_TODEVICE);
9716
9717                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9718                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9719                 tx_bd->nbytes = cpu_to_le16(frag->size);
9720                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9721                 tx_bd->bd_flags.as_bitfield = 0;
9722
9723                 DP(NETIF_MSG_TX_QUEUED,
9724                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9725                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9726                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9727         }
9728
9729         /* now, at last, mark the BD as the last one */
9730         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9731
9732         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9733            tx_bd, tx_bd->bd_flags.as_bitfield);
9734
9735         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9736
9737         /* now send a tx doorbell, counting the next-page pointer BD
9738          * if the packet contains or ends with it
9739          */
9740         if (TX_BD_POFF(bd_prod) < nbd)
9741                 nbd++;
9742
9743         if (pbd)
9744                 DP(NETIF_MSG_TX_QUEUED,
9745                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9746                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9747                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9748                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9749                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9750
9751         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9752
9753         /*
9754          * Make sure that the BD data is updated before updating the producer
9755          * since FW might read the BD right after the producer is updated.
9756          * This is only applicable for weak-ordered memory model archs such
9757          * as IA-64. The following barrier is also mandatory since the FW
9758          * assumes packets always have BDs.
9759          */
9760         wmb();
9761
9762         fp->hw_tx_prods->bds_prod =
9763                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9764         mb(); /* FW restriction: must not reorder writing nbd and packets */
9765         fp->hw_tx_prods->packets_prod =
9766                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9767         DOORBELL(bp, FP_IDX(fp), 0);
9768
9769         mmiowb();
9770
9771         fp->tx_bd_prod += nbd;
9772         dev->trans_start = jiffies;
9773
9774         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9775                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9776                    if we put Tx into XOFF state. */
9777                 smp_mb();
9778                 netif_stop_queue(dev);
9779                 bp->eth_stats.driver_xoff++;
9780                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9781                         netif_wake_queue(dev);
9782         }
9783         fp->tx_pkt++;
9784
9785         return NETDEV_TX_OK;
9786 }
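/* Editorial sketch of the producer-update protocol above (not new code,
 * just the ordering contract spelled out):
 *
 *      write BDs (and parsing BD);
 *      wmb();                  // BDs globally visible before any producer
 *      bds_prod += nbd;
 *      mb();                   // FW rule: bds_prod before packets_prod
 *      packets_prod += 1;
 *      DOORBELL();
 *      mmiowb();               // keep MMIO ordered on weakly-ordered archs
 *
 * Dropping a barrier may let the FW fetch stale or half-written BDs on
 * weakly-ordered machines such as IA-64.
 */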
9787
9788 /* called with rtnl_lock */
9789 static int bnx2x_open(struct net_device *dev)
9790 {
9791         struct bnx2x *bp = netdev_priv(dev);
9792
9793         bnx2x_set_power_state(bp, PCI_D0);
9794
9795         return bnx2x_nic_load(bp, LOAD_OPEN);
9796 }
9797
9798 /* called with rtnl_lock */
9799 static int bnx2x_close(struct net_device *dev)
9800 {
9801         struct bnx2x *bp = netdev_priv(dev);
9802
9803         /* Unload the driver, release IRQs */
9804         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9805         if (atomic_read(&bp->pdev->enable_cnt) == 1)
9806                 if (!CHIP_REV_IS_SLOW(bp))
9807                         bnx2x_set_power_state(bp, PCI_D3hot);
9808
9809         return 0;
9810 }
9811
9812 /* called with netif_tx_lock from set_multicast */
9813 static void bnx2x_set_rx_mode(struct net_device *dev)
9814 {
9815         struct bnx2x *bp = netdev_priv(dev);
9816         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9817         int port = BP_PORT(bp);
9818
9819         if (bp->state != BNX2X_STATE_OPEN) {
9820                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9821                 return;
9822         }
9823
9824         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9825
9826         if (dev->flags & IFF_PROMISC)
9827                 rx_mode = BNX2X_RX_MODE_PROMISC;
9828
9829         else if ((dev->flags & IFF_ALLMULTI) ||
9830                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9831                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9832
9833         else { /* some multicasts */
9834                 if (CHIP_IS_E1(bp)) {
9835                         int i, old, offset;
9836                         struct dev_mc_list *mclist;
9837                         struct mac_configuration_cmd *config =
9838                                                 bnx2x_sp(bp, mcast_config);
9839
9840                         for (i = 0, mclist = dev->mc_list;
9841                              mclist && (i < dev->mc_count);
9842                              i++, mclist = mclist->next) {
9843
9844                                 config->config_table[i].
9845                                         cam_entry.msb_mac_addr =
9846                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
9847                                 config->config_table[i].
9848                                         cam_entry.middle_mac_addr =
9849                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
9850                                 config->config_table[i].
9851                                         cam_entry.lsb_mac_addr =
9852                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
9853                                 config->config_table[i].cam_entry.flags =
9854                                                         cpu_to_le16(port);
9855                                 config->config_table[i].
9856                                         target_table_entry.flags = 0;
9857                                 config->config_table[i].
9858                                         target_table_entry.client_id = 0;
9859                                 config->config_table[i].
9860                                         target_table_entry.vlan_id = 0;
9861
9862                                 DP(NETIF_MSG_IFUP,
9863                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9864                                    config->config_table[i].
9865                                                 cam_entry.msb_mac_addr,
9866                                    config->config_table[i].
9867                                                 cam_entry.middle_mac_addr,
9868                                    config->config_table[i].
9869                                                 cam_entry.lsb_mac_addr);
9870                         }
9871                         old = config->hdr.length_6b;
9872                         if (old > i) {
9873                                 for (; i < old; i++) {
9874                                         if (CAM_IS_INVALID(config->
9875                                                            config_table[i])) {
9876                                                 /* already invalidated */
9877                                                 break;
9878                                         }
9879                                         /* invalidate */
9880                                         CAM_INVALIDATE(config->
9881                                                        config_table[i]);
9882                                 }
9883                         }
9884
9885                         if (CHIP_REV_IS_SLOW(bp))
9886                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9887                         else
9888                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
9889
9890                         config->hdr.length_6b = i;
9891                         config->hdr.offset = offset;
9892                         config->hdr.client_id = BP_CL_ID(bp);
9893                         config->hdr.reserved1 = 0;
9894
9895                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9896                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9897                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9898                                       0);
9899                 } else { /* E1H */
9900                         /* Accept one or more multicasts */
9901                         struct dev_mc_list *mclist;
9902                         u32 mc_filter[MC_HASH_SIZE];
9903                         u32 crc, bit, regidx;
9904                         int i;
9905
9906                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9907
9908                         for (i = 0, mclist = dev->mc_list;
9909                              mclist && (i < dev->mc_count);
9910                              i++, mclist = mclist->next) {
9911
9912                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9913                                    mclist->dmi_addr);
9914
9915                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9916                                 bit = (crc >> 24) & 0xff;
9917                                 regidx = bit >> 5;
9918                                 bit &= 0x1f;
9919                                 mc_filter[regidx] |= (1 << bit);
9920                         }
9921
9922                         for (i = 0; i < MC_HASH_SIZE; i++)
9923                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9924                                        mc_filter[i]);
9925                 }
9926         }
9927
9928         bp->rx_mode = rx_mode;
9929         bnx2x_set_storm_rx_mode(bp);
9930 }
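/* Editorial example of the E1H multicast hash above: the top byte of the
 * little-endian CRC32C of the MAC selects one of 256 filter bits spread
 * over MC_HASH_SIZE 32-bit registers:
 *
 *      crc    = crc32c_le(0, mac, ETH_ALEN);
 *      bit    = (crc >> 24) & 0xff;    // 0..255
 *      regidx = bit >> 5;              // which register
 *      bit   &= 0x1f;                  // bit inside that register
 *
 * A received multicast is accepted iff its bit is set; as with any hash
 * filter, false positives are possible, so the stack still filters
 * unwanted multicasts in software.
 */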
9931
9932 /* called with rtnl_lock */
9933 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9934 {
9935         struct sockaddr *addr = p;
9936         struct bnx2x *bp = netdev_priv(dev);
9937
9938         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9939                 return -EINVAL;
9940
9941         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9942         if (netif_running(dev)) {
9943                 if (CHIP_IS_E1(bp))
9944                         bnx2x_set_mac_addr_e1(bp, 1);
9945                 else
9946                         bnx2x_set_mac_addr_e1h(bp, 1);
9947         }
9948
9949         return 0;
9950 }
9951
9952 /* called with rtnl_lock */
9953 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9954 {
9955         struct mii_ioctl_data *data = if_mii(ifr);
9956         struct bnx2x *bp = netdev_priv(dev);
9957         int port = BP_PORT(bp);
9958         int err;
9959
9960         switch (cmd) {
9961         case SIOCGMIIPHY:
9962                 data->phy_id = bp->port.phy_addr;
9963
9964                 /* fallthrough */
9965
9966         case SIOCGMIIREG: {
9967                 u16 mii_regval;
9968
9969                 if (!netif_running(dev))
9970                         return -EAGAIN;
9971
9972                 mutex_lock(&bp->port.phy_mutex);
9973                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9974                                       DEFAULT_PHY_DEV_ADDR,
9975                                       (data->reg_num & 0x1f), &mii_regval);
9976                 data->val_out = mii_regval;
9977                 mutex_unlock(&bp->port.phy_mutex);
9978                 return err;
9979         }
9980
9981         case SIOCSMIIREG:
9982                 if (!capable(CAP_NET_ADMIN))
9983                         return -EPERM;
9984
9985                 if (!netif_running(dev))
9986                         return -EAGAIN;
9987
9988                 mutex_lock(&bp->port.phy_mutex);
9989                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9990                                        DEFAULT_PHY_DEV_ADDR,
9991                                        (data->reg_num & 0x1f), data->val_in);
9992                 mutex_unlock(&bp->port.phy_mutex);
9993                 return err;
9994
9995         default:
9996                 /* do nothing */
9997                 break;
9998         }
9999
10000         return -EOPNOTSUPP;
10001 }
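/* Editorial note: both MII register ioctls above are serviced through
 * clause-45 MDIO cycles (bnx2x_cl45_read/write) against the PHY at
 * bp->port.phy_addr, device DEFAULT_PHY_DEV_ADDR; only the low five bits
 * of reg_num are used, so userspace sees a 32-register window of the PHY.
 */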
10002
10003 /* called with rtnl_lock */
10004 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10005 {
10006         struct bnx2x *bp = netdev_priv(dev);
10007         int rc = 0;
10008
10009         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10010             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10011                 return -EINVAL;
10012
10013         /* This does not race with packet allocation
10014          * because the actual alloc size is
10015          * only updated as part of load
10016          */
10017         dev->mtu = new_mtu;
10018
10019         if (netif_running(dev)) {
10020                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10021                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10022         }
10023
10024         return rc;
10025 }
10026
10027 static void bnx2x_tx_timeout(struct net_device *dev)
10028 {
10029         struct bnx2x *bp = netdev_priv(dev);
10030
10031 #ifdef BNX2X_STOP_ON_ERROR
10032         if (!bp->panic)
10033                 bnx2x_panic();
10034 #endif
10035         /* This allows the netif to be shut down gracefully before resetting */
10036         schedule_work(&bp->reset_task);
10037 }
10038
10039 #ifdef BCM_VLAN
10040 /* called with rtnl_lock */
10041 static void bnx2x_vlan_rx_register(struct net_device *dev,
10042                                    struct vlan_group *vlgrp)
10043 {
10044         struct bnx2x *bp = netdev_priv(dev);
10045
10046         bp->vlgrp = vlgrp;
10047
10048         /* Set flags according to the required capabilities */
10049         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10050
10051         if (dev->features & NETIF_F_HW_VLAN_TX)
10052                 bp->flags |= HW_VLAN_TX_FLAG;
10053
10054         if (dev->features & NETIF_F_HW_VLAN_RX)
10055                 bp->flags |= HW_VLAN_RX_FLAG;
10056
10057         if (netif_running(dev))
10058                 bnx2x_set_client_config(bp);
10059 }
10060
10061 #endif
10062
10063 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10064 static void poll_bnx2x(struct net_device *dev)
10065 {
10066         struct bnx2x *bp = netdev_priv(dev);
10067
10068         disable_irq(bp->pdev->irq);
10069         bnx2x_interrupt(bp->pdev->irq, dev);
10070         enable_irq(bp->pdev->irq);
10071 }
10072 #endif
10073
10074 static const struct net_device_ops bnx2x_netdev_ops = {
10075         .ndo_open               = bnx2x_open,
10076         .ndo_stop               = bnx2x_close,
10077         .ndo_start_xmit         = bnx2x_start_xmit,
10078         .ndo_set_multicast_list = bnx2x_set_rx_mode,
10079         .ndo_set_mac_address    = bnx2x_change_mac_addr,
10080         .ndo_validate_addr      = eth_validate_addr,
10081         .ndo_do_ioctl           = bnx2x_ioctl,
10082         .ndo_change_mtu         = bnx2x_change_mtu,
10083         .ndo_tx_timeout         = bnx2x_tx_timeout,
10084 #ifdef BCM_VLAN
10085         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
10086 #endif
10087 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10088         .ndo_poll_controller    = poll_bnx2x,
10089 #endif
10090 };
10091
10092
10093 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10094                                     struct net_device *dev)
10095 {
10096         struct bnx2x *bp;
10097         int rc;
10098
10099         SET_NETDEV_DEV(dev, &pdev->dev);
10100         bp = netdev_priv(dev);
10101
10102         bp->dev = dev;
10103         bp->pdev = pdev;
10104         bp->flags = 0;
10105         bp->func = PCI_FUNC(pdev->devfn);
10106
10107         rc = pci_enable_device(pdev);
10108         if (rc) {
10109                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10110                 goto err_out;
10111         }
10112
10113         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10114                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10115                        " aborting\n");
10116                 rc = -ENODEV;
10117                 goto err_out_disable;
10118         }
10119
10120         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10121                 printk(KERN_ERR PFX "Cannot find second PCI device"
10122                        " base address, aborting\n");
10123                 rc = -ENODEV;
10124                 goto err_out_disable;
10125         }
10126
10127         if (atomic_read(&pdev->enable_cnt) == 1) {
10128                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10129                 if (rc) {
10130                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10131                                " aborting\n");
10132                         goto err_out_disable;
10133                 }
10134
10135                 pci_set_master(pdev);
10136                 pci_save_state(pdev);
10137         }
10138
10139         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10140         if (bp->pm_cap == 0) {
10141                 printk(KERN_ERR PFX "Cannot find power management"
10142                        " capability, aborting\n");
10143                 rc = -EIO;
10144                 goto err_out_release;
10145         }
10146
10147         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10148         if (bp->pcie_cap == 0) {
10149                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10150                        " aborting\n");
10151                 rc = -EIO;
10152                 goto err_out_release;
10153         }
10154
10155         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10156                 bp->flags |= USING_DAC_FLAG;
10157                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10158                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10159                                " failed, aborting\n");
10160                         rc = -EIO;
10161                         goto err_out_release;
10162                 }
10163
10164         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10165                 printk(KERN_ERR PFX "System does not support DMA,"
10166                        " aborting\n");
10167                 rc = -EIO;
10168                 goto err_out_release;
10169         }
10170
10171         dev->mem_start = pci_resource_start(pdev, 0);
10172         dev->base_addr = dev->mem_start;
10173         dev->mem_end = pci_resource_end(pdev, 0);
10174
10175         dev->irq = pdev->irq;
10176
10177         bp->regview = pci_ioremap_bar(pdev, 0);
10178         if (!bp->regview) {
10179                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10180                 rc = -ENOMEM;
10181                 goto err_out_release;
10182         }
10183
10184         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10185                                         min_t(u64, BNX2X_DB_SIZE,
10186                                               pci_resource_len(pdev, 2)));
10187         if (!bp->doorbells) {
10188                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10189                 rc = -ENOMEM;
10190                 goto err_out_unmap;
10191         }
10192
10193         bnx2x_set_power_state(bp, PCI_D0);
10194
10195         /* clean indirect addresses */
10196         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10197                                PCICFG_VENDOR_ID_OFFSET);
10198         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10199         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10200         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10201         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10202
10203         dev->watchdog_timeo = TX_TIMEOUT;
10204
10205         dev->netdev_ops = &bnx2x_netdev_ops;
10206         dev->ethtool_ops = &bnx2x_ethtool_ops;
10207         dev->features |= NETIF_F_SG;
10208         dev->features |= NETIF_F_HW_CSUM;
10209         if (bp->flags & USING_DAC_FLAG)
10210                 dev->features |= NETIF_F_HIGHDMA;
10211 #ifdef BCM_VLAN
10212         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10213         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10214 #endif
10215         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10216         dev->features |= NETIF_F_TSO6;
10217
10218         return 0;
10219
10220 err_out_unmap:
10221         if (bp->regview) {
10222                 iounmap(bp->regview);
10223                 bp->regview = NULL;
10224         }
10225         if (bp->doorbells) {
10226                 iounmap(bp->doorbells);
10227                 bp->doorbells = NULL;
10228         }
10229
10230 err_out_release:
10231         if (atomic_read(&pdev->enable_cnt) == 1)
10232                 pci_release_regions(pdev);
10233
10234 err_out_disable:
10235         pci_disable_device(pdev);
10236         pci_set_drvdata(pdev, NULL);
10237
10238 err_out:
10239         return rc;
10240 }
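/* Editorial note on the resources set up above: BAR0 is the register
 * window (bp->regview, via pci_ioremap_bar) and BAR2 the doorbell space
 * (bp->doorbells, capped at BNX2X_DB_SIZE).  DMA prefers a 64-bit mask
 * (USING_DAC_FLAG, later mirrored as NETIF_F_HIGHDMA) and falls back to
 * 32-bit before giving up.
 */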
10241
10242 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10243 {
10244         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10245
10246         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10247         return val;
10248 }
10249
10250 /* return value of 1=2.5GHz 2=5GHz */
10251 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10252 {
10253         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10254
10255         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10256         return val;
10257 }
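/* Editorial example: on a board negotiated to x8 Gen2, the two helpers
 * above return width == 8 and speed == 2, which bnx2x_init_one() prints
 * as "PCI-E x8 5GHz (Gen2)".
 */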
10258
10259 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10260                                     const struct pci_device_id *ent)
10261 {
10262         static int version_printed;
10263         struct net_device *dev = NULL;
10264         struct bnx2x *bp;
10265         int rc;
10266
10267         if (version_printed++ == 0)
10268                 printk(KERN_INFO "%s", version);
10269
10270         /* dev zeroed in alloc_etherdev */
10271         dev = alloc_etherdev(sizeof(*bp));
10272         if (!dev) {
10273                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10274                 return -ENOMEM;
10275         }
10276
10277         bp = netdev_priv(dev);
10278         bp->msglevel = debug;
10279
10280         rc = bnx2x_init_dev(pdev, dev);
10281         if (rc < 0) {
10282                 free_netdev(dev);
10283                 return rc;
10284         }
10285
10286         pci_set_drvdata(pdev, dev);
10287
10288         rc = bnx2x_init_bp(bp);
10289         if (rc)
10290                 goto init_one_exit;
10291
10292         rc = register_netdev(dev);
10293         if (rc) {
10294                 dev_err(&pdev->dev, "Cannot register net device\n");
10295                 goto init_one_exit;
10296         }
10297
10298         netif_carrier_off(dev);
10299
10300         bp->common.name = board_info[ent->driver_data].name;
10301         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10302                " IRQ %d, ", dev->name, bp->common.name,
10303                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10304                bnx2x_get_pcie_width(bp),
10305                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10306                dev->base_addr, bp->pdev->irq);
10307         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10308         return 0;
10309
10310 init_one_exit:
10311         if (bp->regview)
10312                 iounmap(bp->regview);
10313
10314         if (bp->doorbells)
10315                 iounmap(bp->doorbells);
10316
10317         free_netdev(dev);
10318
10319         if (atomic_read(&pdev->enable_cnt) == 1)
10320                 pci_release_regions(pdev);
10321
10322         pci_disable_device(pdev);
10323         pci_set_drvdata(pdev, NULL);
10324
10325         return rc;
10326 }
10327
10328 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10329 {
10330         struct net_device *dev = pci_get_drvdata(pdev);
10331         struct bnx2x *bp;
10332
10333         if (!dev) {
10334                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10335                 return;
10336         }
10337         bp = netdev_priv(dev);
10338
10339         unregister_netdev(dev);
10340
10341         if (bp->regview)
10342                 iounmap(bp->regview);
10343
10344         if (bp->doorbells)
10345                 iounmap(bp->doorbells);
10346
10347         free_netdev(dev);
10348
10349         if (atomic_read(&pdev->enable_cnt) == 1)
10350                 pci_release_regions(pdev);
10351
10352         pci_disable_device(pdev);
10353         pci_set_drvdata(pdev, NULL);
10354 }
10355
10356 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10357 {
10358         struct net_device *dev = pci_get_drvdata(pdev);
10359         struct bnx2x *bp;
10360
10361         if (!dev) {
10362                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10363                 return -ENODEV;
10364         }
10365         bp = netdev_priv(dev);
10366
10367         rtnl_lock();
10368
10369         pci_save_state(pdev);
10370
10371         if (!netif_running(dev)) {
10372                 rtnl_unlock();
10373                 return 0;
10374         }
10375
10376         netif_device_detach(dev);
10377
10378         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10379
10380         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10381
10382         rtnl_unlock();
10383
10384         return 0;
10385 }
10386
10387 static int bnx2x_resume(struct pci_dev *pdev)
10388 {
10389         struct net_device *dev = pci_get_drvdata(pdev);
10390         struct bnx2x *bp;
10391         int rc;
10392
10393         if (!dev) {
10394                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10395                 return -ENODEV;
10396         }
10397         bp = netdev_priv(dev);
10398
10399         rtnl_lock();
10400
10401         pci_restore_state(pdev);
10402
10403         if (!netif_running(dev)) {
10404                 rtnl_unlock();
10405                 return 0;
10406         }
10407
10408         bnx2x_set_power_state(bp, PCI_D0);
10409         netif_device_attach(dev);
10410
10411         rc = bnx2x_nic_load(bp, LOAD_OPEN);
10412
10413         rtnl_unlock();
10414
10415         return rc;
10416 }
10417
10418 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10419 {
10420         int i;
10421
10422         bp->state = BNX2X_STATE_ERROR;
10423
10424         bp->rx_mode = BNX2X_RX_MODE_NONE;
10425
10426         bnx2x_netif_stop(bp, 0);
10427
10428         del_timer_sync(&bp->timer);
10429         bp->stats_state = STATS_STATE_DISABLED;
10430         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10431
10432         /* Release IRQs */
10433         bnx2x_free_irq(bp);
10434
10435         if (CHIP_IS_E1(bp)) {
10436                 struct mac_configuration_cmd *config =
10437                                                 bnx2x_sp(bp, mcast_config);
10438
10439                 for (i = 0; i < config->hdr.length_6b; i++)
10440                         CAM_INVALIDATE(config->config_table[i]);
10441         }
10442
10443         /* Free SKBs, SGEs, TPA pool and driver internals */
10444         bnx2x_free_skbs(bp);
10445         for_each_queue(bp, i)
10446                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10447         bnx2x_free_mem(bp);
10448
10449         bp->state = BNX2X_STATE_CLOSED;
10450
10451         netif_carrier_off(bp->dev);
10452
10453         return 0;
10454 }
10455
10456 static void bnx2x_eeh_recover(struct bnx2x *bp)
10457 {
10458         u32 val;
10459
10460         mutex_init(&bp->port.phy_mutex);
10461
10462         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10463         bp->link_params.shmem_base = bp->common.shmem_base;
10464         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10465
10466         if (!bp->common.shmem_base ||
10467             (bp->common.shmem_base < 0xA0000) ||
10468             (bp->common.shmem_base >= 0xC0000)) {
10469                 BNX2X_DEV_INFO("MCP not active\n");
10470                 bp->flags |= NO_MCP_FLAG;
10471                 return;
10472         }
10473
10474         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10475         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10476                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10477                 BNX2X_ERR("BAD MCP validity signature\n");
10478
10479         if (!BP_NOMCP(bp)) {
10480                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10481                               & DRV_MSG_SEQ_NUMBER_MASK);
10482                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10483         }
10484 }
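/* Editorial note: the shmem_base sanity window above ([0xA0000, 0xC0000))
 * is where the MCP scratchpad is expected to live; a value outside it
 * means the management CPU is not running, so the driver sets NO_MCP_FLAG
 * and recovers without firmware handshakes.
 */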
10485
10486 /**
10487  * bnx2x_io_error_detected - called when PCI error is detected
10488  * @pdev: Pointer to PCI device
10489  * @state: The current pci connection state
10490  *
10491  * This function is called after a PCI bus error affecting
10492  * this device has been detected.
10493  */
10494 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10495                                                 pci_channel_state_t state)
10496 {
10497         struct net_device *dev = pci_get_drvdata(pdev);
10498         struct bnx2x *bp = netdev_priv(dev);
10499
10500         rtnl_lock();
10501
10502         netif_device_detach(dev);
10503
10504         if (netif_running(dev))
10505                 bnx2x_eeh_nic_unload(bp);
10506
10507         pci_disable_device(pdev);
10508
10509         rtnl_unlock();
10510
10511         /* Request a slot reset */
10512         return PCI_ERS_RESULT_NEED_RESET;
10513 }
10514
10515 /**
10516  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10517  * @pdev: Pointer to PCI device
10518  *
10519  * Restart the card from scratch, as if from a cold-boot.
10520  */
10521 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10522 {
10523         struct net_device *dev = pci_get_drvdata(pdev);
10524         struct bnx2x *bp = netdev_priv(dev);
10525
10526         rtnl_lock();
10527
10528         if (pci_enable_device(pdev)) {
10529                 dev_err(&pdev->dev,
10530                         "Cannot re-enable PCI device after reset\n");
10531                 rtnl_unlock();
10532                 return PCI_ERS_RESULT_DISCONNECT;
10533         }
10534
10535         pci_set_master(pdev);
10536         pci_restore_state(pdev);
10537
10538         if (netif_running(dev))
10539                 bnx2x_set_power_state(bp, PCI_D0);
10540
10541         rtnl_unlock();
10542
10543         return PCI_ERS_RESULT_RECOVERED;
10544 }
10545
10546 /**
10547  * bnx2x_io_resume - called when traffic can start flowing again
10548  * @pdev: Pointer to PCI device
10549  *
10550  * This callback is called when the error recovery driver tells us that
10551  * it's OK to resume normal operation.
10552  */
10553 static void bnx2x_io_resume(struct pci_dev *pdev)
10554 {
10555         struct net_device *dev = pci_get_drvdata(pdev);
10556         struct bnx2x *bp = netdev_priv(dev);
10557
10558         rtnl_lock();
10559
10560         bnx2x_eeh_recover(bp);
10561
10562         if (netif_running(dev))
10563                 bnx2x_nic_load(bp, LOAD_NORMAL);
10564
10565         netif_device_attach(dev);
10566
10567         rtnl_unlock();
10568 }
10569
10570 static struct pci_error_handlers bnx2x_err_handler = {
10571         .error_detected = bnx2x_io_error_detected,
10572         .slot_reset = bnx2x_io_slot_reset,
10573         .resume = bnx2x_io_resume,
10574 };
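/* Editorial note on the recovery flow wired up above: the PCI error core
 * invokes error_detected() (detach netdev, unload NIC, disable the device),
 * then slot_reset() (re-enable, restore config space), and finally
 * resume() (re-read shmem via bnx2x_eeh_recover(), reload the NIC and
 * reattach the netdev).
 */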
10575
10576 static struct pci_driver bnx2x_pci_driver = {
10577         .name        = DRV_MODULE_NAME,
10578         .id_table    = bnx2x_pci_tbl,
10579         .probe       = bnx2x_init_one,
10580         .remove      = __devexit_p(bnx2x_remove_one),
10581         .suspend     = bnx2x_suspend,
10582         .resume      = bnx2x_resume,
10583         .err_handler = &bnx2x_err_handler,
10584 };
10585
10586 static int __init bnx2x_init(void)
10587 {
10588         bnx2x_wq = create_singlethread_workqueue("bnx2x");
10589         if (bnx2x_wq == NULL) {
10590                 printk(KERN_ERR PFX "Cannot create workqueue\n");
10591                 return -ENOMEM;
10592         }
10593
10594         return pci_register_driver(&bnx2x_pci_driver);
10595 }
10596
10597 static void __exit bnx2x_cleanup(void)
10598 {
10599         pci_unregister_driver(&bnx2x_pci_driver);
10600
10601         destroy_workqueue(bnx2x_wq);
10602 }
10603
10604 module_init(bnx2x_init);
10605 module_exit(bnx2x_cleanup);
10606