/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
        #include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.23"
#define DRV_MODULE_RELDATE      "2008/11/03"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

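/* Indirect read through the PCI config window; companion to
 * bnx2x_reg_wr_ind() above.  The closing write moves the window back
 * to the vendor-ID offset, presumably so that a later stray config
 * access cannot alias into device memory.  Init-time only, locking
 * by MCP.
 */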
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

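/* DMA len32 dwords from host memory (dma_addr) into GRC address
 * dst_addr on the port's DMAE channel.  Falls back to indirect
 * register writes while DMAE is not ready yet, otherwise busy-waits
 * on the write-back completion word under dmae_mutex.
 */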
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

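/* Counterpart of bnx2x_write_dmae(): DMA len32 dwords from GRC address
 * src_addr into the slowpath wb_data buffer and poll the same
 * write-back completion word.  Note that the data lands in
 * bp->slowpath->wb_data rather than in a caller-supplied buffer.
 */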
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

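/* Dump the assert lists of the four STORM processors (X/T/C/U).
 * Each list is scanned until an entry whose first dword reads as the
 * "invalid assert" opcode; the number of asserts found is returned.
 */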
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

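/* Print the MCP scratchpad trace.  The buffer is cyclic: the mark read
 * from offset 0xf104 is the current write position, so the text is
 * emitted in two passes (mark to end of buffer, then start to mark).
 */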
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

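/* Program this port's HC interrupt configuration: MSI-X plus attention
 * bits when MSI-X is in use, otherwise single-ISR INTA mode.  In the
 * INTA case the register is written twice, presumably so the MSI/MSI-X
 * enable bit is cleared only after the INTA setup has taken effect.
 */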
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

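/* Quiesce interrupt processing: bump intr_sem so ISRs become no-ops,
 * optionally mask interrupts in the HW, wait for in-flight handlers to
 * finish, and make sure sp_task is not left running on bnx2x_wq.
 */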
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

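/* Acknowledge a status block to the IGU: pack sb id, storm id, new
 * index, interrupt mode and update flag into an igu_ack_register dword
 * and write it to this port's INT_ACK command register.
 */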
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}

/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

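/* Number of free Tx BDs.  The NUM_TX_RINGS "next-page" BDs are counted
 * as used, so the page-chaining entries are never handed out as
 * ordinary descriptors.
 */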
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

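/* Tx completion: walk the ring from sw_cons up to the hw_cons snapshot
 * taken from the status block, freeing at most 'work' packets, then
 * wake the queue if it was stopped and enough BDs are available again.
 */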
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}

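/* Handle a ramrod (slowpath) completion delivered on a fastpath RCQ:
 * advance the matching fp->state or bp->state machine and return the
 * slot to the slowpath queue (spq_left).
 */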
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

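/* Allocate and map a PAGES_PER_SGE compound page for one SGE ring slot,
 * recording the DMA address in both the sw ring and the hw SGE entry.
 */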
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

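/* After a TPA completion: clear the mask bits of the SGEs consumed by
 * this CQE and advance rx_sge_prod over every fully-consumed 64-bit
 * mask element.  Assumes the last SGL index is the highest one used.
 */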
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      BCM_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

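/* TPA start: the empty skb kept in the pool bin for this aggregation
 * queue goes onto the producer BD, while the skb that received the
 * first segment moves into the bin (still mapped) until the
 * aggregation is stopped.  The bin is expected to be in STOP state.
 */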
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        struct page *sge;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
                                                max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages > 8*PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                sge = rx_pg->page;
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

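/* TPA stop: unmap the aggregated skb, fix the IP checksum, attach the
 * SGE pages as frags and pass the skb to the stack; a newly allocated
 * skb refills the bin.  If the replacement skb or substitute pages
 * cannot be allocated, the packet is dropped instead.
 */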
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) &&
                            (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                             PARSING_FLAGS_VLAN))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }

                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}

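/* NAPI Rx work: process up to 'budget' completions from the RCQ,
 * routing slowpath CQEs to bnx2x_sp_event() and TPA start/stop CQEs to
 * the aggregation handlers; regular packets are checked for errors and
 * passed up the stack.
 */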
1370 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1371 {
1372         struct bnx2x *bp = fp->bp;
1373         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1374         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1375         int rx_pkt = 0;
1376
1377 #ifdef BNX2X_STOP_ON_ERROR
1378         if (unlikely(bp->panic))
1379                 return 0;
1380 #endif
1381
1382         /* CQ "next element" is of the size of the regular element,
1383            that's why it's ok here */
1384         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1385         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1386                 hw_comp_cons++;
1387
1388         bd_cons = fp->rx_bd_cons;
1389         bd_prod = fp->rx_bd_prod;
1390         bd_prod_fw = bd_prod;
1391         sw_comp_cons = fp->rx_comp_cons;
1392         sw_comp_prod = fp->rx_comp_prod;
1393
1394         /* Memory barrier necessary as speculative reads of the rx
1395          * buffer can be ahead of the index in the status block
1396          */
1397         rmb();
1398
1399         DP(NETIF_MSG_RX_STATUS,
1400            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1401            FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1402
1403         while (sw_comp_cons != hw_comp_cons) {
1404                 struct sw_rx_bd *rx_buf = NULL;
1405                 struct sk_buff *skb;
1406                 union eth_rx_cqe *cqe;
1407                 u8 cqe_fp_flags;
1408                 u16 len, pad;
1409
1410                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1411                 bd_prod = RX_BD(bd_prod);
1412                 bd_cons = RX_BD(bd_cons);
1413
1414                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1415                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1416
1417                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1418                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1419                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1420                    cqe->fast_path_cqe.rss_hash_result,
1421                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1422                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1423
1424                 /* is this a slowpath msg? */
1425                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1426                         bnx2x_sp_event(fp, cqe);
1427                         goto next_cqe;
1428
1429                 /* this is an rx packet */
1430                 } else {
1431                         rx_buf = &fp->rx_buf_ring[bd_cons];
1432                         skb = rx_buf->skb;
1433                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1434                         pad = cqe->fast_path_cqe.placement_offset;
1435
1436                         /* If CQE is marked both TPA_START and TPA_END
1437                            it is a non-TPA CQE */
1438                         if ((!fp->disable_tpa) &&
1439                             (TPA_TYPE(cqe_fp_flags) !=
1440                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1441                                 u16 queue = cqe->fast_path_cqe.queue_index;
1442
1443                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1444                                         DP(NETIF_MSG_RX_STATUS,
1445                                            "calling tpa_start on queue %d\n",
1446                                            queue);
1447
1448                                         bnx2x_tpa_start(fp, queue, skb,
1449                                                         bd_cons, bd_prod);
1450                                         goto next_rx;
1451                                 }
1452
1453                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1454                                         DP(NETIF_MSG_RX_STATUS,
1455                                            "calling tpa_stop on queue %d\n",
1456                                            queue);
1457
1458                                         if (!BNX2X_RX_SUM_FIX(cqe))
1459                                                 BNX2X_ERR("STOP on non-TCP "
1460                                                           "data\n");
1461
1462                                         /* This is the size of the linear
1463                                            data on this skb */
1464                                         len = le16_to_cpu(cqe->fast_path_cqe.
1465                                                                 len_on_bd);
1466                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1467                                                     len, cqe, comp_ring_cons);
1468 #ifdef BNX2X_STOP_ON_ERROR
1469                                         if (bp->panic)
1470                                                 return -EINVAL;
1471 #endif
1472
1473                                         bnx2x_update_sge_prod(fp,
1474                                                         &cqe->fast_path_cqe);
1475                                         goto next_cqe;
1476                                 }
1477                         }
1478
1479                         pci_dma_sync_single_for_device(bp->pdev,
1480                                         pci_unmap_addr(rx_buf, mapping),
1481                                                        pad + RX_COPY_THRESH,
1482                                                        PCI_DMA_FROMDEVICE);
1483                         prefetch(skb);
1484                         prefetch(((char *)(skb)) + 128);
1485
1486                         /* is this an error packet? */
1487                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1488                                 DP(NETIF_MSG_RX_ERR,
1489                                    "ERROR  flags %x  rx packet %u\n",
1490                                    cqe_fp_flags, sw_comp_cons);
1491                                 bp->eth_stats.rx_err_discard_pkt++;
1492                                 goto reuse_rx;
1493                         }
1494
1495                         /* Since we don't have a jumbo ring,
1496                          * copy small packets if MTU > 1500
1497                          */
1498                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1499                             (len <= RX_COPY_THRESH)) {
1500                                 struct sk_buff *new_skb;
1501
1502                                 new_skb = netdev_alloc_skb(bp->dev,
1503                                                            len + pad);
1504                                 if (new_skb == NULL) {
1505                                         DP(NETIF_MSG_RX_ERR,
1506                                            "ERROR  packet dropped "
1507                                            "because of alloc failure\n");
1508                                         bp->eth_stats.rx_skb_alloc_failed++;
1509                                         goto reuse_rx;
1510                                 }
1511
1512                                 /* aligned copy */
1513                                 skb_copy_from_linear_data_offset(skb, pad,
1514                                                     new_skb->data + pad, len);
1515                                 skb_reserve(new_skb, pad);
1516                                 skb_put(new_skb, len);
1517
1518                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1519
1520                                 skb = new_skb;
1521
1522                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1523                                 pci_unmap_single(bp->pdev,
1524                                         pci_unmap_addr(rx_buf, mapping),
1525                                                  bp->rx_buf_size,
1526                                                  PCI_DMA_FROMDEVICE);
1527                                 skb_reserve(skb, pad);
1528                                 skb_put(skb, len);
1529
1530                         } else {
1531                                 DP(NETIF_MSG_RX_ERR,
1532                                    "ERROR  packet dropped because "
1533                                    "of alloc failure\n");
1534                                 bp->eth_stats.rx_skb_alloc_failed++;
1535 reuse_rx:
1536                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1537                                 goto next_rx;
1538                         }
1539
1540                         skb->protocol = eth_type_trans(skb, bp->dev);
1541
1542                         skb->ip_summed = CHECKSUM_NONE;
1543                         if (bp->rx_csum) {
1544                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1545                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1546                                 else
1547                                         bp->eth_stats.hw_csum_err++;
1548                         }
1549                 }
1550
1551 #ifdef BCM_VLAN
1552                 if ((bp->vlgrp != NULL) &&
1553                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1554                      PARSING_FLAGS_VLAN))
1555                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1556                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1557                 else
1558 #endif
1559                         netif_receive_skb(skb);
1560
1561
1562 next_rx:
1563                 rx_buf->skb = NULL;
1564
1565                 bd_cons = NEXT_RX_IDX(bd_cons);
1566                 bd_prod = NEXT_RX_IDX(bd_prod);
1567                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1568                 rx_pkt++;
1569 next_cqe:
1570                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1571                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1572
1573                 if (rx_pkt == budget)
1574                         break;
1575         } /* while */
1576
1577         fp->rx_bd_cons = bd_cons;
1578         fp->rx_bd_prod = bd_prod_fw;
1579         fp->rx_comp_cons = sw_comp_cons;
1580         fp->rx_comp_prod = sw_comp_prod;
1581
1582         /* Update producers */
1583         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1584                              fp->rx_sge_prod);
1585         mmiowb(); /* keep prod updates ordered */
1586
1587         fp->rx_pkt += rx_pkt;
1588         fp->rx_calls++;
1589
1590         return rx_pkt;
1591 }
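
The NEXT_RX_IDX()/NEXT_RCQ_IDX() advances in the loop above skip the last descriptor on each ring page, which holds the pointer to the next page rather than a real buffer. The following standalone sketch shows that skip with an assumed page geometry (8 descriptors per page, 32-entry ring; the real bnx2x sizes differ).

#include <stdio.h>

#define DESC_PER_PAGE    8                       /* illustrative */
#define MAX_DESC_CNT     (DESC_PER_PAGE - 1)     /* last slot = next ptr */
#define RING_SIZE        32                      /* illustrative */

static unsigned next_idx(unsigned idx)
{
        /* if the next slot is the per-page "next" element, skip over it */
        unsigned nxt = idx + 1;

        if ((nxt & MAX_DESC_CNT) == MAX_DESC_CNT)
                nxt++;
        return nxt % RING_SIZE;
}

int main(void)
{
        unsigned idx = 5, i;

        for (i = 0; i < 6; i++) {
                printf("%u -> ", idx);
                idx = next_idx(idx);
        }
        printf("%u\n", idx);   /* note the jump from 6 to 8, skipping 7 */
        return 0;
}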
1592
1593 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1594 {
1595         struct bnx2x_fastpath *fp = fp_cookie;
1596         struct bnx2x *bp = fp->bp;
1597         int index = FP_IDX(fp);
1598
1599         /* Return here if interrupt is disabled */
1600         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1601                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1602                 return IRQ_HANDLED;
1603         }
1604
1605         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1606            index, FP_SB_ID(fp));
1607         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1608
1609 #ifdef BNX2X_STOP_ON_ERROR
1610         if (unlikely(bp->panic))
1611                 return IRQ_HANDLED;
1612 #endif
1613
1614         prefetch(fp->rx_cons_sb);
1615         prefetch(fp->tx_cons_sb);
1616         prefetch(&fp->status_blk->c_status_block.status_block_index);
1617         prefetch(&fp->status_blk->u_status_block.status_block_index);
1618
1619         netif_rx_schedule(&bnx2x_fp(bp, index, napi));
1620
1621         return IRQ_HANDLED;
1622 }
1623
1624 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1625 {
1626         struct net_device *dev = dev_instance;
1627         struct bnx2x *bp = netdev_priv(dev);
1628         u16 status = bnx2x_ack_int(bp);
1629         u16 mask;
1630
1631         /* Return here if interrupt is shared and it's not for us */
1632         if (unlikely(status == 0)) {
1633                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1634                 return IRQ_NONE;
1635         }
1636         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1637
1638         /* Return here if interrupt is disabled */
1639         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1640                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1641                 return IRQ_HANDLED;
1642         }
1643
1644 #ifdef BNX2X_STOP_ON_ERROR
1645         if (unlikely(bp->panic))
1646                 return IRQ_HANDLED;
1647 #endif
1648
1649         mask = 0x2 << bp->fp[0].sb_id;
1650         if (status & mask) {
1651                 struct bnx2x_fastpath *fp = &bp->fp[0];
1652
1653                 prefetch(fp->rx_cons_sb);
1654                 prefetch(fp->tx_cons_sb);
1655                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1656                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1657
1658                 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
1659
1660                 status &= ~mask;
1661         }
1662
1663
1664         if (unlikely(status & 0x1)) {
1665                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1666
1667                 status &= ~0x1;
1668                 if (!status)
1669                         return IRQ_HANDLED;
1670         }
1671
1672         if (status)
1673                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1674                    status);
1675
1676         return IRQ_HANDLED;
1677 }
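
The INT#A handler above decodes a single status word: bit 0 requests slowpath work, and each fastpath status block owns the bit at (0x2 << sb_id). A standalone decode sketch, assuming a single fastpath whose status block has sb_id 0:

#include <stdio.h>

int main(void)
{
        unsigned sb_id = 0;                 /* fastpath 0's status block */
        unsigned status = 0x3;              /* e.g. both sources pending */
        unsigned fp_mask = 0x2 << sb_id;

        if (status & fp_mask) {
                printf("schedule NAPI for fastpath 0\n");
                status &= ~fp_mask;
        }
        if (status & 0x1) {
                printf("queue slowpath task\n");
                status &= ~0x1;
        }
        if (status)
                printf("unknown status bits 0x%x\n", status);
        return 0;
}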
1678
1679 /* end of fast path */
1680
1681 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1682
1683 /* Link */
1684
1685 /*
1686  * General service functions
1687  */
1688
1689 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1690 {
1691         u32 lock_status;
1692         u32 resource_bit = (1 << resource);
1693         int func = BP_FUNC(bp);
1694         u32 hw_lock_control_reg;
1695         int cnt;
1696
1697         /* Validating that the resource is within range */
1698         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1699                 DP(NETIF_MSG_HW,
1700                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1701                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1702                 return -EINVAL;
1703         }
1704
1705         if (func <= 5) {
1706                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1707         } else {
1708                 hw_lock_control_reg =
1709                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1710         }
1711
1712         /* Validating that the resource is not already taken */
1713         lock_status = REG_RD(bp, hw_lock_control_reg);
1714         if (lock_status & resource_bit) {
1715                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1716                    lock_status, resource_bit);
1717                 return -EEXIST;
1718         }
1719
1720         /* Try for 5 seconds, polling every 5ms */
1721         for (cnt = 0; cnt < 1000; cnt++) {
1722                 /* Try to acquire the lock */
1723                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1724                 lock_status = REG_RD(bp, hw_lock_control_reg);
1725                 if (lock_status & resource_bit)
1726                         return 0;
1727
1728                 msleep(5);
1729         }
1730         DP(NETIF_MSG_HW, "Timeout\n");
1731         return -EAGAIN;
1732 }
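
The acquire path above is a polled test-and-set: write the resource bit to the set register, read the status back, and retry every 5ms until the bit appears or the 5-second budget runs out. A standalone sketch of that pattern follows, with the register pair modeled as one variable and the chip's grant arbitration simplified to first-writer-wins.

#include <stdio.h>

static unsigned lock_status;     /* models the HW lock status register */

/* models the write to hw_lock_control_reg + 4 plus the read-back:
 * the chip grants the bit only if nobody else already holds it */
static int try_set(unsigned bit)
{
        if (lock_status & bit)
                return 0;
        lock_status |= bit;
        return 1;
}

static int acquire(unsigned bit, int attempts)
{
        int cnt;

        for (cnt = 0; cnt < attempts; cnt++) {
                if (try_set(bit))
                        return 0;        /* status shows our bit: got it */
                /* the driver does msleep(5) here */
        }
        return -1;                       /* -EAGAIN in the driver */
}

int main(void)
{
        printf("first acquire:  %d\n", acquire(1 << 3, 1000));  /* 0 */
        printf("second acquire: %d\n", acquire(1 << 3, 3));     /* -1 */
        return 0;
}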
1733
1734 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1735 {
1736         u32 lock_status;
1737         u32 resource_bit = (1 << resource);
1738         int func = BP_FUNC(bp);
1739         u32 hw_lock_control_reg;
1740
1741         /* Validating that the resource is within range */
1742         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1743                 DP(NETIF_MSG_HW,
1744                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1745                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1746                 return -EINVAL;
1747         }
1748
1749         if (func <= 5) {
1750                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1751         } else {
1752                 hw_lock_control_reg =
1753                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1754         }
1755
1756         /* Validating that the resource is currently taken */
1757         lock_status = REG_RD(bp, hw_lock_control_reg);
1758         if (!(lock_status & resource_bit)) {
1759                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1760                    lock_status, resource_bit);
1761                 return -EFAULT;
1762         }
1763
1764         REG_WR(bp, hw_lock_control_reg, resource_bit);
1765         return 0;
1766 }
1767
1768 /* HW Lock for shared dual port PHYs */
1769 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1770 {
1771         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1772
1773         mutex_lock(&bp->port.phy_mutex);
1774
1775         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1776             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1777                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1778 }
1779
1780 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1781 {
1782         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1783
1784         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1785             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1786                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1787
1788         mutex_unlock(&bp->port.phy_mutex);
1789 }
1790
1791 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1792 {
1793         /* The GPIO should be swapped if swap register is set and active */
1794         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1795                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1796         int gpio_shift = gpio_num +
1797                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1798         u32 gpio_mask = (1 << gpio_shift);
1799         u32 gpio_reg;
1800
1801         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1802                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1803                 return -EINVAL;
1804         }
1805
1806         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1807         /* read GPIO and mask out all bits except the float bits */
1808         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1809
1810         switch (mode) {
1811         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1812                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1813                    gpio_num, gpio_shift);
1814                 /* clear FLOAT and set CLR */
1815                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1816                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1817                 break;
1818
1819         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1820                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1821                    gpio_num, gpio_shift);
1822                 /* clear FLOAT and set SET */
1823                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1824                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1825                 break;
1826
1827         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1828                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1829                    gpio_num, gpio_shift);
1830                 /* set FLOAT */
1831                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1832                 break;
1833
1834         default:
1835                 break;
1836         }
1837
1838         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1839         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1840
1841         return 0;
1842 }
1843
1844 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1845 {
1846         u32 spio_mask = (1 << spio_num);
1847         u32 spio_reg;
1848
1849         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1850             (spio_num > MISC_REGISTERS_SPIO_7)) {
1851                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1852                 return -EINVAL;
1853         }
1854
1855         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1856         /* read SPIO and mask out all bits except the float bits */
1857         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1858
1859         switch (mode) {
1860         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1861                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1862                 /* clear FLOAT and set CLR */
1863                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1864                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1865                 break;
1866
1867         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1868                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1869                 /* clear FLOAT and set SET */
1870                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1871                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1872                 break;
1873
1874         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1875                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1876                 /* set FLOAT */
1877                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1878                 break;
1879
1880         default:
1881                 break;
1882         }
1883
1884         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1885         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1886
1887         return 0;
1888 }
1889
1890 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1891 {
1892         switch (bp->link_vars.ieee_fc) {
1893         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1894                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1895                                           ADVERTISED_Pause);
1896                 break;
1897         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1898                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1899                                          ADVERTISED_Pause);
1900                 break;
1901         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1902                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1903                 break;
1904         default:
1905                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1906                                           ADVERTISED_Pause);
1907                 break;
1908         }
1909 }
1910
1911 static void bnx2x_link_report(struct bnx2x *bp)
1912 {
1913         if (bp->link_vars.link_up) {
1914                 if (bp->state == BNX2X_STATE_OPEN)
1915                         netif_carrier_on(bp->dev);
1916                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1917
1918                 printk("%d Mbps ", bp->link_vars.line_speed);
1919
1920                 if (bp->link_vars.duplex == DUPLEX_FULL)
1921                         printk("full duplex");
1922                 else
1923                         printk("half duplex");
1924
1925                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1926                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1927                                 printk(", receive ");
1928                                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1929                                         printk("& transmit ");
1930                         } else {
1931                                 printk(", transmit ");
1932                         }
1933                         printk("flow control ON");
1934                 }
1935                 printk("\n");
1936
1937         } else { /* link_down */
1938                 netif_carrier_off(bp->dev);
1939                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1940         }
1941 }
1942
1943 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1944 {
1945         if (!BP_NOMCP(bp)) {
1946                 u8 rc;
1947
1948                 /* Initialize link parameters structure variables */
1949                 /* It is recommended to turn off RX FC for jumbo frames
1950                    for better performance */
1951                 if (IS_E1HMF(bp))
1952                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1953                 else if (bp->dev->mtu > 5000)
1954                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1955                 else
1956                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1957
1958                 bnx2x_acquire_phy_lock(bp);
1959                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1960                 bnx2x_release_phy_lock(bp);
1961
1962                 if (bp->link_vars.link_up)
1963                         bnx2x_link_report(bp);
1964
1965                 bnx2x_calc_fc_adv(bp);
1966
1967                 return rc;
1968         }
1969         BNX2X_ERR("Bootcode is missing - not initializing link\n");
1970         return -EINVAL;
1971 }
1972
1973 static void bnx2x_link_set(struct bnx2x *bp)
1974 {
1975         if (!BP_NOMCP(bp)) {
1976                 bnx2x_acquire_phy_lock(bp);
1977                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1978                 bnx2x_release_phy_lock(bp);
1979
1980                 bnx2x_calc_fc_adv(bp);
1981         } else
1982                 BNX2X_ERR("Bootcode is missing - not setting link\n");
1983 }
1984
1985 static void bnx2x__link_reset(struct bnx2x *bp)
1986 {
1987         if (!BP_NOMCP(bp)) {
1988                 bnx2x_acquire_phy_lock(bp);
1989                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1990                 bnx2x_release_phy_lock(bp);
1991         } else
1992                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
1993 }
1994
1995 static u8 bnx2x_link_test(struct bnx2x *bp)
1996 {
1997         u8 rc;
1998
1999         bnx2x_acquire_phy_lock(bp);
2000         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2001         bnx2x_release_phy_lock(bp);
2002
2003         return rc;
2004 }
2005
2006 /* Calculates the sum of vn_min_rates.
2007    It's needed for further normalizing of the min_rates.
2008
2009    Returns:
2010      sum of vn_min_rates
2011        or
2012      0 - if all the min_rates are 0.
2013      In the latter case the fairness algorithm should be deactivated.
2014      If not all min_rates are zero, then those that are zero will
2015      be set to 1.
2016  */
2017 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2018 {
2019         int i, port = BP_PORT(bp);
2020         u32 wsum = 0;
2021         int all_zero = 1;
2022
2023         for (i = 0; i < E1HVN_MAX; i++) {
2024                 u32 vn_cfg =
2025                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2026                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2027                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2028                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2029                         /* If min rate is zero - set it to 1 */
2030                         if (!vn_min_rate)
2031                                 vn_min_rate = DEF_MIN_RATE;
2032                         else
2033                                 all_zero = 0;
2034
2035                         wsum += vn_min_rate;
2036                 }
2037         }
2038
2039         /* ... only if all min rates are zero - disable FAIRNESS */
2040         if (all_zero)
2041                 return 0;
2042
2043         return wsum;
2044 }
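
A standalone illustration of the weighting rule documented above bnx2x_calc_vn_wsum(): zero min-rates are bumped to a default so the later fairness division never sees zero, but when every configured rate is zero the function returns 0 and the caller disables fairness. The rates and the 100-unit default are illustrative numbers, not values read from shared memory.

#include <stdio.h>

#define N_VNS        4
#define DEFAULT_MIN  100        /* stands in for DEF_MIN_RATE */

static unsigned calc_wsum(const unsigned min_rate[N_VNS])
{
        unsigned wsum = 0;
        int all_zero = 1, i;

        for (i = 0; i < N_VNS; i++) {
                unsigned r = min_rate[i];

                if (!r)
                        r = DEFAULT_MIN;    /* zero rate gets the default */
                else
                        all_zero = 0;
                wsum += r;
        }
        return all_zero ? 0 : wsum;         /* 0 means fairness off */
}

int main(void)
{
        unsigned mixed[N_VNS] = { 0, 2500, 0, 7500 };
        unsigned zeros[N_VNS] = { 0, 0, 0, 0 };

        printf("mixed wsum = %u\n", calc_wsum(mixed)); /* 100+2500+100+7500 */
        printf("all-zero wsum = %u (fairness off)\n", calc_wsum(zeros));
        return 0;
}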
2045
2046 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2047                                    int en_fness,
2048                                    u16 port_rate,
2049                                    struct cmng_struct_per_port *m_cmng_port)
2050 {
2051         u32 r_param = port_rate / 8;
2052         int port = BP_PORT(bp);
2053         int i;
2054
2055         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2056
2057         /* Enable minmax only if we are in e1hmf mode */
2058         if (IS_E1HMF(bp)) {
2059                 u32 fair_periodic_timeout_usec;
2060                 u32 t_fair;
2061
2062                 /* Enable rate shaping and fairness */
2063                 m_cmng_port->flags.cmng_vn_enable = 1;
2064                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2065                 m_cmng_port->flags.rate_shaping_enable = 1;
2066
2067                 if (!en_fness)
2068                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2069                            "  fairness will be disabled\n");
2070
2071                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2072                 m_cmng_port->rs_vars.rs_periodic_timeout =
2073                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2074
2075         /* this is the threshold below which no timer arming will occur;
2076            the 1.25 coefficient makes the threshold a little bigger than
2077            the real time, to compensate for timer inaccuracy */
2078                 m_cmng_port->rs_vars.rs_threshold =
2079                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2080
2081                 /* resolution of fairness timer */
2082                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2083                 /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2084                 t_fair = T_FAIR_COEF / port_rate;
2085
2086                 /* this is the threshold below which we won't arm
2087                    the timer anymore */
2088                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2089
2090                 /* we multiply by 1e3/8 to get bytes/msec.
2091                    We don't want the credits to exceed
2092                    T_FAIR*FAIR_MEM (the algorithm resolution) */
2093                 m_cmng_port->fair_vars.upper_bound =
2094                                                 r_param * t_fair * FAIR_MEM;
2095                 /* since each tick is 4 usec */
2096                 m_cmng_port->fair_vars.fairness_timeout =
2097                                                 fair_periodic_timeout_usec / 4;
2098
2099         } else {
2100                 /* Disable rate shaping and fairness */
2101                 m_cmng_port->flags.cmng_vn_enable = 0;
2102                 m_cmng_port->flags.fairness_enable = 0;
2103                 m_cmng_port->flags.rate_shaping_enable = 0;
2104
2105                 DP(NETIF_MSG_IFUP,
2106                    "Single function mode  minmax will be disabled\n");
2107         }
2108
2109         /* Store it to internal memory */
2110         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2111                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2112                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2113                        ((u32 *)(m_cmng_port))[i]);
2114 }
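
Worked arithmetic for the timing values computed above, as a standalone program. RS_PERIODIC_TIMEOUT_USEC = 100 and T_FAIR_COEF = 10000000 are assumptions chosen to reproduce the "1000 usec at 10G" figure from the comment, not values checked against the driver headers.

#include <stdio.h>

int main(void)
{
        unsigned port_rate = 10000;              /* 10G link, in Mbps */
        unsigned r_param = port_rate / 8;        /* ~bytes per usec */
        unsigned rs_timeout_usec = 100;          /* assumed period */
        unsigned t_fair_coef = 10000000;         /* assumed coefficient */

        /* 1.25x threshold to absorb timer inaccuracy */
        unsigned rs_threshold = (rs_timeout_usec * r_param * 5) / 4;
        unsigned t_fair = t_fair_coef / port_rate;

        printf("r_param=%u bytes/usec\n", r_param);       /* 1250 */
        printf("rs_threshold=%u bytes\n", rs_threshold);  /* 156250 */
        printf("t_fair=%u usec\n", t_fair);               /* 1000 */
        return 0;
}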
2115
2116 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2117                                  u32 wsum, u16 port_rate,
2118                                  struct cmng_struct_per_port *m_cmng_port)
2119 {
2120         struct rate_shaping_vars_per_vn m_rs_vn;
2121         struct fairness_vars_per_vn m_fair_vn;
2122         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2123         u16 vn_min_rate, vn_max_rate;
2124         int i;
2125
2126         /* If function is hidden - set min and max to zeroes */
2127         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2128                 vn_min_rate = 0;
2129                 vn_max_rate = 0;
2130
2131         } else {
2132                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2133                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2134                 /* If FAIRNESS is enabled (not all min rates are zero) and
2135                    the current min rate is zero - set it to 1.
2136                    This is a requirement of the algorithm. */
2137                 if ((vn_min_rate == 0) && wsum)
2138                         vn_min_rate = DEF_MIN_RATE;
2139                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2140                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2141         }
2142
2143         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2144            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2145
2146         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2147         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2148
2149         /* global vn counter - maximal Mbps for this vn */
2150         m_rs_vn.vn_counter.rate = vn_max_rate;
2151
2152         /* quota - number of bytes transmitted in this period */
2153         m_rs_vn.vn_counter.quota =
2154                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2155
2156 #ifdef BNX2X_PER_PROT_QOS
2157         /* per protocol counter */
2158         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2159                 /* maximal Mbps for this protocol */
2160                 m_rs_vn.protocol_counters[protocol].rate =
2161                                                 protocol_max_rate[protocol];
2162                 /* the quota in each timer period -
2163                    number of bytes transmitted in this period */
2164                 m_rs_vn.protocol_counters[protocol].quota =
2165                         (u32)(rs_periodic_timeout_usec *
2166                           ((double)m_rs_vn.
2167                                    protocol_counters[protocol].rate/8));
2168         }
2169 #endif
2170
2171         if (wsum) {
2172                 /* credit for each period of the fairness algorithm:
2173                    number of bytes in T_FAIR (the vns share the port rate).
2174                    wsum should not be larger than 10000, thus
2175                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2176                 m_fair_vn.vn_credit_delta =
2177                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2178                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2179                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2180                    m_fair_vn.vn_credit_delta);
2181         }
2182
2183 #ifdef BNX2X_PER_PROT_QOS
2184         do {
2185                 u32 protocolWeightSum = 0;
2186
2187                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2188                         protocolWeightSum +=
2189                                         drvInit.protocol_min_rate[protocol];
2190                 /* per protocol counter -
2191                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2192                 if (protocolWeightSum > 0) {
2193                         for (protocol = 0;
2194                              protocol < NUM_OF_PROTOCOLS; protocol++)
2195                                 /* credit for each period of the
2196                                    fairness algorithm - number of bytes in
2197                                    T_FAIR (the protocol share the vn rate) */
2198                                 m_fair_vn.protocol_credit_delta[protocol] =
2199                                         (u32)((vn_min_rate / 8) * t_fair *
2200                                         protocol_min_rate / protocolWeightSum);
2201                 }
2202         } while (0);
2203 #endif
2204
2205         /* Store it to internal memory */
2206         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2207                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2208                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2209                        ((u32 *)(&m_rs_vn))[i]);
2210
2211         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2212                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2213                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2214                        ((u32 *)(&m_fair_vn))[i]);
2215 }
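
Worked arithmetic for the per-vn quota and fairness credit above; every input number here (rates, wsum, period, coefficient, threshold) is an illustrative assumption, not a value taken from the driver.

#include <stdio.h>

static unsigned max_u(unsigned a, unsigned b) { return a > b ? a : b; }

int main(void)
{
        unsigned vn_max_rate = 2500;   /* Mbps */
        unsigned vn_min_rate = 1000;   /* Mbps */
        unsigned rs_timeout  = 100;    /* usec per rate-shaping period */
        unsigned wsum        = 10000;
        unsigned t_fair_coef = 10000000;
        unsigned fair_thresh = 131072; /* stands in for the threshold */

        /* bytes this vn may transmit per rate-shaping period */
        unsigned quota = (vn_max_rate * rs_timeout) / 8;

        /* fairness credit per period, floored at twice the threshold */
        unsigned credit = max_u(vn_min_rate * (t_fair_coef / (8 * wsum)),
                                fair_thresh * 2);

        printf("quota=%u bytes  credit=%u bytes\n", quota, credit);
        return 0;
}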
2216
2217 /* This function is called upon link interrupt */
2218 static void bnx2x_link_attn(struct bnx2x *bp)
2219 {
2220         int vn;
2221
2222         /* Make sure that we are synced with the current statistics */
2223         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2224
2225         bnx2x_acquire_phy_lock(bp);
2226         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2227         bnx2x_release_phy_lock(bp);
2228
2229         if (bp->link_vars.link_up) {
2230
2231                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2232                         struct host_port_stats *pstats;
2233
2234                         pstats = bnx2x_sp(bp, port_stats);
2235                         /* reset old bmac stats */
2236                         memset(&(pstats->mac_stx[0]), 0,
2237                                sizeof(struct mac_stx));
2238                 }
2239                 if ((bp->state == BNX2X_STATE_OPEN) ||
2240                     (bp->state == BNX2X_STATE_DISABLED))
2241                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2242         }
2243
2244         /* indicate link status */
2245         bnx2x_link_report(bp);
2246
2247         if (IS_E1HMF(bp)) {
2248                 int func;
2249
2250                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2251                         if (vn == BP_E1HVN(bp))
2252                                 continue;
2253
2254                         func = ((vn << 1) | BP_PORT(bp));
2255
2256                         /* Set the attention towards other drivers
2257                            on the same port */
2258                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2259                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2260                 }
2261         }
2262
2263         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2264                 struct cmng_struct_per_port m_cmng_port;
2265                 u32 wsum;
2266                 int port = BP_PORT(bp);
2267
2268                 /* Init RATE SHAPING and FAIRNESS contexts */
2269                 wsum = bnx2x_calc_vn_wsum(bp);
2270                 bnx2x_init_port_minmax(bp, (int)wsum,
2271                                         bp->link_vars.line_speed,
2272                                         &m_cmng_port);
2273                 if (IS_E1HMF(bp))
2274                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2275                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2276                                         wsum, bp->link_vars.line_speed,
2277                                                      &m_cmng_port);
2278         }
2279 }
2280
2281 static void bnx2x__link_status_update(struct bnx2x *bp)
2282 {
2283         if (bp->state != BNX2X_STATE_OPEN)
2284                 return;
2285
2286         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2287
2288         if (bp->link_vars.link_up)
2289                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2290         else
2291                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2292
2293         /* indicate link status */
2294         bnx2x_link_report(bp);
2295 }
2296
2297 static void bnx2x_pmf_update(struct bnx2x *bp)
2298 {
2299         int port = BP_PORT(bp);
2300         u32 val;
2301
2302         bp->port.pmf = 1;
2303         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2304
2305         /* enable nig attention */
2306         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2307         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2308         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2309
2310         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2311 }
2312
2313 /* end of Link */
2314
2315 /* slow path */
2316
2317 /*
2318  * General service functions
2319  */
2320
2321 /* the slow path queue is odd since completions arrive on the fastpath ring */
2322 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2323                          u32 data_hi, u32 data_lo, int common)
2324 {
2325         int func = BP_FUNC(bp);
2326
2327         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2328            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2329            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2330            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2331            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2332
2333 #ifdef BNX2X_STOP_ON_ERROR
2334         if (unlikely(bp->panic))
2335                 return -EIO;
2336 #endif
2337
2338         spin_lock_bh(&bp->spq_lock);
2339
2340         if (!bp->spq_left) {
2341                 BNX2X_ERR("BUG! SPQ ring full!\n");
2342                 spin_unlock_bh(&bp->spq_lock);
2343                 bnx2x_panic();
2344                 return -EBUSY;
2345         }
2346
2347         /* CID needs port number to be encoded in it */
2348         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2349                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2350                                      HW_CID(bp, cid)));
2351         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2352         if (common)
2353                 bp->spq_prod_bd->hdr.type |=
2354                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2355
2356         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2357         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2358
2359         bp->spq_left--;
2360
2361         if (bp->spq_prod_bd == bp->spq_last_bd) {
2362                 bp->spq_prod_bd = bp->spq;
2363                 bp->spq_prod_idx = 0;
2364                 DP(NETIF_MSG_TIMER, "end of spq\n");
2365
2366         } else {
2367                 bp->spq_prod_bd++;
2368                 bp->spq_prod_idx++;
2369         }
2370
2371         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2372                bp->spq_prod_idx);
2373
2374         spin_unlock_bh(&bp->spq_lock);
2375         return 0;
2376 }
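
The producer handling at the end of bnx2x_sp_post() decrements the free-entry count and wraps the producer back to the first BD when the last one is consumed. A standalone sketch of that ring bookkeeping, with an assumed ring size of 8:

#include <stdio.h>

#define SPQ_SIZE 8      /* illustrative ring size */

struct spq {
        unsigned prod_idx;  /* producer index reported to the chip */
        unsigned left;      /* free entries */
};

static int spq_post(struct spq *q)
{
        if (!q->left)
                return -1;                  /* ring full: -EBUSY above */
        q->left--;

        if (q->prod_idx == SPQ_SIZE - 1)
                q->prod_idx = 0;            /* wrap to the first BD */
        else
                q->prod_idx++;
        return 0;
}

int main(void)
{
        struct spq q = { .prod_idx = 0, .left = SPQ_SIZE };
        int i;

        for (i = 0; i < SPQ_SIZE + 1; i++)  /* last post must fail */
                printf("post %d -> rc %d, prod_idx %u, left %u\n",
                       i, spq_post(&q), q.prod_idx, q.left);
        return 0;
}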
2377
2378 /* acquire split MCP access lock register */
2379 static int bnx2x_acquire_alr(struct bnx2x *bp)
2380 {
2381         u32 i, j, val;
2382         int rc = 0;
2383
2384         might_sleep();
2385         i = 100;
2386         for (j = 0; j < i*10; j++) {
2387                 val = (1UL << 31);
2388                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2389                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2390                 if (val & (1L << 31))
2391                         break;
2392
2393                 msleep(5);
2394         }
2395         if (!(val & (1L << 31))) {
2396                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2397                 rc = -EBUSY;
2398         }
2399
2400         return rc;
2401 }
2402
2403 /* release split MCP access lock register */
2404 static void bnx2x_release_alr(struct bnx2x *bp)
2405 {
2406         u32 val = 0;
2407
2408         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2409 }
2410
2411 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2412 {
2413         struct host_def_status_block *def_sb = bp->def_status_blk;
2414         u16 rc = 0;
2415
2416         barrier(); /* status block is written to by the chip */
2417         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2418                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2419                 rc |= 1;
2420         }
2421         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2422                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2423                 rc |= 2;
2424         }
2425         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2426                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2427                 rc |= 4;
2428         }
2429         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2430                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2431                 rc |= 8;
2432         }
2433         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2434                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2435                 rc |= 16;
2436         }
2437         return rc;
2438 }
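
The value returned above is a bitmask with one bit per default-status-block index that moved, following the rc |= 1/2/4/8/16 assignments. A standalone decode example:

#include <stdio.h>

int main(void)
{
        unsigned rc = 1 | 4;   /* e.g. attn bits and USTORM index changed */

        if (rc & 1)  printf("attention bits index changed\n");
        if (rc & 2)  printf("CSTORM def-sb index changed\n");
        if (rc & 4)  printf("USTORM def-sb index changed\n");
        if (rc & 8)  printf("XSTORM def-sb index changed\n");
        if (rc & 16) printf("TSTORM def-sb index changed\n");
        return 0;
}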
2439
2440 /*
2441  * slow path service functions
2442  */
2443
2444 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2445 {
2446         int port = BP_PORT(bp);
2447         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2448                        COMMAND_REG_ATTN_BITS_SET);
2449         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2450                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2451         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2452                                        NIG_REG_MASK_INTERRUPT_PORT0;
2453         u32 aeu_mask;
2454
2455         if (bp->attn_state & asserted)
2456                 BNX2X_ERR("IGU ERROR\n");
2457
2458         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2459         aeu_mask = REG_RD(bp, aeu_addr);
2460
2461         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2462            aeu_mask, asserted);
2463         aeu_mask &= ~(asserted & 0xff);
2464         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2465
2466         REG_WR(bp, aeu_addr, aeu_mask);
2467         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2468
2469         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2470         bp->attn_state |= asserted;
2471         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2472
2473         if (asserted & ATTN_HARD_WIRED_MASK) {
2474                 if (asserted & ATTN_NIG_FOR_FUNC) {
2475
2476                         /* save nig interrupt mask */
2477                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2478                         REG_WR(bp, nig_int_mask_addr, 0);
2479
2480                         bnx2x_link_attn(bp);
2481
2482                         /* handle unicore attn? */
2483                 }
2484                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2485                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2486
2487                 if (asserted & GPIO_2_FUNC)
2488                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2489
2490                 if (asserted & GPIO_3_FUNC)
2491                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2492
2493                 if (asserted & GPIO_4_FUNC)
2494                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2495
2496                 if (port == 0) {
2497                         if (asserted & ATTN_GENERAL_ATTN_1) {
2498                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2499                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2500                         }
2501                         if (asserted & ATTN_GENERAL_ATTN_2) {
2502                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2503                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2504                         }
2505                         if (asserted & ATTN_GENERAL_ATTN_3) {
2506                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2507                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2508                         }
2509                 } else {
2510                         if (asserted & ATTN_GENERAL_ATTN_4) {
2511                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2512                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2513                         }
2514                         if (asserted & ATTN_GENERAL_ATTN_5) {
2515                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2516                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2517                         }
2518                         if (asserted & ATTN_GENERAL_ATTN_6) {
2519                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2520                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2521                         }
2522                 }
2523
2524         } /* if hardwired */
2525
2526         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2527            asserted, hc_addr);
2528         REG_WR(bp, hc_addr, asserted);
2529
2530         /* now set back the mask */
2531         if (asserted & ATTN_NIG_FOR_FUNC)
2532                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2533 }
2534
2535 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2536 {
2537         int port = BP_PORT(bp);
2538         int reg_offset;
2539         u32 val;
2540
2541         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2542                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2543
2544         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2545
2546                 val = REG_RD(bp, reg_offset);
2547                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2548                 REG_WR(bp, reg_offset, val);
2549
2550                 BNX2X_ERR("SPIO5 hw attention\n");
2551
2552                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2553                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2554                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2555                         /* Fan failure attention */
2556
2557                         /* The PHY reset is controlled by GPIO 1 */
2558                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2559                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2560                         /* Low power mode is controlled by GPIO 2 */
2561                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2562                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2563                         /* mark the failure */
2564                         bp->link_params.ext_phy_config &=
2565                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2566                         bp->link_params.ext_phy_config |=
2567                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2568                         SHMEM_WR(bp,
2569                                  dev_info.port_hw_config[port].
2570                                                         external_phy_config,
2571                                  bp->link_params.ext_phy_config);
2572                         /* log the failure */
2573                         printk(KERN_ERR PFX "Fan Failure on Network"
2574                                " Controller %s has caused the driver to"
2575                                " shut down the card to prevent permanent"
2576                                " damage.  Please contact Dell Support for"
2577                                " assistance\n", bp->dev->name);
2578                         break;
2579
2580                 default:
2581                         break;
2582                 }
2583         }
2584
2585         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2586
2587                 val = REG_RD(bp, reg_offset);
2588                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2589                 REG_WR(bp, reg_offset, val);
2590
2591                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2592                           (attn & HW_INTERRUT_ASSERT_SET_0));
2593                 bnx2x_panic();
2594         }
2595 }
2596
2597 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2598 {
2599         u32 val;
2600
2601         if (attn & BNX2X_DOORQ_ASSERT) {
2602
2603                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2604                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2605                 /* DORQ discard attention */
2606                 if (val & 0x2)
2607                         BNX2X_ERR("FATAL error from DORQ\n");
2608         }
2609
2610         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2611
2612                 int port = BP_PORT(bp);
2613                 int reg_offset;
2614
2615                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2616                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2617
2618                 val = REG_RD(bp, reg_offset);
2619                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2620                 REG_WR(bp, reg_offset, val);
2621
2622                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2623                           (attn & HW_INTERRUT_ASSERT_SET_1));
2624                 bnx2x_panic();
2625         }
2626 }
2627
2628 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2629 {
2630         u32 val;
2631
2632         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2633
2634                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2635                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2636                 /* CFC error attention */
2637                 if (val & 0x2)
2638                         BNX2X_ERR("FATAL error from CFC\n");
2639         }
2640
2641         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2642
2643                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2644                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2645                 /* RQ_USDMDP_FIFO_OVERFLOW */
2646                 if (val & 0x18000)
2647                         BNX2X_ERR("FATAL error from PXP\n");
2648         }
2649
2650         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2651
2652                 int port = BP_PORT(bp);
2653                 int reg_offset;
2654
2655                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2656                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2657
2658                 val = REG_RD(bp, reg_offset);
2659                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2660                 REG_WR(bp, reg_offset, val);
2661
2662                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2663                           (attn & HW_INTERRUT_ASSERT_SET_2));
2664                 bnx2x_panic();
2665         }
2666 }
2667
2668 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2669 {
2670         u32 val;
2671
2672         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2673
2674                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2675                         int func = BP_FUNC(bp);
2676
2677                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2678                         bnx2x__link_status_update(bp);
2679                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2680                                                         DRV_STATUS_PMF)
2681                                 bnx2x_pmf_update(bp);
2682
2683                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2684
2685                         BNX2X_ERR("MC assert!\n");
2686                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2687                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2688                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2689                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2690                         bnx2x_panic();
2691
2692                 } else if (attn & BNX2X_MCP_ASSERT) {
2693
2694                         BNX2X_ERR("MCP assert!\n");
2695                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2696                         bnx2x_fw_dump(bp);
2697
2698                 } else
2699                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2700         }
2701
2702         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2703                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2704                 if (attn & BNX2X_GRC_TIMEOUT) {
2705                         val = CHIP_IS_E1H(bp) ?
2706                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2707                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2708                 }
2709                 if (attn & BNX2X_GRC_RSV) {
2710                         val = CHIP_IS_E1H(bp) ?
2711                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2712                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2713                 }
2714                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2715         }
2716 }
2717
2718 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2719 {
2720         struct attn_route attn;
2721         struct attn_route group_mask;
2722         int port = BP_PORT(bp);
2723         int index;
2724         u32 reg_addr;
2725         u32 val;
2726         u32 aeu_mask;
2727
2728         /* need to take HW lock because MCP or other port might also
2729            try to handle this event */
2730         bnx2x_acquire_alr(bp);
2731
2732         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2733         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2734         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2735         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2736         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2737            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2738
2739         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2740                 if (deasserted & (1 << index)) {
2741                         group_mask = bp->attn_group[index];
2742
2743                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2744                            index, group_mask.sig[0], group_mask.sig[1],
2745                            group_mask.sig[2], group_mask.sig[3]);
2746
2747                         bnx2x_attn_int_deasserted3(bp,
2748                                         attn.sig[3] & group_mask.sig[3]);
2749                         bnx2x_attn_int_deasserted1(bp,
2750                                         attn.sig[1] & group_mask.sig[1]);
2751                         bnx2x_attn_int_deasserted2(bp,
2752                                         attn.sig[2] & group_mask.sig[2]);
2753                         bnx2x_attn_int_deasserted0(bp,
2754                                         attn.sig[0] & group_mask.sig[0]);
2755
2756                         if ((attn.sig[0] & group_mask.sig[0] &
2757                                                 HW_PRTY_ASSERT_SET_0) ||
2758                             (attn.sig[1] & group_mask.sig[1] &
2759                                                 HW_PRTY_ASSERT_SET_1) ||
2760                             (attn.sig[2] & group_mask.sig[2] &
2761                                                 HW_PRTY_ASSERT_SET_2))
2762                                 BNX2X_ERR("FATAL HW block parity attention\n");
2763                 }
2764         }
2765
2766         bnx2x_release_alr(bp);
2767
2768         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2769
2770         val = ~deasserted;
2771         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2772            val, reg_addr);
2773         REG_WR(bp, reg_addr, val);
2774
2775         if (~bp->attn_state & deasserted)
2776                 BNX2X_ERR("IGU ERROR\n");
2777
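             /* re-enable the just deasserted bits in the per-port AEU mask;
              * the HW lock serializes this read-modify-write against the
              * assertion path, which clears the same bits
              */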
2778         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2779                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2780
2781         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2782         aeu_mask = REG_RD(bp, reg_addr);
2783
2784         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2785            aeu_mask, deasserted);
2786         aeu_mask |= (deasserted & 0xff);
2787         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2788
2789         REG_WR(bp, reg_addr, aeu_mask);
2790         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2791
2792         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2793         bp->attn_state &= ~deasserted;
2794         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2795 }
2796
2797 static void bnx2x_attn_int(struct bnx2x *bp)
2798 {
2799         /* read local copy of bits */
2800         u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2801         u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2802         u32 attn_state = bp->attn_state;
2803
2804         /* look for changed bits */
2805         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2806         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2807
2808         DP(NETIF_MSG_HW,
2809            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2810            attn_bits, attn_ack, asserted, deasserted);
2811
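             /* sanity check: a bit on which attn_bits and attn_ack agree is
              * stable; if such a bit still differs from the soft attn_state
              * copy, the driver lost track of an attention transition
              */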
2812         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2813                 BNX2X_ERR("BAD attention state\n");
2814
2815         /* handle bits that were raised */
2816         if (asserted)
2817                 bnx2x_attn_int_asserted(bp, asserted);
2818
2819         if (deasserted)
2820                 bnx2x_attn_int_deasserted(bp, deasserted);
2821 }
2822
2823 static void bnx2x_sp_task(struct work_struct *work)
2824 {
2825         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2826         u16 status;
2827
2828
2829         /* Return here if interrupt is disabled */
2830         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2831                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2832                 return;
2833         }
2834
2835         status = bnx2x_update_dsb_idx(bp);
2836 /*      if (status == 0)                                     */
2837 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2838
2839         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2840
2841         /* HW attentions */
2842         if (status & 0x1)
2843                 bnx2x_attn_int(bp);
2844
2845         /* CStorm events: query_stats, port delete ramrod */
2846         if (status & 0x2)
2847                 bp->stats_pending = 0;
2848
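             /* ack the default status block indices; only the final ack
              * (IGU_INT_ENABLE) re-arms the slow path interrupt that the
              * ISR disabled
              */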
2849         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2850                      IGU_INT_NOP, 1);
2851         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2852                      IGU_INT_NOP, 1);
2853         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2854                      IGU_INT_NOP, 1);
2855         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2856                      IGU_INT_NOP, 1);
2857         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2858                      IGU_INT_ENABLE, 1);
2859
2860 }
2861
2862 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2863 {
2864         struct net_device *dev = dev_instance;
2865         struct bnx2x *bp = netdev_priv(dev);
2866
2867         /* Return here if interrupt is disabled */
2868         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2869                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2870                 return IRQ_HANDLED;
2871         }
2872
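             /* mask further slow path interrupts until bnx2x_sp_task() has
              * run; its final ack (IGU_INT_ENABLE) re-arms them
              */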
2873         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2874
2875 #ifdef BNX2X_STOP_ON_ERROR
2876         if (unlikely(bp->panic))
2877                 return IRQ_HANDLED;
2878 #endif
2879
2880         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2881
2882         return IRQ_HANDLED;
2883 }
2884
2885 /* end of slow path */
2886
2887 /* Statistics */
2888
2889 /****************************************************************************
2890 * Macros
2891 ****************************************************************************/
2892
2893 /* sum[hi:lo] += add[hi:lo] */
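     /* unsigned wraparound detects the carry: after s_lo += a_lo, s_lo is
      * smaller than a_lo iff the 32-bit add overflowed, e.g.
      * 0x00000001_ffffffff + 2 leaves s_lo = 1 < 2 and carries 1 into s_hi,
      * giving 0x00000002_00000001
      */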
2894 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2895         do { \
2896                 s_lo += a_lo; \
2897                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2898         } while (0)
2899
2900 /* difference = minuend - subtrahend */
2901 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2902         do { \
2903                 if (m_lo < s_lo) { \
2904                         /* underflow */ \
2905                         d_hi = m_hi - s_hi; \
2906                         if (d_hi > 0) { \
2907                                 /* we can 'loan' 1 */ \
2908                                 d_hi--; \
2909                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2910                         } else { \
2911                                 /* m_hi <= s_hi */ \
2912                                 d_hi = 0; \
2913                                 d_lo = 0; \
2914                         } \
2915                 } else { \
2916                         /* m_lo >= s_lo */ \
2917                         if (m_hi < s_hi) { \
2918                                 d_hi = 0; \
2919                                 d_lo = 0; \
2920                         } else { \
2921                                 /* m_hi >= s_hi */ \
2922                                 d_hi = m_hi - s_hi; \
2923                                 d_lo = m_lo - s_lo; \
2924                         } \
2925                 } \
2926         } while (0)
2927
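     /* mac_stx[0] holds the previous raw MAC snapshot, mac_stx[1] the
      * running total: store the new snapshot and accumulate the delta
      */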
2928 #define UPDATE_STAT64(s, t) \
2929         do { \
2930                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2931                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2932                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2933                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2934                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2935                        pstats->mac_stx[1].t##_lo, diff.lo); \
2936         } while (0)
2937
2938 #define UPDATE_STAT64_NIG(s, t) \
2939         do { \
2940                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2941                         diff.lo, new->s##_lo, old->s##_lo); \
2942                 ADD_64(estats->t##_hi, diff.hi, \
2943                        estats->t##_lo, diff.lo); \
2944         } while (0)
2945
2946 /* sum[hi:lo] += add */
2947 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2948         do { \
2949                 s_lo += a; \
2950                 s_hi += (s_lo < a) ? 1 : 0; \
2951         } while (0)
2952
2953 #define UPDATE_EXTEND_STAT(s) \
2954         do { \
2955                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2956                               pstats->mac_stx[1].s##_lo, \
2957                               new->s); \
2958         } while (0)
2959
2960 #define UPDATE_EXTEND_TSTAT(s, t) \
2961         do { \
2962                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2963                 old_tclient->s = le32_to_cpu(tclient->s); \
2964                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2965         } while (0)
2966
2967 #define UPDATE_EXTEND_XSTAT(s, t) \
2968         do { \
2969                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2970                 old_xclient->s = le32_to_cpu(xclient->s); \
2971                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2972         } while (0)
2973
2974 /*
2975  * General service functions
2976  */
2977
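     /* the net_device_stats counters are unsigned long, so a 32-bit kernel
      * can only return the low dword of a hi/lo counter pair
      */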
2978 static inline long bnx2x_hilo(u32 *hiref)
2979 {
2980         u32 lo = *(hiref + 1);
2981 #if (BITS_PER_LONG == 64)
2982         u32 hi = *hiref;
2983
2984         return HILO_U64(hi, lo);
2985 #else
2986         return lo;
2987 #endif
2988 }
2989
2990 /*
2991  * Init service functions
2992  */
2993
2994 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2995 {
2996         if (!bp->stats_pending) {
2997                 struct eth_query_ramrod_data ramrod_data = {0};
2998                 int rc;
2999
3000                 ramrod_data.drv_counter = bp->stats_counter++;
3001                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3002                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3003
3004                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3005                                    ((u32 *)&ramrod_data)[1],
3006                                    ((u32 *)&ramrod_data)[0], 0);
3007                 if (rc == 0) {
3008                         /* stats ramrod has its own slot on the spq */
3009                         bp->spq_left++;
3010                         bp->stats_pending = 1;
3011                 }
3012         }
3013 }
3014
3015 static void bnx2x_stats_init(struct bnx2x *bp)
3016 {
3017         int port = BP_PORT(bp);
3018
3019         bp->executer_idx = 0;
3020         bp->stats_counter = 0;
3021
3022         /* port stats */
3023         if (!BP_NOMCP(bp))
3024                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3025         else
3026                 bp->port.port_stx = 0;
3027         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3028
3029         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3030         bp->port.old_nig_stats.brb_discard =
3031                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3032         bp->port.old_nig_stats.brb_truncate =
3033                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3034         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3035                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3036         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3037                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3038
3039         /* function stats */
3040         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3041         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3042         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3043         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3044
3045         bp->stats_state = STATS_STATE_DISABLED;
3046         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3047                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3048 }
3049
3050 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3051 {
3052         struct dmae_command *dmae = &bp->stats_dmae;
3053         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3054
3055         *stats_comp = DMAE_COMP_VAL;
3056
3057         /* loader */
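             /* commands were queued in the sp dmae array: post a loader
              * command that feeds them to the DMAE engine, chaining
              * execution through the GO registers
              */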
3058         if (bp->executer_idx) {
3059                 int loader_idx = PMF_DMAE_C(bp);
3060
3061                 memset(dmae, 0, sizeof(struct dmae_command));
3062
3063                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3064                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3065                                 DMAE_CMD_DST_RESET |
3066 #ifdef __BIG_ENDIAN
3067                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3068 #else
3069                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3070 #endif
3071                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3072                                                DMAE_CMD_PORT_0) |
3073                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3074                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3075                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3076                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3077                                      sizeof(struct dmae_command) *
3078                                      (loader_idx + 1)) >> 2;
3079                 dmae->dst_addr_hi = 0;
3080                 dmae->len = sizeof(struct dmae_command) >> 2;
3081                 if (CHIP_IS_E1(bp))
3082                         dmae->len--;
3083                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3084                 dmae->comp_addr_hi = 0;
3085                 dmae->comp_val = 1;
3086
3087                 *stats_comp = 0;
3088                 bnx2x_post_dmae(bp, dmae, loader_idx);
3089
3090         } else if (bp->func_stx) {
3091                 *stats_comp = 0;
3092                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3093         }
3094 }
3095
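     /* poll the completion word written by the last DMAE command in the
      * chain; the wait is bounded to ten msleep(1) iterations
      */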
3096 static int bnx2x_stats_comp(struct bnx2x *bp)
3097 {
3098         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3099         int cnt = 10;
3100
3101         might_sleep();
3102         while (*stats_comp != DMAE_COMP_VAL) {
3103                 if (!cnt) {
3104                         BNX2X_ERR("timeout waiting for stats to finish\n");
3105                         break;
3106                 }
3107                 cnt--;
3108                 msleep(1);
3109         }
3110         return 1;
3111 }
3112
3113 /*
3114  * Statistics service functions
3115  */
3116
3117 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3118 {
3119         struct dmae_command *dmae;
3120         u32 opcode;
3121         int loader_idx = PMF_DMAE_C(bp);
3122         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3123
3124         /* sanity */
3125         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3126                 BNX2X_ERR("BUG!\n");
3127                 return;
3128         }
3129
3130         bp->executer_idx = 0;
3131
3132         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3133                   DMAE_CMD_C_ENABLE |
3134                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3135 #ifdef __BIG_ENDIAN
3136                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3137 #else
3138                   DMAE_CMD_ENDIANITY_DW_SWAP |
3139 #endif
3140                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3141                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3142
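             /* the port stats area in shmem is larger than a single DMAE
              * read allows, so fetch it in two chunks of at most
              * DMAE_LEN32_RD_MAX dwords
              */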
3143         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3144         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3145         dmae->src_addr_lo = bp->port.port_stx >> 2;
3146         dmae->src_addr_hi = 0;
3147         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3148         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3149         dmae->len = DMAE_LEN32_RD_MAX;
3150         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3151         dmae->comp_addr_hi = 0;
3152         dmae->comp_val = 1;
3153
3154         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3155         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3156         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3157         dmae->src_addr_hi = 0;
3158         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3159                                    DMAE_LEN32_RD_MAX * 4);
3160         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3161                                    DMAE_LEN32_RD_MAX * 4);
3162         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3163         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3164         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3165         dmae->comp_val = DMAE_COMP_VAL;
3166
3167         *stats_comp = 0;
3168         bnx2x_hw_stats_post(bp);
3169         bnx2x_stats_comp(bp);
3170 }
3171
3172 static void bnx2x_port_stats_init(struct bnx2x *bp)
3173 {
3174         struct dmae_command *dmae;
3175         int port = BP_PORT(bp);
3176         int vn = BP_E1HVN(bp);
3177         u32 opcode;
3178         int loader_idx = PMF_DMAE_C(bp);
3179         u32 mac_addr;
3180         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3181
3182         /* sanity */
3183         if (!bp->link_vars.link_up || !bp->port.pmf) {
3184                 BNX2X_ERR("BUG!\n");
3185                 return;
3186         }
3187
3188         bp->executer_idx = 0;
3189
3190         /* MCP */
3191         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3192                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3193                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3194 #ifdef __BIG_ENDIAN
3195                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3196 #else
3197                   DMAE_CMD_ENDIANITY_DW_SWAP |
3198 #endif
3199                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3200                   (vn << DMAE_CMD_E1HVN_SHIFT));
3201
3202         if (bp->port.port_stx) {
3203
3204                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3205                 dmae->opcode = opcode;
3206                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3207                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3208                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3209                 dmae->dst_addr_hi = 0;
3210                 dmae->len = sizeof(struct host_port_stats) >> 2;
3211                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3212                 dmae->comp_addr_hi = 0;
3213                 dmae->comp_val = 1;
3214         }
3215
3216         if (bp->func_stx) {
3217
3218                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3219                 dmae->opcode = opcode;
3220                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3221                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3222                 dmae->dst_addr_lo = bp->func_stx >> 2;
3223                 dmae->dst_addr_hi = 0;
3224                 dmae->len = sizeof(struct host_func_stats) >> 2;
3225                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3226                 dmae->comp_addr_hi = 0;
3227                 dmae->comp_val = 1;
3228         }
3229
3230         /* MAC */
3231         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3232                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3233                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3234 #ifdef __BIG_ENDIAN
3235                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3236 #else
3237                   DMAE_CMD_ENDIANITY_DW_SWAP |
3238 #endif
3239                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3240                   (vn << DMAE_CMD_E1HVN_SHIFT));
3241
3242         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3243
3244                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3245                                    NIG_REG_INGRESS_BMAC0_MEM);
3246
3247                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3248                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3249                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3250                 dmae->opcode = opcode;
3251                 dmae->src_addr_lo = (mac_addr +
3252                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3253                 dmae->src_addr_hi = 0;
3254                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3255                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3256                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3257                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3258                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3259                 dmae->comp_addr_hi = 0;
3260                 dmae->comp_val = 1;
3261
3262                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3263                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3264                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3265                 dmae->opcode = opcode;
3266                 dmae->src_addr_lo = (mac_addr +
3267                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3268                 dmae->src_addr_hi = 0;
3269                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3270                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3271                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3272                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3273                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3274                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3275                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3276                 dmae->comp_addr_hi = 0;
3277                 dmae->comp_val = 1;
3278
3279         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3280
3281                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3282
3283                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT) */
3284                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3285                 dmae->opcode = opcode;
3286                 dmae->src_addr_lo = (mac_addr +
3287                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3288                 dmae->src_addr_hi = 0;
3289                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3290                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3291                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3292                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3293                 dmae->comp_addr_hi = 0;
3294                 dmae->comp_val = 1;
3295
3296                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3297                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3298                 dmae->opcode = opcode;
3299                 dmae->src_addr_lo = (mac_addr +
3300                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3301                 dmae->src_addr_hi = 0;
3302                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3303                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3304                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3305                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3306                 dmae->len = 1;
3307                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3308                 dmae->comp_addr_hi = 0;
3309                 dmae->comp_val = 1;
3310
3311                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT) */
3312                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3313                 dmae->opcode = opcode;
3314                 dmae->src_addr_lo = (mac_addr +
3315                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3316                 dmae->src_addr_hi = 0;
3317                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3318                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3319                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3320                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3321                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3322                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3323                 dmae->comp_addr_hi = 0;
3324                 dmae->comp_val = 1;
3325         }
3326
3327         /* NIG */
3328         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3329         dmae->opcode = opcode;
3330         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3331                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3332         dmae->src_addr_hi = 0;
3333         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3334         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3335         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3336         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3337         dmae->comp_addr_hi = 0;
3338         dmae->comp_val = 1;
3339
3340         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3341         dmae->opcode = opcode;
3342         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3343                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3344         dmae->src_addr_hi = 0;
3345         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3346                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3347         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3348                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3349         dmae->len = (2*sizeof(u32)) >> 2;
3350         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3351         dmae->comp_addr_hi = 0;
3352         dmae->comp_val = 1;
3353
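             /* the last command in the chain completes to the stats_comp
              * word in host memory (DMAE_COMP_VAL) rather than to a GO
              * register, so bnx2x_stats_comp() can see the chain finish
              */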
3354         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3355         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3356                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3357                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3358 #ifdef __BIG_ENDIAN
3359                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3360 #else
3361                         DMAE_CMD_ENDIANITY_DW_SWAP |
3362 #endif
3363                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3364                         (vn << DMAE_CMD_E1HVN_SHIFT));
3365         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3366                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3367         dmae->src_addr_hi = 0;
3368         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3369                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3370         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3371                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3372         dmae->len = (2*sizeof(u32)) >> 2;
3373         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3374         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3375         dmae->comp_val = DMAE_COMP_VAL;
3376
3377         *stats_comp = 0;
3378 }
3379
3380 static void bnx2x_func_stats_init(struct bnx2x *bp)
3381 {
3382         struct dmae_command *dmae = &bp->stats_dmae;
3383         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3384
3385         /* sanity */
3386         if (!bp->func_stx) {
3387                 BNX2X_ERR("BUG!\n");
3388                 return;
3389         }
3390
3391         bp->executer_idx = 0;
3392         memset(dmae, 0, sizeof(struct dmae_command));
3393
3394         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3395                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3396                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3397 #ifdef __BIG_ENDIAN
3398                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3399 #else
3400                         DMAE_CMD_ENDIANITY_DW_SWAP |
3401 #endif
3402                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3403                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3404         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3405         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3406         dmae->dst_addr_lo = bp->func_stx >> 2;
3407         dmae->dst_addr_hi = 0;
3408         dmae->len = sizeof(struct host_func_stats) >> 2;
3409         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3410         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3411         dmae->comp_val = DMAE_COMP_VAL;
3412
3413         *stats_comp = 0;
3414 }
3415
3416 static void bnx2x_stats_start(struct bnx2x *bp)
3417 {
3418         if (bp->port.pmf)
3419                 bnx2x_port_stats_init(bp);
3420
3421         else if (bp->func_stx)
3422                 bnx2x_func_stats_init(bp);
3423
3424         bnx2x_hw_stats_post(bp);
3425         bnx2x_storm_stats_post(bp);
3426 }
3427
3428 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3429 {
3430         bnx2x_stats_comp(bp);
3431         bnx2x_stats_pmf_update(bp);
3432         bnx2x_stats_start(bp);
3433 }
3434
3435 static void bnx2x_stats_restart(struct bnx2x *bp)
3436 {
3437         bnx2x_stats_comp(bp);
3438         bnx2x_stats_start(bp);
3439 }
3440
3441 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3442 {
3443         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3444         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3445         struct regpair diff;
3446
3447         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3448         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3449         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3450         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3451         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3452         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3453         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3454         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3455         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3456         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3457         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3458         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3459         UPDATE_STAT64(tx_stat_gt127,
3460                                 tx_stat_etherstatspkts65octetsto127octets);
3461         UPDATE_STAT64(tx_stat_gt255,
3462                                 tx_stat_etherstatspkts128octetsto255octets);
3463         UPDATE_STAT64(tx_stat_gt511,
3464                                 tx_stat_etherstatspkts256octetsto511octets);
3465         UPDATE_STAT64(tx_stat_gt1023,
3466                                 tx_stat_etherstatspkts512octetsto1023octets);
3467         UPDATE_STAT64(tx_stat_gt1518,
3468                                 tx_stat_etherstatspkts1024octetsto1522octets);
3469         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3470         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3471         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3472         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3473         UPDATE_STAT64(tx_stat_gterr,
3474                                 tx_stat_dot3statsinternalmactransmiterrors);
3475         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3476 }
3477
3478 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3479 {
3480         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3481         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3482
3483         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3484         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3485         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3486         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3487         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3488         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3489         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3490         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3491         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3492         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3493         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3494         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3495         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3496         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3497         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3498         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3499         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3500         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3501         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3502         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3503         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3504         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3505         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3506         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3507         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3508         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3509         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3510         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3511         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3512         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3513         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3514 }
3515
3516 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3517 {
3518         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3519         struct nig_stats *old = &(bp->port.old_nig_stats);
3520         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3521         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3522         struct regpair diff;
3523
3524         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3525                 bnx2x_bmac_stats_update(bp);
3526
3527         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3528                 bnx2x_emac_stats_update(bp);
3529
3530         else { /* unreached */
3531                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3532                 return -1;
3533         }
3534
3535         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3536                       new->brb_discard - old->brb_discard);
3537         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3538                       new->brb_truncate - old->brb_truncate);
3539
3540         UPDATE_STAT64_NIG(egress_mac_pkt0,
3541                                         etherstatspkts1024octetsto1522octets);
3542         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3543
3544         memcpy(old, new, sizeof(struct nig_stats));
3545
3546         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3547                sizeof(struct mac_stx));
3548         estats->brb_drop_hi = pstats->brb_drop_hi;
3549         estats->brb_drop_lo = pstats->brb_drop_lo;
3550
3551         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3552
3553         return 0;
3554 }
3555
3556 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3557 {
3558         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3559         int cl_id = BP_CL_ID(bp);
3560         struct tstorm_per_port_stats *tport =
3561                                 &stats->tstorm_common.port_statistics;
3562         struct tstorm_per_client_stats *tclient =
3563                         &stats->tstorm_common.client_statistics[cl_id];
3564         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3565         struct xstorm_per_client_stats *xclient =
3566                         &stats->xstorm_common.client_statistics[cl_id];
3567         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3568         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3569         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3570         u32 diff;
3571
3572         /* are storm stats valid? */
3573         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3574                                                         bp->stats_counter) {
3575                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3576                    "  tstorm counter (%d) != stats_counter (%d)\n",
3577                    le16_to_cpu(tclient->stats_counter), bp->stats_counter);
3578                 return -1;
3579         }
3580         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3581                                                         bp->stats_counter) {
3582                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3583                    "  xstorm counter (%d) != stats_counter (%d)\n",
3584                    le16_to_cpu(xclient->stats_counter), bp->stats_counter);
3585                 return -2;
3586         }
3587
3588         fstats->total_bytes_received_hi =
3589         fstats->valid_bytes_received_hi =
3590                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3591         fstats->total_bytes_received_lo =
3592         fstats->valid_bytes_received_lo =
3593                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3594
3595         estats->error_bytes_received_hi =
3596                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3597         estats->error_bytes_received_lo =
3598                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3599         ADD_64(estats->error_bytes_received_hi,
3600                estats->rx_stat_ifhcinbadoctets_hi,
3601                estats->error_bytes_received_lo,
3602                estats->rx_stat_ifhcinbadoctets_lo);
3603
3604         ADD_64(fstats->total_bytes_received_hi,
3605                estats->error_bytes_received_hi,
3606                fstats->total_bytes_received_lo,
3607                estats->error_bytes_received_lo);
3608
3609         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3610         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3611                                 total_multicast_packets_received);
3612         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3613                                 total_broadcast_packets_received);
3614
3615         fstats->total_bytes_transmitted_hi =
3616                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3617         fstats->total_bytes_transmitted_lo =
3618                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3619
3620         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3621                                 total_unicast_packets_transmitted);
3622         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3623                                 total_multicast_packets_transmitted);
3624         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3625                                 total_broadcast_packets_transmitted);
3626
3627         memcpy(estats, &(fstats->total_bytes_received_hi),
3628                sizeof(struct host_func_stats) - 2*sizeof(u32));
3629
3630         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3631         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3632         estats->brb_truncate_discard =
3633                                 le32_to_cpu(tport->brb_truncate_discard);
3634         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3635
3636         old_tclient->rcv_unicast_bytes.hi =
3637                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3638         old_tclient->rcv_unicast_bytes.lo =
3639                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3640         old_tclient->rcv_broadcast_bytes.hi =
3641                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3642         old_tclient->rcv_broadcast_bytes.lo =
3643                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3644         old_tclient->rcv_multicast_bytes.hi =
3645                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3646         old_tclient->rcv_multicast_bytes.lo =
3647                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3648         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3649
3650         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3651         old_tclient->packets_too_big_discard =
3652                                 le32_to_cpu(tclient->packets_too_big_discard);
3653         estats->no_buff_discard =
3654         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3655         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3656
3657         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3658         old_xclient->unicast_bytes_sent.hi =
3659                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3660         old_xclient->unicast_bytes_sent.lo =
3661                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3662         old_xclient->multicast_bytes_sent.hi =
3663                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3664         old_xclient->multicast_bytes_sent.lo =
3665                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3666         old_xclient->broadcast_bytes_sent.hi =
3667                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3668         old_xclient->broadcast_bytes_sent.lo =
3669                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3670
3671         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3672
3673         return 0;
3674 }
3675
3676 static void bnx2x_net_stats_update(struct bnx2x *bp)
3677 {
3678         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3679         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3680         struct net_device_stats *nstats = &bp->dev->stats;
3681
3682         nstats->rx_packets =
3683                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3684                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3685                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3686
3687         nstats->tx_packets =
3688                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3689                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3690                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3691
3692         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3693
3694         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3695
3696         nstats->rx_dropped = old_tclient->checksum_discard +
3697                              estats->mac_discard;
3698         nstats->tx_dropped = 0;
3699
3700         nstats->multicast =
3701                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3702
3703         nstats->collisions =
3704                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3705                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3706                         estats->tx_stat_dot3statslatecollisions_lo +
3707                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3708
3709         estats->jabber_packets_received =
3710                                 old_tclient->packets_too_big_discard +
3711                                 estats->rx_stat_dot3statsframestoolong_lo;
3712
3713         nstats->rx_length_errors =
3714                                 estats->rx_stat_etherstatsundersizepkts_lo +
3715                                 estats->jabber_packets_received;
3716         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3717         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3718         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3719         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3720         nstats->rx_missed_errors = estats->xxoverflow_discard;
3721
3722         nstats->rx_errors = nstats->rx_length_errors +
3723                             nstats->rx_over_errors +
3724                             nstats->rx_crc_errors +
3725                             nstats->rx_frame_errors +
3726                             nstats->rx_fifo_errors +
3727                             nstats->rx_missed_errors;
3728
3729         nstats->tx_aborted_errors =
3730                         estats->tx_stat_dot3statslatecollisions_lo +
3731                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3732         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3733         nstats->tx_fifo_errors = 0;
3734         nstats->tx_heartbeat_errors = 0;
3735         nstats->tx_window_errors = 0;
3736
3737         nstats->tx_errors = nstats->tx_aborted_errors +
3738                             nstats->tx_carrier_errors;
3739 }
3740
3741 static void bnx2x_stats_update(struct bnx2x *bp)
3742 {
3743         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3744         int update = 0;
3745
3746         if (*stats_comp != DMAE_COMP_VAL)
3747                 return;
3748
3749         if (bp->port.pmf)
3750                 update = (bnx2x_hw_stats_update(bp) == 0);
3751
3752         update |= (bnx2x_storm_stats_update(bp) == 0);
3753
3754         if (update)
3755                 bnx2x_net_stats_update(bp);
3756
3757         else {
3758                 if (bp->stats_pending) {
3759                         bp->stats_pending++;
3760                         if (bp->stats_pending == 3) {
3761                                 BNX2X_ERR("storm stats were not updated for 3 consecutive ticks\n");
3762                                 bnx2x_panic();
3763                                 return;
3764                         }
3765                 }
3766         }
3767
3768         if (bp->msglevel & NETIF_MSG_TIMER) {
3769                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3770                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3771                 struct net_device_stats *nstats = &bp->dev->stats;
3772                 int i;
3773
3774                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3775                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3776                                   "  tx pkt (%lx)\n",
3777                        bnx2x_tx_avail(bp->fp),
3778                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3779                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3780                                   "  rx pkt (%lx)\n",
3781                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3782                              bp->fp->rx_comp_cons),
3783                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3784                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3785                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3786                        estats->driver_xoff, estats->brb_drop_lo);
3787                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3788                         "packets_too_big_discard %u  no_buff_discard %u  "
3789                         "mac_discard %u  mac_filter_discard %u  "
3790                         "xxovrflow_discard %u  brb_truncate_discard %u  "
3791                         "ttl0_discard %u\n",
3792                        old_tclient->checksum_discard,
3793                        old_tclient->packets_too_big_discard,
3794                        old_tclient->no_buff_discard, estats->mac_discard,
3795                        estats->mac_filter_discard, estats->xxoverflow_discard,
3796                        estats->brb_truncate_discard,
3797                        old_tclient->ttl0_discard);
3798
3799                 for_each_queue(bp, i) {
3800                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3801                                bnx2x_fp(bp, i, tx_pkt),
3802                                bnx2x_fp(bp, i, rx_pkt),
3803                                bnx2x_fp(bp, i, rx_calls));
3804                 }
3805         }
3806
3807         bnx2x_hw_stats_post(bp);
3808         bnx2x_storm_stats_post(bp);
3809 }
3810
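     /* push the final port and function stats out to their shmem areas so
      * the management firmware sees up-to-date counters after the driver
      * stops collecting
      */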
3811 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3812 {
3813         struct dmae_command *dmae;
3814         u32 opcode;
3815         int loader_idx = PMF_DMAE_C(bp);
3816         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3817
3818         bp->executer_idx = 0;
3819
3820         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3821                   DMAE_CMD_C_ENABLE |
3822                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3823 #ifdef __BIG_ENDIAN
3824                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3825 #else
3826                   DMAE_CMD_ENDIANITY_DW_SWAP |
3827 #endif
3828                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3829                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3830
3831         if (bp->port.port_stx) {
3832
3833                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3834                 if (bp->func_stx)
3835                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3836                 else
3837                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3838                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3839                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3840                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3841                 dmae->dst_addr_hi = 0;
3842                 dmae->len = sizeof(struct host_port_stats) >> 2;
3843                 if (bp->func_stx) {
3844                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3845                         dmae->comp_addr_hi = 0;
3846                         dmae->comp_val = 1;
3847                 } else {
3848                         dmae->comp_addr_lo =
3849                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3850                         dmae->comp_addr_hi =
3851                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3852                         dmae->comp_val = DMAE_COMP_VAL;
3853
3854                         *stats_comp = 0;
3855                 }
3856         }
3857
3858         if (bp->func_stx) {
3859
3860                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3861                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3862                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3863                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3864                 dmae->dst_addr_lo = bp->func_stx >> 2;
3865                 dmae->dst_addr_hi = 0;
3866                 dmae->len = sizeof(struct host_func_stats) >> 2;
3867                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3868                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3869                 dmae->comp_val = DMAE_COMP_VAL;
3870
3871                 *stats_comp = 0;
3872         }
3873 }
3874
3875 static void bnx2x_stats_stop(struct bnx2x *bp)
3876 {
3877         int update = 0;
3878
3879         bnx2x_stats_comp(bp);
3880
3881         if (bp->port.pmf)
3882                 update = (bnx2x_hw_stats_update(bp) == 0);
3883
3884         update |= (bnx2x_storm_stats_update(bp) == 0);
3885
3886         if (update) {
3887                 bnx2x_net_stats_update(bp);
3888
3889                 if (bp->port.pmf)
3890                         bnx2x_port_stats_stop(bp);
3891
3892                 bnx2x_hw_stats_post(bp);
3893                 bnx2x_stats_comp(bp);
3894         }
3895 }
3896
3897 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3898 {
3899 }
3900
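     /* stats state machine: bnx2x_stats_handle() indexes this table by
      * [current state][event] to pick the action and the next state
      */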
3901 static const struct {
3902         void (*action)(struct bnx2x *bp);
3903         enum bnx2x_stats_state next_state;
3904 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3905 /* state        event   */
3906 {
3907 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3908 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3909 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3910 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3911 },
3912 {
3913 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3914 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3915 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3916 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3917 }
3918 };
3919
3920 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3921 {
3922         enum bnx2x_stats_state state = bp->stats_state;
3923
3924         bnx2x_stats_stm[state][event].action(bp);
3925         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3926
3927         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3928                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3929                    state, event, bp->stats_state);
3930 }
3931
3932 static void bnx2x_timer(unsigned long data)
3933 {
3934         struct bnx2x *bp = (struct bnx2x *) data;
3935
3936         if (!netif_running(bp->dev))
3937                 return;
3938
3939         if (atomic_read(&bp->intr_sem) != 0)
3940                 goto timer_restart;
3941
3942         if (poll) {
3943                 struct bnx2x_fastpath *fp = &bp->fp[0];
3944
3945                 /* poll mode (debug): service queue 0 from the timer */
3946                 bnx2x_tx_int(fp, 1000);
3947                 bnx2x_rx_int(fp, 1000);
3948         }
3949
3950         if (!BP_NOMCP(bp)) {
3951                 int func = BP_FUNC(bp);
3952                 u32 drv_pulse;
3953                 u32 mcp_pulse;
3954
3955                 ++bp->fw_drv_pulse_wr_seq;
3956                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3957                 /* TBD - add SYSTEM_TIME */
3958                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3959                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3960
3961                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3962                              MCP_PULSE_SEQ_MASK);
3963                 /* The delta between driver pulse and mcp response
3964                  * should be 1 (before mcp response) or 0 (after mcp response)
3965                  */
3966                 if ((drv_pulse != mcp_pulse) &&
3967                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3968                         /* someone lost a heartbeat... */
3969                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3970                                   drv_pulse, mcp_pulse);
3971                 }
3972         }
3973
3974         if ((bp->state == BNX2X_STATE_OPEN) ||
3975             (bp->state == BNX2X_STATE_DISABLED))
3976                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3977
3978 timer_restart:
3979         mod_timer(&bp->timer, jiffies + bp->current_interval);
3980 }
3981
3982 /* end of Statistics */
3983
3984 /* nic init */
3985
3986 /*
3987  * nic init service functions
3988  */
3989
3990 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3991 {
3992         int port = BP_PORT(bp);
3993
3994         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3995                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3996                         sizeof(struct ustorm_status_block)/4);
3997         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3998                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3999                         sizeof(struct cstorm_status_block)/4);
4000 }
4001
4002 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4003                           dma_addr_t mapping, int sb_id)
4004 {
4005         int port = BP_PORT(bp);
4006         int func = BP_FUNC(bp);
4007         int index;
4008         u64 section;
4009
4010         /* USTORM */
4011         section = ((u64)mapping) + offsetof(struct host_status_block,
4012                                             u_status_block);
4013         sb->u_status_block.status_block_id = sb_id;
4014
4015         REG_WR(bp, BAR_USTRORM_INTMEM +
4016                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4017         REG_WR(bp, BAR_USTRORM_INTMEM +
4018                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4019                U64_HI(section));
4020         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4021                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4022
4023         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4024                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4025                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4026
4027         /* CSTORM */
4028         section = ((u64)mapping) + offsetof(struct host_status_block,
4029                                             c_status_block);
4030         sb->c_status_block.status_block_id = sb_id;
4031
4032         REG_WR(bp, BAR_CSTRORM_INTMEM +
4033                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4034         REG_WR(bp, BAR_CSTRORM_INTMEM +
4035                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4036                U64_HI(section));
4037         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4038                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4039
4040         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4041                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4042                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4043
4044         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4045 }
4046
4047 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4048 {
4049         int func = BP_FUNC(bp);
4050
4051         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4052                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4053                         sizeof(struct ustorm_def_status_block)/4);
4054         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4055                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4056                         sizeof(struct cstorm_def_status_block)/4);
4057         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4058                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4059                         sizeof(struct xstorm_def_status_block)/4);
4060         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4061                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4062                         sizeof(struct tstorm_def_status_block)/4);
4063 }
4064
4065 static void bnx2x_init_def_sb(struct bnx2x *bp,
4066                               struct host_def_status_block *def_sb,
4067                               dma_addr_t mapping, int sb_id)
4068 {
4069         int port = BP_PORT(bp);
4070         int func = BP_FUNC(bp);
4071         int index, val, reg_offset;
4072         u64 section;
4073
4074         /* ATTN */
4075         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4076                                             atten_status_block);
4077         def_sb->atten_status_block.status_block_id = sb_id;
4078
4079         bp->attn_state = 0;
4080
4081         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4082                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4083
4084         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4085                 bp->attn_group[index].sig[0] = REG_RD(bp,
4086                                                      reg_offset + 0x10*index);
4087                 bp->attn_group[index].sig[1] = REG_RD(bp,
4088                                                reg_offset + 0x4 + 0x10*index);
4089                 bp->attn_group[index].sig[2] = REG_RD(bp,
4090                                                reg_offset + 0x8 + 0x10*index);
4091                 bp->attn_group[index].sig[3] = REG_RD(bp,
4092                                                reg_offset + 0xc + 0x10*index);
4093         }
4094
4095         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4096                              HC_REG_ATTN_MSG0_ADDR_L);
4097
4098         REG_WR(bp, reg_offset, U64_LO(section));
4099         REG_WR(bp, reg_offset + 4, U64_HI(section));
4100
4101         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4102
4103         val = REG_RD(bp, reg_offset);
4104         val |= sb_id;
4105         REG_WR(bp, reg_offset, val);
4106
4107         /* USTORM */
4108         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4109                                             u_def_status_block);
4110         def_sb->u_def_status_block.status_block_id = sb_id;
4111
4112         REG_WR(bp, BAR_USTRORM_INTMEM +
4113                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4114         REG_WR(bp, BAR_USTRORM_INTMEM +
4115                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4116                U64_HI(section));
4117         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4118                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4119
4120         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4121                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4122                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4123
4124         /* CSTORM */
4125         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4126                                             c_def_status_block);
4127         def_sb->c_def_status_block.status_block_id = sb_id;
4128
4129         REG_WR(bp, BAR_CSTRORM_INTMEM +
4130                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4131         REG_WR(bp, BAR_CSTRORM_INTMEM +
4132                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4133                U64_HI(section));
4134         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4135                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4136
4137         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4138                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4139                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4140
4141         /* TSTORM */
4142         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4143                                             t_def_status_block);
4144         def_sb->t_def_status_block.status_block_id = sb_id;
4145
4146         REG_WR(bp, BAR_TSTRORM_INTMEM +
4147                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4148         REG_WR(bp, BAR_TSTRORM_INTMEM +
4149                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4150                U64_HI(section));
4151         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4152                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4153
4154         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4155                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4156                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4157
4158         /* XSTORM */
4159         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4160                                             x_def_status_block);
4161         def_sb->x_def_status_block.status_block_id = sb_id;
4162
4163         REG_WR(bp, BAR_XSTRORM_INTMEM +
4164                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4165         REG_WR(bp, BAR_XSTRORM_INTMEM +
4166                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4167                U64_HI(section));
4168         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4169                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4170
4171         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4172                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4173                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4174
4175         bp->stats_pending = 0;
4176         bp->set_mac_pending = 0;
4177
4178         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4179 }
4180
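/* Apply the RX/TX interrupt coalescing parameters.  rx_ticks and
 * tx_ticks are given in microseconds; the divide by 12 suggests the
 * HC timeout registers count in 12 usec units.  A value of zero
 * disables coalescing on that status block index via HC_DISABLE.
 */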
4181 static void bnx2x_update_coalesce(struct bnx2x *bp)
4182 {
4183         int port = BP_PORT(bp);
4184         int i;
4185
4186         for_each_queue(bp, i) {
4187                 int sb_id = bp->fp[i].sb_id;
4188
4189                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4190                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4191                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4192                                                     U_SB_ETH_RX_CQ_INDEX),
4193                         bp->rx_ticks/12);
4194                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4195                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4196                                                      U_SB_ETH_RX_CQ_INDEX),
4197                          bp->rx_ticks ? 0 : 1);
4198                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4199                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4200                                                      U_SB_ETH_RX_BD_INDEX),
4201                          bp->rx_ticks ? 0 : 1);
4202
4203                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4204                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4205                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4206                                                     C_SB_ETH_TX_CQ_INDEX),
4207                         bp->tx_ticks/12);
4208                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4209                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4210                                                      C_SB_ETH_TX_CQ_INDEX),
4211                          bp->tx_ticks ? 0 : 1);
4212         }
4213 }
4214
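/* Release the TPA (LRO) aggregation bins of a queue.  Each bin holds
 * one pre-allocated skb; only bins still in the START state carry a
 * live DMA mapping that must be unmapped before the skb is freed.
 */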
4215 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4216                                        struct bnx2x_fastpath *fp, int last)
4217 {
4218         int i;
4219
4220         for (i = 0; i < last; i++) {
4221                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4222                 struct sk_buff *skb = rx_buf->skb;
4223
4224                 if (skb == NULL) {
4225                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4226                         continue;
4227                 }
4228
4229                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4230                         pci_unmap_single(bp->pdev,
4231                                          pci_unmap_addr(rx_buf, mapping),
4232                                          bp->rx_buf_size,
4233                                          PCI_DMA_FROMDEVICE);
4234
4235                 dev_kfree_skb(skb);
4236                 rx_buf->skb = NULL;
4237         }
4238 }
4239
4240 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4241 {
4242         int func = BP_FUNC(bp);
4243         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4244                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4245         u16 ring_prod, cqe_ring_prod;
4246         int i, j;
4247
4248         bp->rx_buf_size = bp->dev->mtu;
4249         bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4250                 BCM_RX_ETH_PAYLOAD_ALIGN;
4251
4252         if (bp->flags & TPA_ENABLE_FLAG) {
4253                 DP(NETIF_MSG_IFUP,
4254                    "rx_buf_size %d  effective_mtu %d\n",
4255                    bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4256
4257                 for_each_queue(bp, j) {
4258                         struct bnx2x_fastpath *fp = &bp->fp[j];
4259
4260                         for (i = 0; i < max_agg_queues; i++) {
4261                                 fp->tpa_pool[i].skb =
4262                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4263                                 if (!fp->tpa_pool[i].skb) {
4264                                         BNX2X_ERR("Failed to allocate TPA "
4265                                                   "skb pool for queue[%d] - "
4266                                                   "disabling TPA on this "
4267                                                   "queue!\n", j);
4268                                         bnx2x_free_tpa_pool(bp, fp, i);
4269                                         fp->disable_tpa = 1;
4270                                         break;
4271                                 }
4272                                 /* clear the DMA mapping of the new bin */
4273                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4274                                                    mapping, 0);
4275                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4276                         }
4277                 }
4278         }
4279
4280         for_each_queue(bp, j) {
4281                 struct bnx2x_fastpath *fp = &bp->fp[j];
4282
4283                 fp->rx_bd_cons = 0;
4284                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4285                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4286
4287                 /* "next page" elements initialization */
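                /* Each ring spans several BCM_PAGE_SIZE pages; the
                 * last element(s) of every page are reserved to hold
                 * the bus address of the following page (wrapping to
                 * the first), so the chip can follow the chain.  The
                 * NEXT_*_IDX macros skip these slots when a producer
                 * index is advanced.
                 */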
4288                 /* SGE ring */
4289                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4290                         struct eth_rx_sge *sge;
4291
4292                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4293                         sge->addr_hi =
4294                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4295                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4296                         sge->addr_lo =
4297                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4298                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4299                 }
4300
4301                 bnx2x_init_sge_ring_bit_mask(fp);
4302
4303                 /* RX BD ring */
4304                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4305                         struct eth_rx_bd *rx_bd;
4306
4307                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4308                         rx_bd->addr_hi =
4309                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4310                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4311                         rx_bd->addr_lo =
4312                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4313                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4314                 }
4315
4316                 /* CQ ring */
4317                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4318                         struct eth_rx_cqe_next_page *nextpg;
4319
4320                         nextpg = (struct eth_rx_cqe_next_page *)
4321                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4322                         nextpg->addr_hi =
4323                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4324                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4325                         nextpg->addr_lo =
4326                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4327                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4328                 }
4329
4330                 /* Allocate SGEs and initialize the ring elements */
4331                 for (i = 0, ring_prod = 0;
4332                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4333
4334                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4335                                 BNX2X_ERR("was only able to allocate "
4336                                           "%d rx sges\n", i);
4337                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4338                                 /* Cleanup already allocated elements */
4339                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4340                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4341                                 fp->disable_tpa = 1;
4342                                 ring_prod = 0;
4343                                 break;
4344                         }
4345                         ring_prod = NEXT_SGE_IDX(ring_prod);
4346                 }
4347                 fp->rx_sge_prod = ring_prod;
4348
4349                 /* Allocate BDs and initialize BD ring */
4350                 fp->rx_comp_cons = 0;
4351                 cqe_ring_prod = ring_prod = 0;
4352                 for (i = 0; i < bp->rx_ring_size; i++) {
4353                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4354                                 BNX2X_ERR("was only able to allocate "
4355                                           "%d rx skbs\n", i);
4356                                 bp->eth_stats.rx_skb_alloc_failed++;
4357                                 break;
4358                         }
4359                         ring_prod = NEXT_RX_IDX(ring_prod);
4360                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4361                         WARN_ON(ring_prod <= i);
4362                 }
4363
4364                 fp->rx_bd_prod = ring_prod;
4365                 /* must not have more available CQEs than BDs */
4366                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4367                                        cqe_ring_prod);
4368                 fp->rx_pkt = fp->rx_calls = 0;
4369
4370                 /* Warning!
4371                  * this will generate an interrupt (to the TSTORM)
4372                  * must only be done after chip is initialized
4373                  */
4374                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4375                                      fp->rx_sge_prod);
4376                 if (j != 0)
4377                         continue;
4378
4379                 REG_WR(bp, BAR_USTRORM_INTMEM +
4380                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4381                        U64_LO(fp->rx_comp_mapping));
4382                 REG_WR(bp, BAR_USTRORM_INTMEM +
4383                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4384                        U64_HI(fp->rx_comp_mapping));
4385         }
4386 }
4387
4388 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4389 {
4390         int i, j;
4391
4392         for_each_queue(bp, j) {
4393                 struct bnx2x_fastpath *fp = &bp->fp[j];
4394
4395                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4396                         struct eth_tx_bd *tx_bd =
4397                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4398
4399                         tx_bd->addr_hi =
4400                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4401                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4402                         tx_bd->addr_lo =
4403                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4404                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4405                 }
4406
4407                 fp->tx_pkt_prod = 0;
4408                 fp->tx_pkt_cons = 0;
4409                 fp->tx_bd_prod = 0;
4410                 fp->tx_bd_cons = 0;
4411                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4412                 fp->tx_pkt = 0;
4413         }
4414 }
4415
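/* Set up the slow path (SPQ) ring used to post ramrod commands to
 * the firmware: the producer starts at the ring base, and XSTORM is
 * given the ring's bus address and the initial producer index.
 */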
4416 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4417 {
4418         int func = BP_FUNC(bp);
4419
4420         spin_lock_init(&bp->spq_lock);
4421
4422         bp->spq_left = MAX_SPQ_PENDING;
4423         bp->spq_prod_idx = 0;
4424         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4425         bp->spq_prod_bd = bp->spq;
4426         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4427
4428         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4429                U64_LO(bp->spq_mapping));
4430         REG_WR(bp,
4431                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4432                U64_HI(bp->spq_mapping));
4433
4434         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4435                bp->spq_prod_idx);
4436 }
4437
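/* Fill the per-connection ETH context of every fastpath client:
 * XSTORM gets the TX BD ring and doorbell data addresses, USTORM the
 * RX BD/SGE rings and buffer sizes (plus the TPA flags when TPA is
 * enabled), and CSTORM the TX completion index and status block id.
 */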
4438 static void bnx2x_init_context(struct bnx2x *bp)
4439 {
4440         int i;
4441
4442         for_each_queue(bp, i) {
4443                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4444                 struct bnx2x_fastpath *fp = &bp->fp[i];
4445                 u8 sb_id = FP_SB_ID(fp);
4446
4447                 context->xstorm_st_context.tx_bd_page_base_hi =
4448                                                 U64_HI(fp->tx_desc_mapping);
4449                 context->xstorm_st_context.tx_bd_page_base_lo =
4450                                                 U64_LO(fp->tx_desc_mapping);
4451                 context->xstorm_st_context.db_data_addr_hi =
4452                                                 U64_HI(fp->tx_prods_mapping);
4453                 context->xstorm_st_context.db_data_addr_lo =
4454                                                 U64_LO(fp->tx_prods_mapping);
4455                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4456                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4457
4458                 context->ustorm_st_context.common.sb_index_numbers =
4459                                                 BNX2X_RX_SB_INDEX_NUM;
4460                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4461                 context->ustorm_st_context.common.status_block_id = sb_id;
4462                 context->ustorm_st_context.common.flags =
4463                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4464                 context->ustorm_st_context.common.mc_alignment_size =
4465                         BCM_RX_ETH_PAYLOAD_ALIGN;
4466                 context->ustorm_st_context.common.bd_buff_size =
4467                                                 bp->rx_buf_size;
4468                 context->ustorm_st_context.common.bd_page_base_hi =
4469                                                 U64_HI(fp->rx_desc_mapping);
4470                 context->ustorm_st_context.common.bd_page_base_lo =
4471                                                 U64_LO(fp->rx_desc_mapping);
4472                 if (!fp->disable_tpa) {
4473                         context->ustorm_st_context.common.flags |=
4474                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4475                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4476                         context->ustorm_st_context.common.sge_buff_size =
4477                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4478                         context->ustorm_st_context.common.sge_page_base_hi =
4479                                                 U64_HI(fp->rx_sge_mapping);
4480                         context->ustorm_st_context.common.sge_page_base_lo =
4481                                                 U64_LO(fp->rx_sge_mapping);
4482                 }
4483
4484                 context->cstorm_st_context.sb_index_number =
4485                                                 C_SB_ETH_TX_CQ_INDEX;
4486                 context->cstorm_st_context.status_block_id = sb_id;
4487
4488                 context->xstorm_ag_context.cdu_reserved =
4489                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4490                                                CDU_REGION_NUMBER_XCM_AG,
4491                                                ETH_CONNECTION_TYPE);
4492                 context->ustorm_ag_context.cdu_usage =
4493                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4494                                                CDU_REGION_NUMBER_UCM_AG,
4495                                                ETH_CONNECTION_TYPE);
4496         }
4497 }
4498
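/* RSS indirection table: in multi-queue mode the TSTORM table
 * entries are filled round-robin with queue numbers, spreading the
 * RSS hash buckets evenly across the enabled queues.
 */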
4499 static void bnx2x_init_ind_table(struct bnx2x *bp)
4500 {
4501         int port = BP_PORT(bp);
4502         int i;
4503
4504         if (!is_multi(bp))
4505                 return;
4506
4507         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4508         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4509                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4510                         TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4511                         i % bp->num_queues);
4512
4513         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4514 }
4515
4516 static void bnx2x_set_client_config(struct bnx2x *bp)
4517 {
4518         struct tstorm_eth_client_config tstorm_client = {0};
4519         int port = BP_PORT(bp);
4520         int i;
4521
4522         tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4523         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4524         tstorm_client.config_flags =
4525                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4526 #ifdef BCM_VLAN
4527         if (bp->rx_mode && bp->vlgrp) {
4528                 tstorm_client.config_flags |=
4529                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4530                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4531         }
4532 #endif
4533
4534         if (bp->flags & TPA_ENABLE_FLAG) {
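                /* Illustrative worked example (values assumed, not
                 * taken from this driver): with a 4K BCM_PAGE_SIZE
                 * and PAGES_PER_SGE = 2, an mtu of 9000 first yields
                 * BCM_PAGE_ALIGN(9000) >> 12 = 3 pages, which the
                 * round-up below turns into (3 + 1) & ~1 = 4 pages,
                 * i.e. 4 >> 1 = 2 SGE elements per packet.
                 */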
4535                 tstorm_client.max_sges_for_packet =
4536                         BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4537                 tstorm_client.max_sges_for_packet =
4538                         ((tstorm_client.max_sges_for_packet +
4539                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4540                         PAGES_PER_SGE_SHIFT;
4541
4542                 tstorm_client.config_flags |=
4543                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4544         }
4545
4546         for_each_queue(bp, i) {
4547                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4548                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4549                        ((u32 *)&tstorm_client)[0]);
4550                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4551                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4552                        ((u32 *)&tstorm_client)[1]);
4553         }
4554
4555         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4556            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4557 }
4558
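/* Translate the net device RX mode into per-function TSTORM MAC
 * filtering masks: each mode sets drop-all/accept-all bits for
 * unicast, multicast and broadcast at this function's mask position.
 */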
4559 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4560 {
4561         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4562         int mode = bp->rx_mode;
4563         int mask = (1 << BP_L_ID(bp));
4564         int func = BP_FUNC(bp);
4565         int i;
4566
4567         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4568
4569         switch (mode) {
4570         case BNX2X_RX_MODE_NONE: /* no Rx */
4571                 tstorm_mac_filter.ucast_drop_all = mask;
4572                 tstorm_mac_filter.mcast_drop_all = mask;
4573                 tstorm_mac_filter.bcast_drop_all = mask;
4574                 break;
4575         case BNX2X_RX_MODE_NORMAL:
4576                 tstorm_mac_filter.bcast_accept_all = mask;
4577                 break;
4578         case BNX2X_RX_MODE_ALLMULTI:
4579                 tstorm_mac_filter.mcast_accept_all = mask;
4580                 tstorm_mac_filter.bcast_accept_all = mask;
4581                 break;
4582         case BNX2X_RX_MODE_PROMISC:
4583                 tstorm_mac_filter.ucast_accept_all = mask;
4584                 tstorm_mac_filter.mcast_accept_all = mask;
4585                 tstorm_mac_filter.bcast_accept_all = mask;
4586                 break;
4587         default:
4588                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4589                 break;
4590         }
4591
4592         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4593                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4594                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4595                        ((u32 *)&tstorm_mac_filter)[i]);
4596
4597 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4598                    ((u32 *)&tstorm_mac_filter)[i]); */
4599         }
4600
4601         if (mode != BNX2X_RX_MODE_NONE)
4602                 bnx2x_set_client_config(bp);
4603 }
4604
4605 static void bnx2x_init_internal_common(struct bnx2x *bp)
4606 {
4607         int i;
4608
4609         if (bp->flags & TPA_ENABLE_FLAG) {
4610                 struct tstorm_eth_tpa_exist tpa = {0};
4611
4612                 tpa.tpa_exist = 1;
4613
4614                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4615                        ((u32 *)&tpa)[0]);
4616                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4617                        ((u32 *)&tpa)[1]);
4618         }
4619
4620         /* Zero this manually as its initialization is
4621            currently missing in the initTool */
4622         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4623                 REG_WR(bp, BAR_USTRORM_INTMEM +
4624                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4625 }
4626
4627 static void bnx2x_init_internal_port(struct bnx2x *bp)
4628 {
4629         int port = BP_PORT(bp);
4630
4631         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4632         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4633         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4634         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4635 }
4636
4637 static void bnx2x_init_internal_func(struct bnx2x *bp)
4638 {
4639         struct tstorm_eth_function_common_config tstorm_config = {0};
4640         struct stats_indication_flags stats_flags = {0};
4641         int port = BP_PORT(bp);
4642         int func = BP_FUNC(bp);
4643         int i;
4644         u16 max_agg_size;
4645
4646         if (is_multi(bp)) {
4647                 tstorm_config.config_flags = MULTI_FLAGS;
4648                 tstorm_config.rss_result_mask = MULTI_MASK;
4649         }
4650
4651         tstorm_config.leading_client_id = BP_L_ID(bp);
4652
4653         REG_WR(bp, BAR_TSTRORM_INTMEM +
4654                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4655                (*(u32 *)&tstorm_config));
4656
4657         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4658         bnx2x_set_storm_rx_mode(bp);
4659
4660         /* reset xstorm per client statistics */
4661         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4662                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4663                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4664                        i*4, 0);
4665         }
4666         /* reset tstorm per client statistics */
4667         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4668                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4669                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4670                        i*4, 0);
4671         }
4672
4673         /* Init statistics related context */
4674         stats_flags.collect_eth = 1;
4675
4676         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4677                ((u32 *)&stats_flags)[0]);
4678         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4679                ((u32 *)&stats_flags)[1]);
4680
4681         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4682                ((u32 *)&stats_flags)[0]);
4683         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4684                ((u32 *)&stats_flags)[1]);
4685
4686         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4687                ((u32 *)&stats_flags)[0]);
4688         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4689                ((u32 *)&stats_flags)[1]);
4690
4691         REG_WR(bp, BAR_XSTRORM_INTMEM +
4692                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4693                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4694         REG_WR(bp, BAR_XSTRORM_INTMEM +
4695                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4696                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4697
4698         REG_WR(bp, BAR_TSTRORM_INTMEM +
4699                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4700                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4701         REG_WR(bp, BAR_TSTRORM_INTMEM +
4702                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4703                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4704
4705         if (CHIP_IS_E1H(bp)) {
4706                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4707                         IS_E1HMF(bp));
4708                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4709                         IS_E1HMF(bp));
4710                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4711                         IS_E1HMF(bp));
4712                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4713                         IS_E1HMF(bp));
4714
4715                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4716                          bp->e1hov);
4717         }
4718
4719         /* Init CQ ring mapping and aggregation size */
4720         max_agg_size = min((u32)(bp->rx_buf_size +
4721                                  8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4722                            (u32)0xffff);
4723         for_each_queue(bp, i) {
4724                 struct bnx2x_fastpath *fp = &bp->fp[i];
4725
4726                 REG_WR(bp, BAR_USTRORM_INTMEM +
4727                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4728                        U64_LO(fp->rx_comp_mapping));
4729                 REG_WR(bp, BAR_USTRORM_INTMEM +
4730                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4731                        U64_HI(fp->rx_comp_mapping));
4732
4733                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4734                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4735                          max_agg_size);
4736         }
4737 }
4738
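/* Internal RAM init, scoped by the load code returned by the MCP: a
 * COMMON load deliberately falls through to the PORT and FUNCTION
 * stages, and a PORT load falls through to the FUNCTION stage.
 */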
4739 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4740 {
4741         switch (load_code) {
4742         case FW_MSG_CODE_DRV_LOAD_COMMON:
4743                 bnx2x_init_internal_common(bp);
4744                 /* no break */
4745
4746         case FW_MSG_CODE_DRV_LOAD_PORT:
4747                 bnx2x_init_internal_port(bp);
4748                 /* no break */
4749
4750         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4751                 bnx2x_init_internal_func(bp);
4752                 break;
4753
4754         default:
4755                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4756                 break;
4757         }
4758 }
4759
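/* Top-level NIC init: set up each fastpath status block, then the
 * default status block, coalescing, the RX/TX/slow-path rings, the
 * connection contexts, the internal memories and the indirection
 * table, and finally enable interrupts.
 */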
4760 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4761 {
4762         int i;
4763
4764         for_each_queue(bp, i) {
4765                 struct bnx2x_fastpath *fp = &bp->fp[i];
4766
4767                 fp->bp = bp;
4768                 fp->state = BNX2X_FP_STATE_CLOSED;
4769                 fp->index = i;
4770                 fp->cl_id = BP_L_ID(bp) + i;
4771                 fp->sb_id = fp->cl_id;
4772                 DP(NETIF_MSG_IFUP,
4773                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4774                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4775                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4776                               FP_SB_ID(fp));
4777                 bnx2x_update_fpsb_idx(fp);
4778         }
4779
4780         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4781                           DEF_SB_ID);
4782         bnx2x_update_dsb_idx(bp);
4783         bnx2x_update_coalesce(bp);
4784         bnx2x_init_rx_rings(bp);
4785         bnx2x_init_tx_ring(bp);
4786         bnx2x_init_sp_ring(bp);
4787         bnx2x_init_context(bp);
4788         bnx2x_init_internal(bp, load_code);
4789         bnx2x_init_ind_table(bp);
4790         bnx2x_int_enable(bp);
4791 }
4792
4793 /* end of nic init */
4794
4795 /*
4796  * gzip service functions
4797  */
4798
4799 static int bnx2x_gunzip_init(struct bnx2x *bp)
4800 {
4801         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4802                                               &bp->gunzip_mapping);
4803         if (bp->gunzip_buf == NULL)
4804                 goto gunzip_nomem1;
4805
4806         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4807         if (bp->strm == NULL)
4808                 goto gunzip_nomem2;
4809
4810         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4811                                       GFP_KERNEL);
4812         if (bp->strm->workspace == NULL)
4813                 goto gunzip_nomem3;
4814
4815         return 0;
4816
4817 gunzip_nomem3:
4818         kfree(bp->strm);
4819         bp->strm = NULL;
4820
4821 gunzip_nomem2:
4822         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4823                             bp->gunzip_mapping);
4824         bp->gunzip_buf = NULL;
4825
4826 gunzip_nomem1:
4827         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4828                " decompression\n", bp->dev->name);
4829         return -ENOMEM;
4830 }
4831
4832 static void bnx2x_gunzip_end(struct bnx2x *bp)
4833 {
4834         kfree(bp->strm->workspace);
4835
4836         kfree(bp->strm);
4837         bp->strm = NULL;
4838
4839         if (bp->gunzip_buf) {
4840                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4841                                     bp->gunzip_mapping);
4842                 bp->gunzip_buf = NULL;
4843         }
4844 }
4845
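/* Inflate a gzip image into the pre-allocated gunzip buffer.  The
 * fixed gzip header is 10 bytes; when the FNAME flag is set it is
 * followed by a NUL-terminated file name, which the loop below
 * skips.  zlib_inflateInit2() is given -MAX_WBITS because, once the
 * header is stripped, the remainder is a raw deflate stream.  The
 * image is assumed to be well formed (at least a complete header).
 */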
4846 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4847 {
4848         int n, rc;
4849
4850         /* check gzip header */
4851         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4852                 return -EINVAL;
4853
4854         n = 10;
4855
4856 #define FNAME                           0x8
4857
4858         if (zbuf[3] & FNAME)
4859                 while ((zbuf[n++] != 0) && (n < len));
4860
4861         bp->strm->next_in = zbuf + n;
4862         bp->strm->avail_in = len - n;
4863         bp->strm->next_out = bp->gunzip_buf;
4864         bp->strm->avail_out = FW_BUF_SIZE;
4865
4866         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4867         if (rc != Z_OK)
4868                 return rc;
4869
4870         rc = zlib_inflate(bp->strm, Z_FINISH);
4871         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4872                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4873                        bp->dev->name, bp->strm->msg);
4874
4875         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4876         if (bp->gunzip_outlen & 0x3)
4877                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4878                                     " gunzip_outlen (%d) not aligned\n",
4879                        bp->dev->name, bp->gunzip_outlen);
4880         bp->gunzip_outlen >>= 2;
4881
4882         zlib_inflateEnd(bp->strm);
4883
4884         if (rc == Z_STREAM_END)
4885                 return 0;
4886
4887         return rc;
4888 }
4889
4890 /* nic load/unload */
4891
4892 /*
4893  * General service functions
4894  */
4895
4896 /* send a NIG loopback debug packet */
4897 static void bnx2x_lb_pckt(struct bnx2x *bp)
4898 {
4899         u32 wb_write[3];
4900
4901         /* Ethernet source and destination addresses */
4902         wb_write[0] = 0x55555555;
4903         wb_write[1] = 0x55555555;
4904         wb_write[2] = 0x20;             /* SOP */
4905         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4906
4907         /* NON-IP protocol */
4908         wb_write[0] = 0x09000000;
4909         wb_write[1] = 0x55555555;
4910         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4911         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4912 }
4913
4914 /* Some of the internal memories are not directly readable from
4915  * the driver; to test them we send debug packets through the NIG
4916  * loopback and check the block packet counters.
4917  */
4918 static int bnx2x_int_mem_test(struct bnx2x *bp)
4919 {
4920         int factor;
4921         int count, i;
4922         u32 val = 0;
4923
4924         if (CHIP_REV_IS_FPGA(bp))
4925                 factor = 120;
4926         else if (CHIP_REV_IS_EMUL(bp))
4927                 factor = 200;
4928         else
4929                 factor = 1;
4930
4931         DP(NETIF_MSG_HW, "start part1\n");
4932
4933         /* Disable inputs of parser neighbor blocks */
4934         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4935         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4936         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4937         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4938
4939         /*  Write 0 to parser credits for CFC search request */
4940         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4941
4942         /* send Ethernet packet */
4943         bnx2x_lb_pckt(bp);
4944
4945         /* TODO: should the NIG statistics be reset here? */
4946         /* Wait until NIG register shows 1 packet of size 0x10 */
4947         count = 1000 * factor;
4948         while (count) {
4949
4950                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4951                 val = *bnx2x_sp(bp, wb_data[0]);
4952                 if (val == 0x10)
4953                         break;
4954
4955                 msleep(10);
4956                 count--;
4957         }
4958         if (val != 0x10) {
4959                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4960                 return -1;
4961         }
4962
4963         /* Wait until PRS register shows 1 packet */
4964         count = 1000 * factor;
4965         while (count) {
4966                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4967                 if (val == 1)
4968                         break;
4969
4970                 msleep(10);
4971                 count--;
4972         }
4973         if (val != 0x1) {
4974                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4975                 return -2;
4976         }
4977
4978         /* Reset and init BRB, PRS */
4979         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4980         msleep(50);
4981         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4982         msleep(50);
4983         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4984         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4985
4986         DP(NETIF_MSG_HW, "part2\n");
4987
4988         /* Disable inputs of parser neighbor blocks */
4989         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4990         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4991         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4992         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4993
4994         /* Write 0 to parser credits for CFC search request */
4995         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4996
4997         /* send 10 Ethernet packets */
4998         for (i = 0; i < 10; i++)
4999                 bnx2x_lb_pckt(bp);
5000
5001         /* Wait until the NIG register shows 10 + 1 packets with a
5002            total size of 11*0x10 = 0xb0 bytes */
5003         count = 1000 * factor;
5004         while (count) {
5005
5006                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5007                 val = *bnx2x_sp(bp, wb_data[0]);
5008                 if (val == 0xb0)
5009                         break;
5010
5011                 msleep(10);
5012                 count--;
5013         }
5014         if (val != 0xb0) {
5015                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5016                 return -3;
5017         }
5018
5019         /* The PRS register should now show 2 packets */
5020         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5021         if (val != 2)
5022                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5023
5024         /* Write 1 to parser credits for CFC search request */
5025         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5026
5027         /* Wait for the parser to consume the new credit */
5028         msleep(10 * factor);
5029         /* The PRS register should now show 3 packets */
5030         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5031         if (val != 3)
5032                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5033
5034         /* clear NIG EOP FIFO */
5035         for (i = 0; i < 11; i++)
5036                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5037         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5038         if (val != 1) {
5039                 BNX2X_ERR("clear of NIG failed\n");
5040                 return -4;
5041         }
5042
5043         /* Reset and init BRB, PRS, NIG */
5044         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5045         msleep(50);
5046         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5047         msleep(50);
5048         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5049         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5050 #ifndef BCM_ISCSI
5051         /* set NIC mode */
5052         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5053 #endif
5054
5055         /* Enable inputs of parser neighbor blocks */
5056         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5057         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5058         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5059         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5060
5061         DP(NETIF_MSG_HW, "done\n");
5062
5063         return 0; /* OK */
5064 }
5065
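/* Unmask the attention sources of the HW blocks: writing 0 to a
 * block's INT_MASK register enables all of its attention bits.
 * PXP2 and PBF deliberately keep a few bits masked (see below).
 */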
5066 static void enable_blocks_attention(struct bnx2x *bp)
5067 {
5068         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5069         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5070         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5071         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5072         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5073         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5074         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5075         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5076         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5077 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5078 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5079         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5080         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5081         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5082 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5083 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5084         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5085         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5086         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5087         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5088 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5089 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5090         if (CHIP_REV_IS_FPGA(bp))
5091                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5092         else
5093                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5094         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5095         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5096         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5097 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5098 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5099         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5100         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5101 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5102         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5103 }
5104
5105
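/* Whole-chip init, run once by the first driver instance to load
 * (FW_MSG_CODE_DRV_LOAD_COMMON): take the blocks out of reset,
 * initialize every COMMON block, zero the storm internal memories
 * and, on an E1 first power-up, run the internal memory self test.
 */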
5106 static int bnx2x_init_common(struct bnx2x *bp)
5107 {
5108         u32 val, i;
5109
5110         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5111
5112         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5113         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5114
5115         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5116         if (CHIP_IS_E1H(bp))
5117                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5118
5119         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5120         msleep(30);
5121         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5122
5123         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5124         if (CHIP_IS_E1(bp)) {
5125                 /* enable HW interrupt from PXP on USDM overflow
5126                    bit 16 on INT_MASK_0 */
5127                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5128         }
5129
5130         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5131         bnx2x_init_pxp(bp);
5132
5133 #ifdef __BIG_ENDIAN
5134         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5135         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5136         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5137         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5138         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5139         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5140
5141 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5142         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5143         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5144         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5145         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5146 #endif
5147
5148         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5149 #ifdef BCM_ISCSI
5150         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5151         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5152         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5153 #endif
5154
5155         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5156                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5157
5158         /* let the HW do its magic ... */
5159         msleep(100);
5160         /* finish PXP init */
5161         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5162         if (val != 1) {
5163                 BNX2X_ERR("PXP2 CFG failed\n");
5164                 return -EBUSY;
5165         }
5166         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5167         if (val != 1) {
5168                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5169                 return -EBUSY;
5170         }
5171
5172         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5173         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5174
5175         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5176
5177         /* clean the DMAE memory */
5178         bp->dmae_ready = 1;
5179         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5180
5181         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5182         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5183         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5184         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5185
5186         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5187         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5188         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5189         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5190
5191         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5192         /* soft reset pulse */
5193         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5194         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5195
5196 #ifdef BCM_ISCSI
5197         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5198 #endif
5199
5200         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5201         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5202         if (!CHIP_REV_IS_SLOW(bp)) {
5203                 /* enable hw interrupt from doorbell Q */
5204                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5205         }
5206
5207         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5208         if (CHIP_REV_IS_SLOW(bp)) {
5209                 /* workaround for emulation and FPGA: run with no pause */
5210                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5211                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5212                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5213                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5214         }
5215
5216         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5217         /* set NIC mode */
5218         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5219         if (CHIP_IS_E1H(bp))
5220                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5221
5222         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5223         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5224         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5225         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5226
5227         if (CHIP_IS_E1H(bp)) {
5228                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5229                                 STORM_INTMEM_SIZE_E1H/2);
5230                 bnx2x_init_fill(bp,
5231                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5232                                 0, STORM_INTMEM_SIZE_E1H/2);
5233                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5234                                 STORM_INTMEM_SIZE_E1H/2);
5235                 bnx2x_init_fill(bp,
5236                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5237                                 0, STORM_INTMEM_SIZE_E1H/2);
5238                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5239                                 STORM_INTMEM_SIZE_E1H/2);
5240                 bnx2x_init_fill(bp,
5241                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5242                                 0, STORM_INTMEM_SIZE_E1H/2);
5243                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5244                                 STORM_INTMEM_SIZE_E1H/2);
5245                 bnx2x_init_fill(bp,
5246                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5247                                 0, STORM_INTMEM_SIZE_E1H/2);
5248         } else { /* E1 */
5249                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5250                                 STORM_INTMEM_SIZE_E1);
5251                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5252                                 STORM_INTMEM_SIZE_E1);
5253                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5254                                 STORM_INTMEM_SIZE_E1);
5255                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5256                                 STORM_INTMEM_SIZE_E1);
5257         }
5258
5259         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5260         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5261         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5262         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5263
5264         /* sync semi rtc */
5265         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5266                0x80000000);
5267         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5268                0x80000000);
5269
5270         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5271         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5272         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5273
5274         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5275         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5276                 REG_WR(bp, i, 0xc0cac01a);
5277                 /* TODO: replace with something meaningful */
5278         }
5279         if (CHIP_IS_E1H(bp))
5280                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5281         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5282
5283         if (sizeof(union cdu_context) != 1024)
5284                 /* we currently assume that a context is 1024 bytes */
5285                 printk(KERN_ALERT PFX "please adjust the size of"
5286                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5287
5288         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5289         val = (4 << 24) + (0 << 12) + 1024;
5290         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5291         if (CHIP_IS_E1(bp)) {
5292                 /* !!! fix pxp client credit until excel update */
5293                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5294                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5295         }
5296
5297         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5298         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5299
5300         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5301         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5302
5303         /* PXPCS COMMON comes here */
5304         /* Reset PCIE errors for debug */
5305         REG_WR(bp, 0x2814, 0xffffffff);
5306         REG_WR(bp, 0x3820, 0xffffffff);
5307
5308         /* EMAC0 COMMON comes here */
5309         /* EMAC1 COMMON comes here */
5310         /* DBU COMMON comes here */
5311         /* DBG COMMON comes here */
5312
5313         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5314         if (CHIP_IS_E1H(bp)) {
5315                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5316                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5317         }
5318
5319         if (CHIP_REV_IS_SLOW(bp))
5320                 msleep(200);
5321
5322         /* finish CFC init */
5323         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5324         if (val != 1) {
5325                 BNX2X_ERR("CFC LL_INIT failed\n");
5326                 return -EBUSY;
5327         }
5328         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5329         if (val != 1) {
5330                 BNX2X_ERR("CFC AC_INIT failed\n");
5331                 return -EBUSY;
5332         }
5333         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5334         if (val != 1) {
5335                 BNX2X_ERR("CFC CAM_INIT failed\n");
5336                 return -EBUSY;
5337         }
5338         REG_WR(bp, CFC_REG_DEBUG0, 0);
5339
5340         /* read NIG statistic
5341            to see if this is our first up since powerup */
5342         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5343         val = *bnx2x_sp(bp, wb_data[0]);
5344
5345         /* do internal memory self test */
5346         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5347                 BNX2X_ERR("internal mem self test failed\n");
5348                 return -EBUSY;
5349         }
5350
5351         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5352         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5353         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5354                 /* Fan failure is indicated by SPIO 5 */
5355                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5356                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5357
5358                 /* set to active low mode */
5359                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5360                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5361                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5362                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5363
5364                 /* enable interrupt to signal the IGU */
5365                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5366                 val |= (1 << MISC_REGISTERS_SPIO_5);
5367                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5368                 break;
5369
5370         default:
5371                 break;
5372         }
5373
5374         /* clear PXP2 attentions */
5375         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5376
5377         enable_blocks_attention(bp);
5378
5379         if (!BP_NOMCP(bp)) {
5380                 bnx2x_acquire_phy_lock(bp);
5381                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5382                 bnx2x_release_phy_lock(bp);
5383         } else
5384                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5385
5386         return 0;
5387 }
5388
5389 static int bnx2x_init_port(struct bnx2x *bp)
5390 {
5391         int port = BP_PORT(bp);
5392         u32 val;
#ifdef BCM_ISCSI
        /* these are used by the BCM_ISCSI blocks below but were missing;
         * the starting ILT line (0 for port 0, 384 for port 1) is inferred
         * from the "Port0 1 / Port1 385" comments in those blocks */
        int func = BP_FUNC(bp);
        u32 wb_write[2];
        int i = port ? 384 : 0;
#endif
5393
5394         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5395
5396         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5397
5398         /* Port PXP comes here */
5399         /* Port PXP2 comes here */
5400 #ifdef BCM_ISCSI
5401         /* Port0  1
5402          * Port1  385 */
5403         i++;
5404         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5405         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5406         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5407         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5408
5409         /* Port0  2
5410          * Port1  386 */
5411         i++;
5412         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5413         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5414         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5415         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5416
5417         /* Port0  3
5418          * Port1  387 */
5419         i++;
5420         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5421         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5422         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5423         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5424 #endif
5425         /* Port CMs come here */
5426
5427         /* Port QM comes here */
5428 #ifdef BCM_ISCSI
5429         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5430         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5431
5432         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5433                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5434 #endif
5435         /* Port DQ comes here */
5436         /* Port BRB1 comes here */
5437         /* Port PRS comes here */
5438         /* Port TSDM comes here */
5439         /* Port CSDM comes here */
5440         /* Port USDM comes here */
5441         /* Port XSDM comes here */
5442         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5443                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5444         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5445                              port ? USEM_PORT1_END : USEM_PORT0_END);
5446         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5447                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5448         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5449                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5450         /* Port UPB comes here */
5451         /* Port XPB comes here */
5452
5453         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5454                              port ? PBF_PORT1_END : PBF_PORT0_END);
5455
5456         /* configure PBF to work without PAUSE for MTU 9000 */
5457         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5458
5459         /* update threshold */
5460         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5461         /* update init credit */
5462         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
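        /* worked arithmetic (with an assumption): the PBF counts credits in
         * 16-byte units, so a worst-case 9040-byte frame costs
         * 9040/16 = 565 credits; the extra "+ 553 - 22" in the init credit
         * is assumed to be pipeline headroom - the original code does not
         * explain it
         */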
5463
5464         /* probe changes */
5465         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5466         msleep(5);
5467         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5468
5469 #ifdef BCM_ISCSI
5470         /* tell the searcher where the T2 table is */
5471         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5472
5473         wb_write[0] = U64_LO(bp->t2_mapping);
5474         wb_write[1] = U64_HI(bp->t2_mapping);
5475         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5476         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5477         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5478         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5479
5480         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5481         /* Port SRCH comes here */
5482 #endif
5483         /* Port CDU comes here */
5484         /* Port CFC comes here */
5485
5486         if (CHIP_IS_E1(bp)) {
5487                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5488                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5489         }
5490         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5491                              port ? HC_PORT1_END : HC_PORT0_END);
5492
5493         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5494                                     MISC_AEU_PORT0_START,
5495                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5496         /* init aeu_mask_attn_func_0/1:
5497          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5498          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5499          *             bits 4-7 are used for "per vn group attention" */
5500         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5501                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5502
5503         /* Port PXPCS comes here */
5504         /* Port EMAC0 comes here */
5505         /* Port EMAC1 comes here */
5506         /* Port DBU comes here */
5507         /* Port DBG comes here */
5508         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5509                              port ? NIG_PORT1_END : NIG_PORT0_END);
5510
5511         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5512
5513         if (CHIP_IS_E1H(bp)) {
5514                 u32 wsum;
5515                 struct cmng_struct_per_port m_cmng_port;
5516                 int vn;
5517
5518                 /* 0x2 disable e1hov, 0x1 enable */
5519                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5520                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5521
5522                 /* Init RATE SHAPING and FAIRNESS contexts.
5523                    Initialize as if there is 10G link. */
5524                 wsum = bnx2x_calc_vn_wsum(bp);
5525                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5526                 if (IS_E1HMF(bp))
5527                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5528                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5529                                         wsum, 10000, &m_cmng_port);
5530         }
5531
5532         /* Port MCP comes here */
5533         /* Port DMAE comes here */
5534
5535         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5536         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5537         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5538                 /* add SPIO 5 to group 0 */
5539                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5540                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5541                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5542                 break;
5543
5544         default:
5545                 break;
5546         }
5547
5548         bnx2x__link_reset(bp);
5549
5550         return 0;
5551 }
5552
5553 #define ILT_PER_FUNC            (768/2)
5554 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
5555 /* the phys address is shifted right 12 bits and a 1=valid bit is
5556    added as the 53rd bit
5557    then, since this is a wide register(TM),
5558    we split it into two 32-bit writes
5559  */
5560 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5561 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
5562 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
5563 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
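/* worked example: ILT_PER_FUNC = 768/2 = 384 lines, so
 * FUNC_ILT_BASE(2) = 768.  For a sample DMA address 0x0000001234567000,
 * ONCHIP_ADDR1() keeps bits 12-43:
 *      (0x1234567000ULL >> 12) & 0xFFFFFFFF = 0x01234567
 * and ONCHIP_ADDR2() keeps the upper bits plus the valid bit:
 *      (1 << 20) | (0x1234567000ULL >> 44) = 0x00100000
 * the two words are then written as one wide register (see bnx2x_ilt_wr)
 */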
5564
5565 #define CNIC_ILT_LINES          0
5566
5567 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5568 {
5569         int reg;
5570
5571         if (CHIP_IS_E1H(bp))
5572                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5573         else /* E1 */
5574                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5575
5576         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5577 }
5578
5579 static int bnx2x_init_func(struct bnx2x *bp)
5580 {
5581         int port = BP_PORT(bp);
5582         int func = BP_FUNC(bp);
5583         int i;
5584
5585         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5586
5587         i = FUNC_ILT_BASE(func);
5588
5589         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5590         if (CHIP_IS_E1H(bp)) {
5591                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5592                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5593         } else /* E1 */
5594                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5595                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5596
5597
5598         if (CHIP_IS_E1H(bp)) {
5599                 for (i = 0; i < 9; i++)
5600                         bnx2x_init_block(bp,
5601                                          cm_start[func][i], cm_end[func][i]);
5602
5603                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5604                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5605         }
5606
5607         /* HC init per function */
5608         if (CHIP_IS_E1H(bp)) {
5609                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5610
5611                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5612                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5613         }
5614         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5615
5616         if (CHIP_IS_E1H(bp))
5617                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5618
5619         /* Reset PCIE errors for debug */
5620         REG_WR(bp, 0x2114, 0xffffffff);
5621         REG_WR(bp, 0x2120, 0xffffffff);
5622
5623         return 0;
5624 }
5625
5626 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5627 {
5628         int i, rc = 0;
5629
5630         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5631            BP_FUNC(bp), load_code);
5632
5633         bp->dmae_ready = 0;
5634         mutex_init(&bp->dmae_mutex);
5635         bnx2x_gunzip_init(bp);
5636
5637         switch (load_code) {
5638         case FW_MSG_CODE_DRV_LOAD_COMMON:
5639                 rc = bnx2x_init_common(bp);
5640                 if (rc)
5641                         goto init_hw_err;
5642                 /* fall through */
5643
5644         case FW_MSG_CODE_DRV_LOAD_PORT:
5645                 bp->dmae_ready = 1;
5646                 rc = bnx2x_init_port(bp);
5647                 if (rc)
5648                         goto init_hw_err;
5649                 /* fall through */
5650
5651         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5652                 bp->dmae_ready = 1;
5653                 rc = bnx2x_init_func(bp);
5654                 if (rc)
5655                         goto init_hw_err;
5656                 break;
5657
5658         default:
5659                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5660                 break;
5661         }
5662
5663         if (!BP_NOMCP(bp)) {
5664                 int func = BP_FUNC(bp);
5665
5666                 bp->fw_drv_pulse_wr_seq =
5667                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5668                                  DRV_PULSE_SEQ_MASK);
5669                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5670                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5671                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5672         } else
5673                 bp->func_stx = 0;
5674
5675         /* this needs to be done before gunzip end */
5676         bnx2x_zero_def_sb(bp);
5677         for_each_queue(bp, i)
5678                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5679
5680 init_hw_err:
5681         bnx2x_gunzip_end(bp);
5682
5683         return rc;
5684 }
5685
5686 /* send the MCP a request, block until there is a reply */
5687 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5688 {
5689         int func = BP_FUNC(bp);
5690         u32 seq = ++bp->fw_seq;
5691         u32 rc = 0;
5692         u32 cnt = 1;
5693         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5694
5695         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5696         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5697
5698         do {
5699                 /* let the FW do its magic ... */
5700                 msleep(delay);
5701
5702                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5703
5704                 /* Give the FW up to 2 seconds (200 * 10ms; 200 * 100ms on slow chips) */
5705         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5706
5707         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5708            cnt*delay, rc, seq);
5709
5710         /* is this a reply to our command? */
5711         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5712                 rc &= FW_MSG_CODE_MASK;
5713
5714         } else {
5715                 /* FW BUG! */
5716                 BNX2X_ERR("FW failed to respond!\n");
5717                 bnx2x_fw_dump(bp);
5718                 rc = 0;
5719         }
5720
5721         return rc;
5722 }
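/* usage sketch: callers in this file invoke it as, e.g.,
 *      load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 * and get back one of the FW_MSG_CODE_* values with the sequence number
 * already masked off, or 0 if the MCP failed to respond
 */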
5723
5724 static void bnx2x_free_mem(struct bnx2x *bp)
5725 {
5726
5727 #define BNX2X_PCI_FREE(x, y, size) \
5728         do { \
5729                 if (x) { \
5730                         pci_free_consistent(bp->pdev, size, x, y); \
5731                         x = NULL; \
5732                         y = 0; \
5733                 } \
5734         } while (0)
5735
5736 #define BNX2X_FREE(x) \
5737         do { \
5738                 if (x) { \
5739                         vfree(x); \
5740                         x = NULL; \
5741                 } \
5742         } while (0)
5743
5744         int i;
5745
5746         /* fastpath */
5747         for_each_queue(bp, i) {
5748
5749                 /* Status blocks */
5750                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5751                                bnx2x_fp(bp, i, status_blk_mapping),
5752                                sizeof(struct host_status_block) +
5753                                sizeof(struct eth_tx_db_data));
5754
5755                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5756                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5757                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5758                                bnx2x_fp(bp, i, tx_desc_mapping),
5759                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5760
5761                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5762                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5763                                bnx2x_fp(bp, i, rx_desc_mapping),
5764                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5765
5766                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5767                                bnx2x_fp(bp, i, rx_comp_mapping),
5768                                sizeof(struct eth_fast_path_rx_cqe) *
5769                                NUM_RCQ_BD);
5770
5771                 /* SGE ring */
5772                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5773                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5774                                bnx2x_fp(bp, i, rx_sge_mapping),
5775                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5776         }
5777         /* end of fastpath */
5778
5779         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5780                        sizeof(struct host_def_status_block));
5781
5782         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5783                        sizeof(struct bnx2x_slowpath));
5784
5785 #ifdef BCM_ISCSI
5786         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5787         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5788         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5789         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5790 #endif
5791         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5792
5793 #undef BNX2X_PCI_FREE
5794 #undef BNX2X_FREE
5795 }
5796
5797 static int bnx2x_alloc_mem(struct bnx2x *bp)
5798 {
5799
5800 #define BNX2X_PCI_ALLOC(x, y, size) \
5801         do { \
5802                 x = pci_alloc_consistent(bp->pdev, size, y); \
5803                 if (x == NULL) \
5804                         goto alloc_mem_err; \
5805                 memset(x, 0, size); \
5806         } while (0)
5807
5808 #define BNX2X_ALLOC(x, size) \
5809         do { \
5810                 x = vmalloc(size); \
5811                 if (x == NULL) \
5812                         goto alloc_mem_err; \
5813                 memset(x, 0, size); \
5814         } while (0)
5815
5816         int i;
5817
5818         /* fastpath */
5819         for_each_queue(bp, i) {
5820                 bnx2x_fp(bp, i, bp) = bp;
5821
5822                 /* Status blocks */
5823                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5824                                 &bnx2x_fp(bp, i, status_blk_mapping),
5825                                 sizeof(struct host_status_block) +
5826                                 sizeof(struct eth_tx_db_data));
5827
5828                 bnx2x_fp(bp, i, hw_tx_prods) =
5829                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5830
5831                 bnx2x_fp(bp, i, tx_prods_mapping) =
5832                                 bnx2x_fp(bp, i, status_blk_mapping) +
5833                                 sizeof(struct host_status_block);
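                /* layout note: hw_tx_prods is carved out of the same DMA
                 * buffer, directly after the host_status_block - hence the
                 * combined size in the allocation above and the sizeof()
                 * offset in the mapping here
                 */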
5834
5835                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5836                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5837                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5838                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5839                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5840                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5841
5842                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5843                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5844                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5845                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5846                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5847
5848                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5849                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5850                                 sizeof(struct eth_fast_path_rx_cqe) *
5851                                 NUM_RCQ_BD);
5852
5853                 /* SGE ring */
5854                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5855                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5856                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5857                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5858                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5859         }
5860         /* end of fastpath */
5861
5862         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5863                         sizeof(struct host_def_status_block));
5864
5865         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5866                         sizeof(struct bnx2x_slowpath));
5867
5868 #ifdef BCM_ISCSI
5869         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5870
5871         /* Initialize T1 */
5872         for (i = 0; i < 64*1024; i += 64) {
5873                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5874                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5875         }
5876
5877         /* allocate the searcher T2 table:
5878            we allocate 1/4 of the T1 allocation for T2
5879            (T2 is not entered into the ILT) */
5880         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5881
5882         /* Initialize T2 */
5883         for (i = 0; i < 16*1024; i += 64)
5884                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5885
5886         /* now fixup the last line in the block to point to the next block */
5887         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5888
5889         /* Timer block array (MAX_CONN*8 bytes, phys uncached) - 1024 conns for now */
5890         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5891
5892         /* QM queues (128*MAX_CONN) */
5893         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5894 #endif
5895
5896         /* Slow path ring */
5897         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5898
5899         return 0;
5900
5901 alloc_mem_err:
5902         bnx2x_free_mem(bp);
5903         return -ENOMEM;
5904
5905 #undef BNX2X_PCI_ALLOC
5906 #undef BNX2X_ALLOC
5907 }
5908
5909 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5910 {
5911         int i;
5912
5913         for_each_queue(bp, i) {
5914                 struct bnx2x_fastpath *fp = &bp->fp[i];
5915
5916                 u16 bd_cons = fp->tx_bd_cons;
5917                 u16 sw_prod = fp->tx_pkt_prod;
5918                 u16 sw_cons = fp->tx_pkt_cons;
5919
5920                 while (sw_cons != sw_prod) {
5921                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5922                         sw_cons++;
5923                 }
5924         }
5925 }
5926
5927 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5928 {
5929         int i, j;
5930
5931         for_each_queue(bp, j) {
5932                 struct bnx2x_fastpath *fp = &bp->fp[j];
5933
5934                 for (i = 0; i < NUM_RX_BD; i++) {
5935                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5936                         struct sk_buff *skb = rx_buf->skb;
5937
5938                         if (skb == NULL)
5939                                 continue;
5940
5941                         pci_unmap_single(bp->pdev,
5942                                          pci_unmap_addr(rx_buf, mapping),
5943                                          bp->rx_buf_size,
5944                                          PCI_DMA_FROMDEVICE);
5945
5946                         rx_buf->skb = NULL;
5947                         dev_kfree_skb(skb);
5948                 }
5949                 if (!fp->disable_tpa)
5950                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5951                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
5952                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
5953         }
5954 }
5955
5956 static void bnx2x_free_skbs(struct bnx2x *bp)
5957 {
5958         bnx2x_free_tx_skbs(bp);
5959         bnx2x_free_rx_skbs(bp);
5960 }
5961
5962 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5963 {
5964         int i, offset = 1;
5965
5966         free_irq(bp->msix_table[0].vector, bp->dev);
5967         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5968            bp->msix_table[0].vector);
5969
5970         for_each_queue(bp, i) {
5971                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
5972                    "state %x\n", i, bp->msix_table[i + offset].vector,
5973                    bnx2x_fp(bp, i, state));
5974
5975                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5976                         BNX2X_ERR("IRQ of fp #%d being freed while "
5977                                   "state != closed\n", i);
5978
5979                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5980         }
5981 }
5982
5983 static void bnx2x_free_irq(struct bnx2x *bp)
5984 {
5985         if (bp->flags & USING_MSIX_FLAG) {
5986                 bnx2x_free_msix_irqs(bp);
5987                 pci_disable_msix(bp->pdev);
5988                 bp->flags &= ~USING_MSIX_FLAG;
5989
5990         } else
5991                 free_irq(bp->pdev->irq, bp->dev);
5992 }
5993
5994 static int bnx2x_enable_msix(struct bnx2x *bp)
5995 {
5996         int i, rc, offset;
5997
5998         bp->msix_table[0].entry = 0;
5999         offset = 1;
6000         DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6001
6002         for_each_queue(bp, i) {
6003                 int igu_vec = offset + i + BP_L_ID(bp);
6004
6005                 bp->msix_table[i + offset].entry = igu_vec;
6006                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6007                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6008         }
6009
6010         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6011                              bp->num_queues + offset);
6012         if (rc) {
6013                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6014                 return -1;
6015         }
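        /* note: with the pci_enable_msix() API of this era, a positive
         * return value reports how many vectors could have been allocated;
         * the driver treats any non-zero return as failure instead of
         * retrying with fewer vectors
         */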
6016         bp->flags |= USING_MSIX_FLAG;
6017
6018         return 0;
6019 }
6020
6021 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6022 {
6023         int i, rc, offset = 1;
6024
6025         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6026                          bp->dev->name, bp->dev);
6027         if (rc) {
6028                 BNX2X_ERR("request sp irq failed\n");
6029                 return -EBUSY;
6030         }
6031
6032         for_each_queue(bp, i) {
6033                 rc = request_irq(bp->msix_table[i + offset].vector,
6034                                  bnx2x_msix_fp_int, 0,
6035                                  bp->dev->name, &bp->fp[i]);
6036                 if (rc) {
6037                         BNX2X_ERR("request fp #%d irq failed  rc -%d\n",
6038                                   i + offset, -rc);
6039                         bnx2x_free_msix_irqs(bp);
6040                         return -EBUSY;
6041                 }
6042
6043                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6044         }
6045
6046         return 0;
6047 }
6048
6049 static int bnx2x_req_irq(struct bnx2x *bp)
6050 {
6051         int rc;
6052
6053         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6054                          bp->dev->name, bp->dev);
6055         if (!rc)
6056                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6057
6058         return rc;
6059 }
6060
6061 static void bnx2x_napi_enable(struct bnx2x *bp)
6062 {
6063         int i;
6064
6065         for_each_queue(bp, i)
6066                 napi_enable(&bnx2x_fp(bp, i, napi));
6067 }
6068
6069 static void bnx2x_napi_disable(struct bnx2x *bp)
6070 {
6071         int i;
6072
6073         for_each_queue(bp, i)
6074                 napi_disable(&bnx2x_fp(bp, i, napi));
6075 }
6076
6077 static void bnx2x_netif_start(struct bnx2x *bp)
6078 {
6079         if (atomic_dec_and_test(&bp->intr_sem)) {
6080                 if (netif_running(bp->dev)) {
6081                         if (bp->state == BNX2X_STATE_OPEN)
6082                                 netif_wake_queue(bp->dev);
6083                         bnx2x_napi_enable(bp);
6084                         bnx2x_int_enable(bp);
6085                 }
6086         }
6087 }
6088
6089 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6090 {
6091         bnx2x_int_disable_sync(bp, disable_hw);
6092         if (netif_running(bp->dev)) {
6093                 bnx2x_napi_disable(bp);
6094                 netif_tx_disable(bp->dev);
6095                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6096         }
6097 }
6098
6099 /*
6100  * Init service functions
6101  */
6102
6103 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6104 {
6105         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6106         int port = BP_PORT(bp);
6107
6108         /* CAM allocation
6109          * unicasts 0-31:port0 32-63:port1
6110          * multicast 64-127:port0 128-191:port1
6111          */
6112         config->hdr.length_6b = 2;
6113         config->hdr.offset = port ? 31 : 0;
6114         config->hdr.client_id = BP_CL_ID(bp);
6115         config->hdr.reserved1 = 0;
6116
6117         /* primary MAC */
6118         config->config_table[0].cam_entry.msb_mac_addr =
6119                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6120         config->config_table[0].cam_entry.middle_mac_addr =
6121                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6122         config->config_table[0].cam_entry.lsb_mac_addr =
6123                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6124         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6125         if (set)
6126                 config->config_table[0].target_table_entry.flags = 0;
6127         else
6128                 CAM_INVALIDATE(config->config_table[0]);
6129         config->config_table[0].target_table_entry.client_id = 0;
6130         config->config_table[0].target_table_entry.vlan_id = 0;
6131
6132         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6133            (set ? "setting" : "clearing"),
6134            config->config_table[0].cam_entry.msb_mac_addr,
6135            config->config_table[0].cam_entry.middle_mac_addr,
6136            config->config_table[0].cam_entry.lsb_mac_addr);
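        /* worked example: on a little-endian host a MAC of
         * 00:1a:2b:3c:4d:5e is read as the u16s 0x1a00/0x3c2b/0x5e4d and
         * swab16() yields msb = 0x001a, middle = 0x2b3c, lsb = 0x4d5e -
         * the big-endian halfwords the CAM expects
         */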
6137
6138         /* broadcast */
6139         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6140         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6141         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6142         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6143         if (set)
6144                 config->config_table[1].target_table_entry.flags =
6145                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6146         else
6147                 CAM_INVALIDATE(config->config_table[1]);
6148         config->config_table[1].target_table_entry.client_id = 0;
6149         config->config_table[1].target_table_entry.vlan_id = 0;
6150
6151         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6152                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6153                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6154 }
6155
6156 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6157 {
6158         struct mac_configuration_cmd_e1h *config =
6159                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6160
6161         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6162                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6163                 return;
6164         }
6165
6166         /* CAM allocation for E1H
6167          * unicasts: by func number
6168          * multicast: 20+FUNC*20, 20 each
6169          */
6170         config->hdr.length_6b = 1;
6171         config->hdr.offset = BP_FUNC(bp);
6172         config->hdr.client_id = BP_CL_ID(bp);
6173         config->hdr.reserved1 = 0;
6174
6175         /* primary MAC */
6176         config->config_table[0].msb_mac_addr =
6177                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6178         config->config_table[0].middle_mac_addr =
6179                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6180         config->config_table[0].lsb_mac_addr =
6181                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6182         config->config_table[0].client_id = BP_L_ID(bp);
6183         config->config_table[0].vlan_id = 0;
6184         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6185         if (set)
6186                 config->config_table[0].flags = BP_PORT(bp);
6187         else
6188                 config->config_table[0].flags =
6189                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6190
6191         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6192            (set ? "setting" : "clearing"),
6193            config->config_table[0].msb_mac_addr,
6194            config->config_table[0].middle_mac_addr,
6195            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6196
6197         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6198                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6199                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6200 }
6201
6202 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6203                              int *state_p, int poll)
6204 {
6205         /* can take a while if any port is running */
6206         int cnt = 500;
6207
6208         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6209            poll ? "polling" : "waiting", state, idx);
6210
6211         might_sleep();
6212         while (cnt--) {
6213                 if (poll) {
6214                         bnx2x_rx_int(bp->fp, 10);
6215                         /* if the index is different from 0,
6216                          * the reply for some commands will
6217                          * arrive on the non-default queue
6218                          */
6219                         if (idx)
6220                                 bnx2x_rx_int(&bp->fp[idx], 10);
6221                 }
6222
6223                 mb(); /* state is changed by bnx2x_sp_event() */
6224                 if (*state_p == state)
6225                         return 0;
6226
6227                 msleep(1);
6228         }
6229
6230         /* timeout! */
6231         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6232                   poll ? "polling" : "waiting", state, idx);
6233 #ifdef BNX2X_STOP_ON_ERROR
6234         bnx2x_panic();
6235 #endif
6236
6237         return -EBUSY;
6238 }
6239
6240 static int bnx2x_setup_leading(struct bnx2x *bp)
6241 {
6242         int rc;
6243
6244         /* reset IGU state */
6245         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6246
6247         /* SETUP ramrod */
6248         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6249
6250         /* Wait for completion */
6251         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6252
6253         return rc;
6254 }
6255
6256 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6257 {
6258         /* reset IGU state */
6259         bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6260
6261         /* SETUP ramrod */
6262         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6263         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6264
6265         /* Wait for completion */
6266         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6267                                  &(bp->fp[index].state), 0);
6268 }
6269
6270 static int bnx2x_poll(struct napi_struct *napi, int budget);
6271 static void bnx2x_set_rx_mode(struct net_device *dev);
6272
6273 /* must be called with rtnl_lock */
6274 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6275 {
6276         u32 load_code;
6277         int i, rc;
6278 #ifdef BNX2X_STOP_ON_ERROR
6279         if (unlikely(bp->panic))
6280                 return -EPERM;
6281 #endif
6282
6283         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6284
6285         /* Send the LOAD_REQUEST command to the MCP.
6286            The reply indicates the type of LOAD command:
6287            if this is the first port to be initialized,
6288            the common blocks must be initialized as well; otherwise not
6289         */
6290         if (!BP_NOMCP(bp)) {
6291                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6292                 if (!load_code) {
6293                         BNX2X_ERR("MCP response failure, aborting\n");
6294                         return -EBUSY;
6295                 }
6296                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6297                         return -EBUSY; /* other port in diagnostic mode */
6298
6299         } else {
6300                 int port = BP_PORT(bp);
6301
6302                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6303                    load_count[0], load_count[1], load_count[2]);
6304                 load_count[0]++;
6305                 load_count[1 + port]++;
6306                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6307                    load_count[0], load_count[1], load_count[2]);
6308                 if (load_count[0] == 1)
6309                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6310                 else if (load_count[1 + port] == 1)
6311                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6312                 else
6313                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6314         }
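        /* walk-through: starting from counts {0, 0, 0}, the first load on
         * port 0 gives {1, 1, 0} -> LOAD_COMMON; a following load on port 1
         * gives {2, 1, 1} -> LOAD_PORT; any further load on an
         * already-initialized port falls through to LOAD_FUNCTION
         */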
6315
6316         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6317             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6318                 bp->port.pmf = 1;
6319         else
6320                 bp->port.pmf = 0;
6321         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6322
6323         /* if we can't use MSI-X we only need one fastpath,
6324          * so try to enable MSI-X with the requested number of fastpaths
6325          * and fall back to INT#A with a single fastpath
6326          */
6327         if (use_inta) {
6328                 bp->num_queues = 1;
6329
6330         } else {
6331                 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6332                         /* user requested number */
6333                         bp->num_queues = use_multi;
6334
6335                 else if (use_multi)
6336                         bp->num_queues = min_t(u32, num_online_cpus(),
6337                                                BP_MAX_QUEUES(bp));
6338                 else
6339                         bp->num_queues = 1;
6340
6341                 if (bnx2x_enable_msix(bp)) {
6342                         /* failed to enable MSI-X */
6343                         bp->num_queues = 1;
6344                         if (use_multi)
6345                                 BNX2X_ERR("Multi requested but failed"
6346                                           " to enable MSI-X\n");
6347                 }
6348         }
6349         DP(NETIF_MSG_IFUP,
6350            "set number of queues to %d\n", bp->num_queues);
6351
6352         if (bnx2x_alloc_mem(bp))
6353                 return -ENOMEM;
6354
6355         for_each_queue(bp, i)
6356                 bnx2x_fp(bp, i, disable_tpa) =
6357                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6358
6359         if (bp->flags & USING_MSIX_FLAG) {
6360                 rc = bnx2x_req_msix_irqs(bp);
6361                 if (rc) {
6362                         pci_disable_msix(bp->pdev);
6363                         goto load_error;
6364                 }
6365         } else {
6366                 bnx2x_ack_int(bp);
6367                 rc = bnx2x_req_irq(bp);
6368                 if (rc) {
6369                         BNX2X_ERR("IRQ request failed, aborting\n");
6370                         goto load_error;
6371                 }
6372         }
6373
6374         for_each_queue(bp, i)
6375                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6376                                bnx2x_poll, 128);
6377
6378         /* Initialize HW */
6379         rc = bnx2x_init_hw(bp, load_code);
6380         if (rc) {
6381                 BNX2X_ERR("HW init failed, aborting\n");
6382                 goto load_int_disable;
6383         }
6384
6385         /* Setup NIC internals and enable interrupts */
6386         bnx2x_nic_init(bp, load_code);
6387
6388         /* Send LOAD_DONE command to MCP */
6389         if (!BP_NOMCP(bp)) {
6390                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6391                 if (!load_code) {
6392                         BNX2X_ERR("MCP response failure, aborting\n");
6393                         rc = -EBUSY;
6394                         goto load_rings_free;
6395                 }
6396         }
6397
6398         bnx2x_stats_init(bp);
6399
6400         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6401
6402         /* Enable Rx interrupt handling before sending the ramrod
6403            as it's completed on Rx FP queue */
6404         bnx2x_napi_enable(bp);
6405
6406         /* Enable interrupt handling */
6407         atomic_set(&bp->intr_sem, 0);
6408
6409         rc = bnx2x_setup_leading(bp);
6410         if (rc) {
6411                 BNX2X_ERR("Setup leading failed!\n");
6412                 goto load_netif_stop;
6413         }
6414
6415         if (CHIP_IS_E1H(bp))
6416                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6417                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6418                         bp->state = BNX2X_STATE_DISABLED;
6419                 }
6420
6421         if (bp->state == BNX2X_STATE_OPEN)
6422                 for_each_nondefault_queue(bp, i) {
6423                         rc = bnx2x_setup_multi(bp, i);
6424                         if (rc)
6425                                 goto load_netif_stop;
6426                 }
6427
6428         if (CHIP_IS_E1(bp))
6429                 bnx2x_set_mac_addr_e1(bp, 1);
6430         else
6431                 bnx2x_set_mac_addr_e1h(bp, 1);
6432
6433         if (bp->port.pmf)
6434                 bnx2x_initial_phy_init(bp);
6435
6436         /* Start fast path */
6437         switch (load_mode) {
6438         case LOAD_NORMAL:
6439                 /* Tx queue should only be re-enabled */
6440                 netif_wake_queue(bp->dev);
6441                 bnx2x_set_rx_mode(bp->dev);
6442                 break;
6443
6444         case LOAD_OPEN:
6445                 netif_start_queue(bp->dev);
6446                 bnx2x_set_rx_mode(bp->dev);
6447                 if (bp->flags & USING_MSIX_FLAG)
6448                         printk(KERN_INFO PFX "%s: using MSI-X\n",
6449                                bp->dev->name);
6450                 break;
6451
6452         case LOAD_DIAG:
6453                 bnx2x_set_rx_mode(bp->dev);
6454                 bp->state = BNX2X_STATE_DIAG;
6455                 break;
6456
6457         default:
6458                 break;
6459         }
6460
6461         if (!bp->port.pmf)
6462                 bnx2x__link_status_update(bp);
6463
6464         /* start the timer */
6465         mod_timer(&bp->timer, jiffies + bp->current_interval);
6466
6467
6468         return 0;
6469
6470 load_netif_stop:
6471         bnx2x_napi_disable(bp);
6472 load_rings_free:
6473         /* Free SKBs, SGEs, TPA pool and driver internals */
6474         bnx2x_free_skbs(bp);
6475         for_each_queue(bp, i)
6476                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6477 load_int_disable:
6478         bnx2x_int_disable_sync(bp, 1);
6479         /* Release IRQs */
6480         bnx2x_free_irq(bp);
6481 load_error:
6482         bnx2x_free_mem(bp);
6483         bp->port.pmf = 0;
6484
6485         /* TBD we really need to reset the chip
6486            if we want to recover from this */
6487         return rc;
6488 }
6489
6490 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6491 {
6492         int rc;
6493
6494         /* halt the connection */
6495         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6496         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6497
6498         /* Wait for completion */
6499         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6500                                &(bp->fp[index].state), 1);
6501         if (rc) /* timeout */
6502                 return rc;
6503
6504         /* delete cfc entry */
6505         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6506
6507         /* Wait for completion */
6508         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6509                                &(bp->fp[index].state), 1);
6510         return rc;
6511 }
6512
6513 static int bnx2x_stop_leading(struct bnx2x *bp)
6514 {
6515         u16 dsb_sp_prod_idx;
6516         /* if the other port is handling traffic,
6517            this can take a lot of time */
6518         int cnt = 500;
6519         int rc;
6520
6521         might_sleep();
6522
6523         /* Send HALT ramrod */
6524         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6525         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6526
6527         /* Wait for completion */
6528         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6529                                &(bp->fp[0].state), 1);
6530         if (rc) /* timeout */
6531                 return rc;
6532
6533         dsb_sp_prod_idx = *bp->dsb_sp_prod;
6534
6535         /* Send PORT_DELETE ramrod */
6536         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6537
6538         /* Wait for the completion to arrive on the default status block;
6539            we are going to reset the chip anyway,
6540            so there is not much to do if this times out
6541          */
6542         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6543                 if (!cnt) {
6544                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6545                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6546                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
6547 #ifdef BNX2X_STOP_ON_ERROR
6548                         bnx2x_panic();
6549 #else
6550                         rc = -EBUSY;
6551 #endif
6552                         break;
6553                 }
6554                 cnt--;
6555                 msleep(1);
6556         }
6557         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6558         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6559
6560         return rc;
6561 }
6562
6563 static void bnx2x_reset_func(struct bnx2x *bp)
6564 {
6565         int port = BP_PORT(bp);
6566         int func = BP_FUNC(bp);
6567         int base, i;
6568
6569         /* Configure IGU */
6570         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6571         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6572
6573         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6574
6575         /* Clear ILT */
6576         base = FUNC_ILT_BASE(func);
6577         for (i = base; i < base + ILT_PER_FUNC; i++)
6578                 bnx2x_ilt_wr(bp, i, 0);
6579 }
6580
6581 static void bnx2x_reset_port(struct bnx2x *bp)
6582 {
6583         int port = BP_PORT(bp);
6584         u32 val;
6585
6586         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6587
6588         /* Do not rcv packets to BRB */
6589         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6590         /* Do not direct rcv packets that are not for MCP to the BRB */
6591         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6592                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6593
6594         /* Configure AEU */
6595         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6596
6597         msleep(100);
6598         /* Check for BRB port occupancy */
6599         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6600         if (val)
6601                 DP(NETIF_MSG_IFDOWN,
6602                    "BRB1 is not empty  %d blocks are occupied\n", val);
6603
6604         /* TODO: Close Doorbell port? */
6605 }
6606
6607 static void bnx2x_reset_common(struct bnx2x *bp)
6608 {
6609         /* reset_common */
6610         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6611                0xd3ffff7f);
6612         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6613 }
6614
6615 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6616 {
6617         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6618            BP_FUNC(bp), reset_code);
6619
6620         switch (reset_code) {
6621         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6622                 bnx2x_reset_port(bp);
6623                 bnx2x_reset_func(bp);
6624                 bnx2x_reset_common(bp);
6625                 break;
6626
6627         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6628                 bnx2x_reset_port(bp);
6629                 bnx2x_reset_func(bp);
6630                 break;
6631
6632         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6633                 bnx2x_reset_func(bp);
6634                 break;
6635
6636         default:
6637                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6638                 break;
6639         }
6640 }
6641
6642 /* must be called with rtnl_lock */
6643 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6644 {
6645         int port = BP_PORT(bp);
6646         u32 reset_code = 0;
6647         int i, cnt, rc;
6648
6649         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6650
6651         bp->rx_mode = BNX2X_RX_MODE_NONE;
6652         bnx2x_set_storm_rx_mode(bp);
6653
6654         bnx2x_netif_stop(bp, 1);
6655         if (!netif_running(bp->dev))
6656                 bnx2x_napi_disable(bp);
6657         del_timer_sync(&bp->timer);
6658         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6659                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6660         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6661
6662         /* Wait until tx fast path tasks complete */
6663         for_each_queue(bp, i) {
6664                 struct bnx2x_fastpath *fp = &bp->fp[i];
6665
6666                 cnt = 1000;
6667                 smp_rmb();
6668                 while (BNX2X_HAS_TX_WORK(fp)) {
6669
6670                         bnx2x_tx_int(fp, 1000);
6671                         if (!cnt) {
6672                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6673                                           i);
6674 #ifdef BNX2X_STOP_ON_ERROR
6675                                 bnx2x_panic();
6676                                 return -EBUSY;
6677 #else
6678                                 break;
6679 #endif
6680                         }
6681                         cnt--;
6682                         msleep(1);
6683                         smp_rmb();
6684                 }
6685         }
6686         /* Give HW time to discard old tx messages */
6687         msleep(1);
6688
6689         /* Release IRQs */
6690         bnx2x_free_irq(bp);
6691
6692         if (CHIP_IS_E1(bp)) {
6693                 struct mac_configuration_cmd *config =
6694                                                 bnx2x_sp(bp, mcast_config);
6695
6696                 bnx2x_set_mac_addr_e1(bp, 0);
6697
6698                 for (i = 0; i < config->hdr.length_6b; i++)
6699                         CAM_INVALIDATE(config->config_table[i]);
6700
6701                 config->hdr.length_6b = i;
6702                 if (CHIP_REV_IS_SLOW(bp))
6703                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6704                 else
6705                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6706                 config->hdr.client_id = BP_CL_ID(bp);
6707                 config->hdr.reserved1 = 0;
6708
6709                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6710                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6711                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6712
6713         } else { /* E1H */
6714                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6715
6716                 bnx2x_set_mac_addr_e1h(bp, 0);
6717
6718                 for (i = 0; i < MC_HASH_SIZE; i++)
6719                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6720         }
6721
6722         if (unload_mode == UNLOAD_NORMAL)
6723                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6724
6725         else if (bp->flags & NO_WOL_FLAG) {
6726                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6727                 if (CHIP_IS_E1H(bp))
6728                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6729
6730         } else if (bp->wol) {
6731                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6732                 u8 *mac_addr = bp->dev->dev_addr;
6733                 u32 val;
6734                 /* The mac address is written to entries 1-4 to
6735                    preserve entry 0 which is used by the PMF */
6736                 u8 entry = (BP_E1HVN(bp) + 1)*8;
6737
6738                 val = (mac_addr[0] << 8) | mac_addr[1];
6739                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6740
6741                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6742                       (mac_addr[4] << 8) | mac_addr[5];
6743                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
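                /* worked example: for MAC 00:1a:2b:3c:4d:5e the two writes
                 * above are 0x001a to EMAC_REG_EMAC_MAC_MATCH + entry and
                 * 0x2b3c4d5e to entry + 4
                 */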
6744
6745                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6746
6747         } else
6748                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6749
6750         /* Close the multi and leading connections;
6751            ramrod completions are collected synchronously */
6752         for_each_nondefault_queue(bp, i)
6753                 if (bnx2x_stop_multi(bp, i))
6754                         goto unload_error;
6755
6756         rc = bnx2x_stop_leading(bp);
6757         if (rc) {
6758                 BNX2X_ERR("Stop leading failed!\n");
6759 #ifdef BNX2X_STOP_ON_ERROR
6760                 return -EBUSY;
6761 #else
6762                 goto unload_error;
6763 #endif
6764         }
6765
6766 unload_error:
6767         if (!BP_NOMCP(bp))
6768                 reset_code = bnx2x_fw_command(bp, reset_code);
6769         else {
6770                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6771                    load_count[0], load_count[1], load_count[2]);
6772                 load_count[0]--;
6773                 load_count[1 + port]--;
6774                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6775                    load_count[0], load_count[1], load_count[2]);
6776                 if (load_count[0] == 0)
6777                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6778                 else if (load_count[1 + port] == 0)
6779                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6780                 else
6781                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6782         }
6783
6784         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6785             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6786                 bnx2x__link_reset(bp);
6787
6788         /* Reset the chip */
6789         bnx2x_reset_chip(bp, reset_code);
6790
6791         /* Report UNLOAD_DONE to MCP */
6792         if (!BP_NOMCP(bp))
6793                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6794         bp->port.pmf = 0;
6795
6796         /* Free SKBs, SGEs, TPA pool and driver internals */
6797         bnx2x_free_skbs(bp);
6798         for_each_queue(bp, i)
6799                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6800         bnx2x_free_mem(bp);
6801
6802         bp->state = BNX2X_STATE_CLOSED;
6803
6804         netif_carrier_off(bp->dev);
6805
6806         return 0;
6807 }
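
/* Illustrative sketch (not part of the driver): when the MCP is absent,
 * the unload path above keeps driver load counts by hand and derives the
 * reset code from them - the last function down resets the whole chip,
 * the last function on a port resets that port, anything else resets
 * only the function.  A hypothetical standalone model of that decision,
 * assuming the same counts[0]=common, counts[1 + port]=per-port layout:
 */
static inline u32 example_unload_reset_code(int counts[3], int port)
{
        counts[0]--;            /* one driver instance gone overall */
        counts[1 + port]--;     /* one instance gone on this port */
        if (counts[0] == 0)
                return FW_MSG_CODE_DRV_UNLOAD_COMMON;
        if (counts[1 + port] == 0)
                return FW_MSG_CODE_DRV_UNLOAD_PORT;
        return FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
}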
6808
6809 static void bnx2x_reset_task(struct work_struct *work)
6810 {
6811         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6812
6813 #ifdef BNX2X_STOP_ON_ERROR
6814         BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
6815                   " so reset was not done to allow debug dump\n"
6816          KERN_ERR " you will need to reboot when done\n");
6817         return;
6818 #endif
6819
6820         rtnl_lock();
6821
6822         if (!netif_running(bp->dev))
6823                 goto reset_task_exit;
6824
6825         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6826         bnx2x_nic_load(bp, LOAD_NORMAL);
6827
6828 reset_task_exit:
6829         rtnl_unlock();
6830 }
6831
6832 /* end of nic load/unload */
6833
6834 /* ethtool_ops */
6835
6836 /*
6837  * Init service functions
6838  */
6839
6840 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6841 {
6842         u32 val;
6843
6844         /* Check if there is any driver already loaded */
6845         val = REG_RD(bp, MISC_REG_UNPREPARED);
6846         if (val == 0x1) {
6847                 /* Check if it is the UNDI driver
6848                  * UNDI initializes the CID offset for the normal doorbell to 0x7
6849                  */
6850                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6851                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6852                 if (val == 0x7)
6853                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6854                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6855
6856                 if (val == 0x7) {
6857                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6858                         /* save our func */
6859                         int func = BP_FUNC(bp);
6860                         u32 swap_en;
6861                         u32 swap_val;
6862
6863                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
6864
6865                         /* try to unload UNDI on port 0 */
6866                         bp->func = 0;
6867                         bp->fw_seq =
6868                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6869                                 DRV_MSG_SEQ_NUMBER_MASK);
6870                         reset_code = bnx2x_fw_command(bp, reset_code);
6871
6872                         /* if UNDI is loaded on the other port */
6873                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6874
6875                                 /* send "DONE" for previous unload */
6876                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6877
6878                                 /* unload UNDI on port 1 */
6879                                 bp->func = 1;
6880                                 bp->fw_seq =
6881                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6882                                         DRV_MSG_SEQ_NUMBER_MASK);
6883                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6884
6885                                 bnx2x_fw_command(bp, reset_code);
6886                         }
6887
6888                         REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6889                                     HC_REG_CONFIG_0), 0x1000);
6890
6891                         /* close input traffic and wait for it */
6892                         /* Do not rcv packets to BRB */
6893                         REG_WR(bp,
6894                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6895                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6896                         /* Do not direct rcv packets that are not for MCP to
6897                          * the BRB */
6898                         REG_WR(bp,
6899                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6900                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6901                         /* clear AEU */
6902                         REG_WR(bp,
6903                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6904                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6905                         msleep(10);
6906
6907                         /* save NIG port swap info */
6908                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6909                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6910                         /* reset device */
6911                         REG_WR(bp,
6912                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6913                                0xd3ffffff);
6914                         REG_WR(bp,
6915                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6916                                0x1403);
6917                         /* take the NIG out of reset and restore swap values */
6918                         REG_WR(bp,
6919                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6920                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
6921                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6922                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6923
6924                         /* send unload done to the MCP */
6925                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6926
6927                         /* restore our func and fw_seq */
6928                         bp->func = func;
6929                         bp->fw_seq =
6930                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6931                                 DRV_MSG_SEQ_NUMBER_MASK);
6932                 }
6933         }
6934 }
6935
6936 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6937 {
6938         u32 val, val2, val3, val4, id;
6939         u16 pmc;
6940
6941         /* Get the chip revision id and number. */
6942         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6943         val = REG_RD(bp, MISC_REG_CHIP_NUM);
6944         id = ((val & 0xffff) << 16);
6945         val = REG_RD(bp, MISC_REG_CHIP_REV);
6946         id |= ((val & 0xf) << 12);
6947         val = REG_RD(bp, MISC_REG_CHIP_METAL);
6948         id |= ((val & 0xff) << 4);
6949         val = REG_RD(bp, MISC_REG_BOND_ID);
6950         id |= (val & 0xf);
6951         bp->common.chip_id = id;
6952         bp->link_params.chip_id = bp->common.chip_id;
6953         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6954
6955         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6956         bp->common.flash_size = (NVRAM_1MB_SIZE <<
6957                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
6958         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6959                        bp->common.flash_size, bp->common.flash_size);
6960
6961         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6962         bp->link_params.shmem_base = bp->common.shmem_base;
6963         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6964
6965         if (!bp->common.shmem_base ||
6966             (bp->common.shmem_base < 0xA0000) ||
6967             (bp->common.shmem_base >= 0xC0000)) {
6968                 BNX2X_DEV_INFO("MCP not active\n");
6969                 bp->flags |= NO_MCP_FLAG;
6970                 return;
6971         }
6972
6973         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6974         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6975                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6976                 BNX2X_ERR("BAD MCP validity signature\n");
6977
6978         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6979         bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6980
6981         BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
6982                        bp->common.hw_config, bp->common.board);
6983
6984         bp->link_params.hw_led_mode = ((bp->common.hw_config &
6985                                         SHARED_HW_CFG_LED_MODE_MASK) >>
6986                                        SHARED_HW_CFG_LED_MODE_SHIFT);
6987
6988         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6989         bp->common.bc_ver = val;
6990         BNX2X_DEV_INFO("bc_ver %X\n", val);
6991         if (val < BNX2X_BC_VER) {
6992                 /* for now only warn;
6993                  * later we might need to enforce this */
6994                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6995                           " please upgrade BC\n", BNX2X_BC_VER, val);
6996         }
6997
6998         if (BP_E1HVN(bp) == 0) {
6999                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7000                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7001         } else {
7002                 /* no WOL capability for E1HVN != 0 */
7003                 bp->flags |= NO_WOL_FLAG;
7004         }
7005         BNX2X_DEV_INFO("%sWoL capable\n",
7006                        (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7007
7008         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7009         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7010         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7011         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7012
7013         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7014                val, val2, val3, val4);
7015 }
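
/* Illustrative sketch (not part of the driver): the chip_id assembled
 * above packs four MISC register fields into one u32 - chip num in bits
 * 16-31, rev in 12-15, metal in 4-11 and bond_id in 0-3.  Hypothetical
 * decode helpers, shown only to make that layout explicit:
 */
static inline u16 example_chip_num(u32 chip_id)
{
        return chip_id >> 16;
}
static inline u8 example_chip_rev(u32 chip_id)
{
        return (chip_id >> 12) & 0xf;
}
static inline u8 example_chip_metal(u32 chip_id)
{
        return (chip_id >> 4) & 0xff;
}
static inline u8 example_chip_bond_id(u32 chip_id)
{
        return chip_id & 0xf;
}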
7016
7017 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7018                                                     u32 switch_cfg)
7019 {
7020         int port = BP_PORT(bp);
7021         u32 ext_phy_type;
7022
7023         switch (switch_cfg) {
7024         case SWITCH_CFG_1G:
7025                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7026
7027                 ext_phy_type =
7028                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7029                 switch (ext_phy_type) {
7030                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7031                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7032                                        ext_phy_type);
7033
7034                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7035                                                SUPPORTED_10baseT_Full |
7036                                                SUPPORTED_100baseT_Half |
7037                                                SUPPORTED_100baseT_Full |
7038                                                SUPPORTED_1000baseT_Full |
7039                                                SUPPORTED_2500baseX_Full |
7040                                                SUPPORTED_TP |
7041                                                SUPPORTED_FIBRE |
7042                                                SUPPORTED_Autoneg |
7043                                                SUPPORTED_Pause |
7044                                                SUPPORTED_Asym_Pause);
7045                         break;
7046
7047                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7048                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7049                                        ext_phy_type);
7050
7051                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7052                                                SUPPORTED_10baseT_Full |
7053                                                SUPPORTED_100baseT_Half |
7054                                                SUPPORTED_100baseT_Full |
7055                                                SUPPORTED_1000baseT_Full |
7056                                                SUPPORTED_TP |
7057                                                SUPPORTED_FIBRE |
7058                                                SUPPORTED_Autoneg |
7059                                                SUPPORTED_Pause |
7060                                                SUPPORTED_Asym_Pause);
7061                         break;
7062
7063                 default:
7064                         BNX2X_ERR("NVRAM config error. "
7065                                   "BAD SerDes ext_phy_config 0x%x\n",
7066                                   bp->link_params.ext_phy_config);
7067                         return;
7068                 }
7069
7070                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7071                                            port*0x10);
7072                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7073                 break;
7074
7075         case SWITCH_CFG_10G:
7076                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7077
7078                 ext_phy_type =
7079                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7080                 switch (ext_phy_type) {
7081                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7082                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7083                                        ext_phy_type);
7084
7085                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7086                                                SUPPORTED_10baseT_Full |
7087                                                SUPPORTED_100baseT_Half |
7088                                                SUPPORTED_100baseT_Full |
7089                                                SUPPORTED_1000baseT_Full |
7090                                                SUPPORTED_2500baseX_Full |
7091                                                SUPPORTED_10000baseT_Full |
7092                                                SUPPORTED_TP |
7093                                                SUPPORTED_FIBRE |
7094                                                SUPPORTED_Autoneg |
7095                                                SUPPORTED_Pause |
7096                                                SUPPORTED_Asym_Pause);
7097                         break;
7098
7099                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7100                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7101                                        ext_phy_type);
7102
7103                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7104                                                SUPPORTED_FIBRE |
7105                                                SUPPORTED_Pause |
7106                                                SUPPORTED_Asym_Pause);
7107                         break;
7108
7109                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7110                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7111                                        ext_phy_type);
7112
7113                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7114                                                SUPPORTED_1000baseT_Full |
7115                                                SUPPORTED_FIBRE |
7116                                                SUPPORTED_Pause |
7117                                                SUPPORTED_Asym_Pause);
7118                         break;
7119
7120                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7121                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7122                                        ext_phy_type);
7123
7124                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7125                                                SUPPORTED_1000baseT_Full |
7126                                                SUPPORTED_FIBRE |
7127                                                SUPPORTED_Autoneg |
7128                                                SUPPORTED_Pause |
7129                                                SUPPORTED_Asym_Pause);
7130                         break;
7131
7132                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7133                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7134                                        ext_phy_type);
7135
7136                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7137                                                SUPPORTED_2500baseX_Full |
7138                                                SUPPORTED_1000baseT_Full |
7139                                                SUPPORTED_FIBRE |
7140                                                SUPPORTED_Autoneg |
7141                                                SUPPORTED_Pause |
7142                                                SUPPORTED_Asym_Pause);
7143                         break;
7144
7145                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7146                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7147                                        ext_phy_type);
7148
7149                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7150                                                SUPPORTED_TP |
7151                                                SUPPORTED_Autoneg |
7152                                                SUPPORTED_Pause |
7153                                                SUPPORTED_Asym_Pause);
7154                         break;
7155
7156                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7157                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7158                                   bp->link_params.ext_phy_config);
7159                         break;
7160
7161                 default:
7162                         BNX2X_ERR("NVRAM config error. "
7163                                   "BAD XGXS ext_phy_config 0x%x\n",
7164                                   bp->link_params.ext_phy_config);
7165                         return;
7166                 }
7167
7168                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7169                                            port*0x18);
7170                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7171
7172                 break;
7173
7174         default:
7175                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7176                           bp->port.link_config);
7177                 return;
7178         }
7179         bp->link_params.phy_addr = bp->port.phy_addr;
7180
7181         /* mask what we support according to speed_cap_mask */
7182         if (!(bp->link_params.speed_cap_mask &
7183                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7184                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7185
7186         if (!(bp->link_params.speed_cap_mask &
7187                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7188                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7189
7190         if (!(bp->link_params.speed_cap_mask &
7191                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7192                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7193
7194         if (!(bp->link_params.speed_cap_mask &
7195                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7196                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7197
7198         if (!(bp->link_params.speed_cap_mask &
7199                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7200                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7201                                         SUPPORTED_1000baseT_Full);
7202
7203         if (!(bp->link_params.speed_cap_mask &
7204                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7205                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7206
7207         if (!(bp->link_params.speed_cap_mask &
7208                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7209                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7210
7211         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7212 }
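
/* Illustrative sketch (not part of the driver): every speed_cap_mask
 * test above follows the same pattern - if NVRAM does not advertise a
 * capability bit, the matching ethtool SUPPORTED_* bit is cleared.  A
 * hypothetical table-driven equivalent (partial map, same constants):
 */
static inline u32 example_apply_speed_caps(u32 supported, u32 cap_mask)
{
        static const struct {
                u32 cap;
                u32 sup;
        } map[] = {
                { PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF,
                  SUPPORTED_10baseT_Half },
                { PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL,
                  SUPPORTED_10baseT_Full },
                { PORT_HW_CFG_SPEED_CAPABILITY_D0_10G,
                  SUPPORTED_10000baseT_Full },
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(map); i++)
                if (!(cap_mask & map[i].cap))
                        supported &= ~map[i].sup;
        return supported;
}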
7213
7214 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7215 {
7216         bp->link_params.req_duplex = DUPLEX_FULL;
7217
7218         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7219         case PORT_FEATURE_LINK_SPEED_AUTO:
7220                 if (bp->port.supported & SUPPORTED_Autoneg) {
7221                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7222                         bp->port.advertising = bp->port.supported;
7223                 } else {
7224                         u32 ext_phy_type =
7225                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7226
7227                         if ((ext_phy_type ==
7228                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7229                             (ext_phy_type ==
7230                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7231                                 /* force 10G, no AN */
7232                                 bp->link_params.req_line_speed = SPEED_10000;
7233                                 bp->port.advertising =
7234                                                 (ADVERTISED_10000baseT_Full |
7235                                                  ADVERTISED_FIBRE);
7236                                 break;
7237                         }
7238                         BNX2X_ERR("NVRAM config error. "
7239                                   "Invalid link_config 0x%x"
7240                                   "  Autoneg not supported\n",
7241                                   bp->port.link_config);
7242                         return;
7243                 }
7244                 break;
7245
7246         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7247                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7248                         bp->link_params.req_line_speed = SPEED_10;
7249                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7250                                                 ADVERTISED_TP);
7251                 } else {
7252                         BNX2X_ERR("NVRAM config error. "
7253                                   "Invalid link_config 0x%x"
7254                                   "  speed_cap_mask 0x%x\n",
7255                                   bp->port.link_config,
7256                                   bp->link_params.speed_cap_mask);
7257                         return;
7258                 }
7259                 break;
7260
7261         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7262                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7263                         bp->link_params.req_line_speed = SPEED_10;
7264                         bp->link_params.req_duplex = DUPLEX_HALF;
7265                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7266                                                 ADVERTISED_TP);
7267                 } else {
7268                         BNX2X_ERR("NVRAM config error. "
7269                                   "Invalid link_config 0x%x"
7270                                   "  speed_cap_mask 0x%x\n",
7271                                   bp->port.link_config,
7272                                   bp->link_params.speed_cap_mask);
7273                         return;
7274                 }
7275                 break;
7276
7277         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7278                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7279                         bp->link_params.req_line_speed = SPEED_100;
7280                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7281                                                 ADVERTISED_TP);
7282                 } else {
7283                         BNX2X_ERR("NVRAM config error. "
7284                                   "Invalid link_config 0x%x"
7285                                   "  speed_cap_mask 0x%x\n",
7286                                   bp->port.link_config,
7287                                   bp->link_params.speed_cap_mask);
7288                         return;
7289                 }
7290                 break;
7291
7292         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7293                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7294                         bp->link_params.req_line_speed = SPEED_100;
7295                         bp->link_params.req_duplex = DUPLEX_HALF;
7296                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7297                                                 ADVERTISED_TP);
7298                 } else {
7299                         BNX2X_ERR("NVRAM config error. "
7300                                   "Invalid link_config 0x%x"
7301                                   "  speed_cap_mask 0x%x\n",
7302                                   bp->port.link_config,
7303                                   bp->link_params.speed_cap_mask);
7304                         return;
7305                 }
7306                 break;
7307
7308         case PORT_FEATURE_LINK_SPEED_1G:
7309                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7310                         bp->link_params.req_line_speed = SPEED_1000;
7311                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7312                                                 ADVERTISED_TP);
7313                 } else {
7314                         BNX2X_ERR("NVRAM config error. "
7315                                   "Invalid link_config 0x%x"
7316                                   "  speed_cap_mask 0x%x\n",
7317                                   bp->port.link_config,
7318                                   bp->link_params.speed_cap_mask);
7319                         return;
7320                 }
7321                 break;
7322
7323         case PORT_FEATURE_LINK_SPEED_2_5G:
7324                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7325                         bp->link_params.req_line_speed = SPEED_2500;
7326                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7327                                                 ADVERTISED_TP);
7328                 } else {
7329                         BNX2X_ERR("NVRAM config error. "
7330                                   "Invalid link_config 0x%x"
7331                                   "  speed_cap_mask 0x%x\n",
7332                                   bp->port.link_config,
7333                                   bp->link_params.speed_cap_mask);
7334                         return;
7335                 }
7336                 break;
7337
7338         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7339         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7340         case PORT_FEATURE_LINK_SPEED_10G_KR:
7341                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7342                         bp->link_params.req_line_speed = SPEED_10000;
7343                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7344                                                 ADVERTISED_FIBRE);
7345                 } else {
7346                         BNX2X_ERR("NVRAM config error. "
7347                                   "Invalid link_config 0x%x"
7348                                   "  speed_cap_mask 0x%x\n",
7349                                   bp->port.link_config,
7350                                   bp->link_params.speed_cap_mask);
7351                         return;
7352                 }
7353                 break;
7354
7355         default:
7356                 BNX2X_ERR("NVRAM config error. "
7357                           "BAD link speed link_config 0x%x\n",
7358                           bp->port.link_config);
7359                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7360                 bp->port.advertising = bp->port.supported;
7361                 break;
7362         }
7363
7364         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7365                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7366         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7367             !(bp->port.supported & SUPPORTED_Autoneg))
7368                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7369
7370         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7371                        "  advertising 0x%x\n",
7372                        bp->link_params.req_line_speed,
7373                        bp->link_params.req_duplex,
7374                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7375 }
7376
7377 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7378 {
7379         int port = BP_PORT(bp);
7380         u32 val, val2;
7381
7382         bp->link_params.bp = bp;
7383         bp->link_params.port = port;
7384
7385         bp->link_params.serdes_config =
7386                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7387         bp->link_params.lane_config =
7388                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7389         bp->link_params.ext_phy_config =
7390                 SHMEM_RD(bp,
7391                          dev_info.port_hw_config[port].external_phy_config);
7392         bp->link_params.speed_cap_mask =
7393                 SHMEM_RD(bp,
7394                          dev_info.port_hw_config[port].speed_capability_mask);
7395
7396         bp->port.link_config =
7397                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7398
7399         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7400              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7401                        "  link_config 0x%08x\n",
7402                        bp->link_params.serdes_config,
7403                        bp->link_params.lane_config,
7404                        bp->link_params.ext_phy_config,
7405                        bp->link_params.speed_cap_mask, bp->port.link_config);
7406
7407         bp->link_params.switch_cfg = (bp->port.link_config &
7408                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7409         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7410
7411         bnx2x_link_settings_requested(bp);
7412
7413         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7414         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7415         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7416         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7417         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7418         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7419         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7420         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7421         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7422         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7423 }
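
/* Illustrative sketch (not part of the driver): shmem stores the station
 * address as two 32-bit words - mac_upper holds bytes 0-1 in its low 16
 * bits and mac_lower holds bytes 2-5 - which is exactly the unpacking
 * done above.  A hypothetical helper capturing just that:
 */
static inline void example_mac_from_shmem(u32 upper, u32 lower, u8 *mac)
{
        mac[0] = (u8)(upper >> 8);
        mac[1] = (u8)upper;
        mac[2] = (u8)(lower >> 24);
        mac[3] = (u8)(lower >> 16);
        mac[4] = (u8)(lower >> 8);
        mac[5] = (u8)lower;
}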
7424
7425 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7426 {
7427         int func = BP_FUNC(bp);
7428         u32 val, val2;
7429         int rc = 0;
7430
7431         bnx2x_get_common_hwinfo(bp);
7432
7433         bp->e1hov = 0;
7434         bp->e1hmf = 0;
7435         if (CHIP_IS_E1H(bp)) {
7436                 bp->mf_config =
7437                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7438
7439                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7440                        FUNC_MF_CFG_E1HOV_TAG_MASK);
7441                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7442
7443                         bp->e1hov = val;
7444                         bp->e1hmf = 1;
7445                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7446                                        "(0x%04x)\n",
7447                                        func, bp->e1hov, bp->e1hov);
7448                 } else {
7449                         BNX2X_DEV_INFO("Single function mode\n");
7450                         if (BP_E1HVN(bp)) {
7451                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7452                                           "  aborting\n", func);
7453                                 rc = -EPERM;
7454                         }
7455                 }
7456         }
7457
7458         if (!BP_NOMCP(bp)) {
7459                 bnx2x_get_port_hwinfo(bp);
7460
7461                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7462                               DRV_MSG_SEQ_NUMBER_MASK);
7463                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7464         }
7465
7466         if (IS_E1HMF(bp)) {
7467                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7468                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7469                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7470                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7471                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7472                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7473                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7474                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7475                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7476                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7477                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7478                                ETH_ALEN);
7479                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7480                                ETH_ALEN);
7481                 }
7482
7483                 return rc;
7484         }
7485
7486         if (BP_NOMCP(bp)) {
7487                 /* only supposed to happen on emulation/FPGA */
7488                 BNX2X_ERR("warning: random MAC workaround active\n");
7489                 random_ether_addr(bp->dev->dev_addr);
7490                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7491         }
7492
7493         return rc;
7494 }
7495
7496 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7497 {
7498         int func = BP_FUNC(bp);
7499         int rc;
7500
7501         /* Disable interrupt handling until HW is initialized */
7502         atomic_set(&bp->intr_sem, 1);
7503
7504         mutex_init(&bp->port.phy_mutex);
7505
7506         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7507         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7508
7509         rc = bnx2x_get_hwinfo(bp);
7510
7511         /* need to reset the chip if UNDI was active */
7512         if (!BP_NOMCP(bp))
7513                 bnx2x_undi_unload(bp);
7514
7515         if (CHIP_REV_IS_FPGA(bp))
7516                 printk(KERN_ERR PFX "FPGA detected\n");
7517
7518         if (BP_NOMCP(bp) && (func == 0))
7519                 printk(KERN_ERR PFX
7520                        "MCP disabled, must load devices in order!\n");
7521
7522         /* Set TPA flags */
7523         if (disable_tpa) {
7524                 bp->flags &= ~TPA_ENABLE_FLAG;
7525                 bp->dev->features &= ~NETIF_F_LRO;
7526         } else {
7527                 bp->flags |= TPA_ENABLE_FLAG;
7528                 bp->dev->features |= NETIF_F_LRO;
7529         }
7530
7531
7532         bp->tx_ring_size = MAX_TX_AVAIL;
7533         bp->rx_ring_size = MAX_RX_AVAIL;
7534
7535         bp->rx_csum = 1;
7536         bp->rx_offset = 0;
7537
7538         bp->tx_ticks = 50;
7539         bp->rx_ticks = 25;
7540
7541         bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7542         bp->current_interval = (poll ? poll : bp->timer_interval);
7543
7544         init_timer(&bp->timer);
7545         bp->timer.expires = jiffies + bp->current_interval;
7546         bp->timer.data = (unsigned long) bp;
7547         bp->timer.function = bnx2x_timer;
7548
7549         return rc;
7550 }
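
/* Illustrative sketch (not part of the driver): the timer wired up above
 * uses the classic init_timer() contract - an expiry time, a callback
 * and an opaque data word that the callback casts back to its bnx2x.  A
 * hypothetical minimal callback showing the other half of that contract,
 * including the mod_timer() re-arm a periodic timer needs:
 */
static inline void example_timer_fn(unsigned long data)
{
        struct bnx2x *bp = (struct bnx2x *)data;

        /* ... periodic work against bp would go here ... */
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}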
7551
7552 /*
7553  * ethtool service functions
7554  */
7555
7556 /* All ethtool functions called with rtnl_lock */
7557
7558 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7559 {
7560         struct bnx2x *bp = netdev_priv(dev);
7561
7562         cmd->supported = bp->port.supported;
7563         cmd->advertising = bp->port.advertising;
7564
7565         if (netif_carrier_ok(dev)) {
7566                 cmd->speed = bp->link_vars.line_speed;
7567                 cmd->duplex = bp->link_vars.duplex;
7568         } else {
7569                 cmd->speed = bp->link_params.req_line_speed;
7570                 cmd->duplex = bp->link_params.req_duplex;
7571         }
7572         if (IS_E1HMF(bp)) {
7573                 u16 vn_max_rate;
7574
7575                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7576                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7577                 if (vn_max_rate < cmd->speed)
7578                         cmd->speed = vn_max_rate;
7579         }
7580
7581         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7582                 u32 ext_phy_type =
7583                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7584
7585                 switch (ext_phy_type) {
7586                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7587                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7588                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7589                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7590                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7591                         cmd->port = PORT_FIBRE;
7592                         break;
7593
7594                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7595                         cmd->port = PORT_TP;
7596                         break;
7597
7598                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7599                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7600                                   bp->link_params.ext_phy_config);
7601                         break;
7602
7603                 default:
7604                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7605                            bp->link_params.ext_phy_config);
7606                         break;
7607                 }
7608         } else
7609                 cmd->port = PORT_TP;
7610
7611         cmd->phy_address = bp->port.phy_addr;
7612         cmd->transceiver = XCVR_INTERNAL;
7613
7614         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7615                 cmd->autoneg = AUTONEG_ENABLE;
7616         else
7617                 cmd->autoneg = AUTONEG_DISABLE;
7618
7619         cmd->maxtxpkt = 0;
7620         cmd->maxrxpkt = 0;
7621
7622         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7623            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7624            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7625            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7626            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7627            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7628            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7629
7630         return 0;
7631 }
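
/* Illustrative sketch (not part of the driver): in E1H multi-function
 * mode the speed reported above is clipped to the per-VN maximum, which
 * the MF configuration stores in units of 100 Mbps.  A hypothetical
 * helper making the unit conversion explicit:
 */
static inline u16 example_vn_max_rate(u32 mf_config)
{
        return ((mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
                FUNC_MF_CFG_MAX_BW_SHIFT) * 100;        /* Mbps */
}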
7632
7633 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7634 {
7635         struct bnx2x *bp = netdev_priv(dev);
7636         u32 advertising;
7637
7638         if (IS_E1HMF(bp))
7639                 return 0;
7640
7641         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7642            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7643            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7644            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7645            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7646            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7647            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7648
7649         if (cmd->autoneg == AUTONEG_ENABLE) {
7650                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7651                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7652                         return -EINVAL;
7653                 }
7654
7655                 /* advertise the requested speed and duplex if supported */
7656                 cmd->advertising &= bp->port.supported;
7657
7658                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7659                 bp->link_params.req_duplex = DUPLEX_FULL;
7660                 bp->port.advertising |= (ADVERTISED_Autoneg |
7661                                          cmd->advertising);
7662
7663         } else { /* forced speed */
7664                 /* advertise the requested speed and duplex if supported */
7665                 switch (cmd->speed) {
7666                 case SPEED_10:
7667                         if (cmd->duplex == DUPLEX_FULL) {
7668                                 if (!(bp->port.supported &
7669                                       SUPPORTED_10baseT_Full)) {
7670                                         DP(NETIF_MSG_LINK,
7671                                            "10M full not supported\n");
7672                                         return -EINVAL;
7673                                 }
7674
7675                                 advertising = (ADVERTISED_10baseT_Full |
7676                                                ADVERTISED_TP);
7677                         } else {
7678                                 if (!(bp->port.supported &
7679                                       SUPPORTED_10baseT_Half)) {
7680                                         DP(NETIF_MSG_LINK,
7681                                            "10M half not supported\n");
7682                                         return -EINVAL;
7683                                 }
7684
7685                                 advertising = (ADVERTISED_10baseT_Half |
7686                                                ADVERTISED_TP);
7687                         }
7688                         break;
7689
7690                 case SPEED_100:
7691                         if (cmd->duplex == DUPLEX_FULL) {
7692                                 if (!(bp->port.supported &
7693                                                 SUPPORTED_100baseT_Full)) {
7694                                         DP(NETIF_MSG_LINK,
7695                                            "100M full not supported\n");
7696                                         return -EINVAL;
7697                                 }
7698
7699                                 advertising = (ADVERTISED_100baseT_Full |
7700                                                ADVERTISED_TP);
7701                         } else {
7702                                 if (!(bp->port.supported &
7703                                                 SUPPORTED_100baseT_Half)) {
7704                                         DP(NETIF_MSG_LINK,
7705                                            "100M half not supported\n");
7706                                         return -EINVAL;
7707                                 }
7708
7709                                 advertising = (ADVERTISED_100baseT_Half |
7710                                                ADVERTISED_TP);
7711                         }
7712                         break;
7713
7714                 case SPEED_1000:
7715                         if (cmd->duplex != DUPLEX_FULL) {
7716                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
7717                                 return -EINVAL;
7718                         }
7719
7720                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7721                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
7722                                 return -EINVAL;
7723                         }
7724
7725                         advertising = (ADVERTISED_1000baseT_Full |
7726                                        ADVERTISED_TP);
7727                         break;
7728
7729                 case SPEED_2500:
7730                         if (cmd->duplex != DUPLEX_FULL) {
7731                                 DP(NETIF_MSG_LINK,
7732                                    "2.5G half not supported\n");
7733                                 return -EINVAL;
7734                         }
7735
7736                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7737                                 DP(NETIF_MSG_LINK,
7738                                    "2.5G full not supported\n");
7739                                 return -EINVAL;
7740                         }
7741
7742                         advertising = (ADVERTISED_2500baseX_Full |
7743                                        ADVERTISED_TP);
7744                         break;
7745
7746                 case SPEED_10000:
7747                         if (cmd->duplex != DUPLEX_FULL) {
7748                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
7749                                 return -EINVAL;
7750                         }
7751
7752                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7753                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
7754                                 return -EINVAL;
7755                         }
7756
7757                         advertising = (ADVERTISED_10000baseT_Full |
7758                                        ADVERTISED_FIBRE);
7759                         break;
7760
7761                 default:
7762                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
7763                         return -EINVAL;
7764                 }
7765
7766                 bp->link_params.req_line_speed = cmd->speed;
7767                 bp->link_params.req_duplex = cmd->duplex;
7768                 bp->port.advertising = advertising;
7769         }
7770
7771         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7772            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7773            bp->link_params.req_line_speed, bp->link_params.req_duplex,
7774            bp->port.advertising);
7775
7776         if (netif_running(dev)) {
7777                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7778                 bnx2x_link_set(bp);
7779         }
7780
7781         return 0;
7782 }
7783
7784 #define PHY_FW_VER_LEN                  10
7785
7786 static void bnx2x_get_drvinfo(struct net_device *dev,
7787                               struct ethtool_drvinfo *info)
7788 {
7789         struct bnx2x *bp = netdev_priv(dev);
7790         u8 phy_fw_ver[PHY_FW_VER_LEN];
7791
7792         strcpy(info->driver, DRV_MODULE_NAME);
7793         strcpy(info->version, DRV_MODULE_VERSION);
7794
7795         phy_fw_ver[0] = '\0';
7796         if (bp->port.pmf) {
7797                 bnx2x_acquire_phy_lock(bp);
7798                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7799                                              (bp->state != BNX2X_STATE_CLOSED),
7800                                              phy_fw_ver, PHY_FW_VER_LEN);
7801                 bnx2x_release_phy_lock(bp);
7802         }
7803
7804         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7805                  (bp->common.bc_ver & 0xff0000) >> 16,
7806                  (bp->common.bc_ver & 0xff00) >> 8,
7807                  (bp->common.bc_ver & 0xff),
7808                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7809         strcpy(info->bus_info, pci_name(bp->pdev));
7810         info->n_stats = BNX2X_NUM_STATS;
7811         info->testinfo_len = BNX2X_NUM_TESTS;
7812         info->eedump_len = bp->common.flash_size;
7813         info->regdump_len = 0;
7814 }
7815
7816 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7817 {
7818         struct bnx2x *bp = netdev_priv(dev);
7819
7820         if (bp->flags & NO_WOL_FLAG) {
7821                 wol->supported = 0;
7822                 wol->wolopts = 0;
7823         } else {
7824                 wol->supported = WAKE_MAGIC;
7825                 if (bp->wol)
7826                         wol->wolopts = WAKE_MAGIC;
7827                 else
7828                         wol->wolopts = 0;
7829         }
7830         memset(&wol->sopass, 0, sizeof(wol->sopass));
7831 }
7832
7833 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7834 {
7835         struct bnx2x *bp = netdev_priv(dev);
7836
7837         if (wol->wolopts & ~WAKE_MAGIC)
7838                 return -EINVAL;
7839
7840         if (wol->wolopts & WAKE_MAGIC) {
7841                 if (bp->flags & NO_WOL_FLAG)
7842                         return -EINVAL;
7843
7844                 bp->wol = 1;
7845         } else
7846                 bp->wol = 0;
7847
7848         return 0;
7849 }
7850
7851 static u32 bnx2x_get_msglevel(struct net_device *dev)
7852 {
7853         struct bnx2x *bp = netdev_priv(dev);
7854
7855         return bp->msglevel;
7856 }
7857
7858 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7859 {
7860         struct bnx2x *bp = netdev_priv(dev);
7861
7862         if (capable(CAP_NET_ADMIN))
7863                 bp->msglevel = level;
7864 }
7865
7866 static int bnx2x_nway_reset(struct net_device *dev)
7867 {
7868         struct bnx2x *bp = netdev_priv(dev);
7869
7870         if (!bp->port.pmf)
7871                 return 0;
7872
7873         if (netif_running(dev)) {
7874                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7875                 bnx2x_link_set(bp);
7876         }
7877
7878         return 0;
7879 }
7880
7881 static int bnx2x_get_eeprom_len(struct net_device *dev)
7882 {
7883         struct bnx2x *bp = netdev_priv(dev);
7884
7885         return bp->common.flash_size;
7886 }
7887
7888 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7889 {
7890         int port = BP_PORT(bp);
7891         int count, i;
7892         u32 val = 0;
7893
7894         /* adjust timeout for emulation/FPGA */
7895         count = NVRAM_TIMEOUT_COUNT;
7896         if (CHIP_REV_IS_SLOW(bp))
7897                 count *= 100;
7898
7899         /* request access to nvram interface */
7900         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7901                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7902
7903         for (i = 0; i < count*10; i++) {
7904                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7905                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7906                         break;
7907
7908                 udelay(5);
7909         }
7910
7911         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7912                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7913                 return -EBUSY;
7914         }
7915
7916         return 0;
7917 }
7918
7919 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7920 {
7921         int port = BP_PORT(bp);
7922         int count, i;
7923         u32 val = 0;
7924
7925         /* adjust timeout for emulation/FPGA */
7926         count = NVRAM_TIMEOUT_COUNT;
7927         if (CHIP_REV_IS_SLOW(bp))
7928                 count *= 100;
7929
7930         /* relinquish nvram interface */
7931         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7932                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7933
7934         for (i = 0; i < count*10; i++) {
7935                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7936                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7937                         break;
7938
7939                 udelay(5);
7940         }
7941
7942         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7943                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7944                 return -EBUSY;
7945         }
7946
7947         return 0;
7948 }
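
/* Illustrative sketch (not part of the driver): acquire and release
 * above share one request-then-poll pattern against the NVM software
 * arbiter.  A hypothetical generic form, assuming the caller passes the
 * register, the bit to wait on and whether it should end up set or
 * clear:
 */
static inline int example_poll_reg_bit(struct bnx2x *bp, u32 reg, u32 bit,
                                       int want_set, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                u32 val = REG_RD(bp, reg);

                if (!!(val & bit) == !!want_set)
                        return 0;
                udelay(5);
        }
        return -EBUSY;
}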
7949
7950 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7951 {
7952         u32 val;
7953
7954         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7955
7956         /* enable both bits, even on read */
7957         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7958                (val | MCPR_NVM_ACCESS_ENABLE_EN |
7959                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
7960 }
7961
7962 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7963 {
7964         u32 val;
7965
7966         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7967
7968         /* disable both bits, even after read */
7969         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7970                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7971                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7972 }
7973
7974 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7975                                   u32 cmd_flags)
7976 {
7977         int count, i, rc;
7978         u32 val;
7979
7980         /* build the command word */
7981         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7982
7983         /* need to clear DONE bit separately */
7984         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7985
7986         /* address of the NVRAM to read from */
7987         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7988                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7989
7990         /* issue a read command */
7991         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7992
7993         /* adjust timeout for emulation/FPGA */
7994         count = NVRAM_TIMEOUT_COUNT;
7995         if (CHIP_REV_IS_SLOW(bp))
7996                 count *= 100;
7997
7998         /* wait for completion */
7999         *ret_val = 0;
8000         rc = -EBUSY;
8001         for (i = 0; i < count; i++) {
8002                 udelay(5);
8003                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8004
8005                 if (val & MCPR_NVM_COMMAND_DONE) {
8006                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8007                         /* we read nvram data in cpu order,
8008                          * but ethtool sees it as an array of bytes;
8009                          * converting to big-endian will do the work */
8010                         val = cpu_to_be32(val);
8011                         *ret_val = val;
8012                         rc = 0;
8013                         break;
8014                 }
8015         }
8016
8017         return rc;
8018 }
8019
8020 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8021                             int buf_size)
8022 {
8023         int rc;
8024         u32 cmd_flags;
8025         u32 val;
8026
8027         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8028                 DP(BNX2X_MSG_NVM,
8029                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8030                    offset, buf_size);
8031                 return -EINVAL;
8032         }
8033
8034         if (offset + buf_size > bp->common.flash_size) {
8035                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8036                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8037                    offset, buf_size, bp->common.flash_size);
8038                 return -EINVAL;
8039         }
8040
8041         /* request access to nvram interface */
8042         rc = bnx2x_acquire_nvram_lock(bp);
8043         if (rc)
8044                 return rc;
8045
8046         /* enable access to nvram interface */
8047         bnx2x_enable_nvram_access(bp);
8048
8049         /* read the first word(s) */
8050         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8051         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8052                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8053                 memcpy(ret_buf, &val, 4);
8054
8055                 /* advance to the next dword */
8056                 offset += sizeof(u32);
8057                 ret_buf += sizeof(u32);
8058                 buf_size -= sizeof(u32);
8059                 cmd_flags = 0;
8060         }
8061
8062         if (rc == 0) {
8063                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8064                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8065                 memcpy(ret_buf, &val, 4);
8066         }
8067
8068         /* disable access to nvram interface */
8069         bnx2x_disable_nvram_access(bp);
8070         bnx2x_release_nvram_lock(bp);
8071
8072         return rc;
8073 }
8074
8075 static int bnx2x_get_eeprom(struct net_device *dev,
8076                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8077 {
8078         struct bnx2x *bp = netdev_priv(dev);
8079         int rc;
8080
8081         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8082            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8083            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8084            eeprom->len, eeprom->len);
8085
8086         /* parameters already validated in ethtool_get_eeprom */
8087
8088         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8089
8090         return rc;
8091 }
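     /* Example (userspace; assumes a stock ethtool binary and that eth0
      * is this NIC): "ethtool -e eth0 offset 0 length 64" reaches this
      * handler through ethtool_get_eeprom() and dumps the first 64 bytes
      * of NVRAM via bnx2x_nvram_read().
      */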
8092
8093 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8094                                    u32 cmd_flags)
8095 {
8096         int count, i, rc;
8097
8098         /* build the command word */
8099         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8100
8101         /* need to clear DONE bit separately */
8102         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8103
8104         /* write the data */
8105         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8106
8107         /* address of the NVRAM to write to */
8108         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8109                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8110
8111         /* issue the write command */
8112         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8113
8114         /* adjust timeout for emulation/FPGA */
8115         count = NVRAM_TIMEOUT_COUNT;
8116         if (CHIP_REV_IS_SLOW(bp))
8117                 count *= 100;
8118
8119         /* wait for completion */
8120         rc = -EBUSY;
8121         for (i = 0; i < count; i++) {
8122                 udelay(5);
8123                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8124                 if (val & MCPR_NVM_COMMAND_DONE) {
8125                         rc = 0;
8126                         break;
8127                 }
8128         }
8129
8130         return rc;
8131 }
8132
8133 #define BYTE_OFFSET(offset)             (8 * ((offset) & 0x03))
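     /* e.g. BYTE_OFFSET(0x7) == 24: byte 3 of the aligned dword, so the
      * read-modify-write in bnx2x_nvram_write1() below shifts the new
      * byte into bits 24..31 */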
8134
8135 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8136                               int buf_size)
8137 {
8138         int rc;
8139         u32 cmd_flags;
8140         u32 align_offset;
8141         u32 val;
8142
8143         if (offset + buf_size > bp->common.flash_size) {
8144                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8145                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8146                    offset, buf_size, bp->common.flash_size);
8147                 return -EINVAL;
8148         }
8149
8150         /* request access to nvram interface */
8151         rc = bnx2x_acquire_nvram_lock(bp);
8152         if (rc)
8153                 return rc;
8154
8155         /* enable access to nvram interface */
8156         bnx2x_enable_nvram_access(bp);
8157
8158         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8159         align_offset = (offset & ~0x03);
8160         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8161
8162         if (rc == 0) {
8163                 val &= ~(0xff << BYTE_OFFSET(offset));
8164                 val |= (*data_buf << BYTE_OFFSET(offset));
8165
8166                 /* nvram data is returned as an array of bytes;
8167                  * convert it back to cpu order */
8168                 val = be32_to_cpu(val);
8169
8170                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8171                                              cmd_flags);
8172         }
8173
8174         /* disable access to nvram interface */
8175         bnx2x_disable_nvram_access(bp);
8176         bnx2x_release_nvram_lock(bp);
8177
8178         return rc;
8179 }
8180
8181 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8182                              int buf_size)
8183 {
8184         int rc;
8185         u32 cmd_flags;
8186         u32 val;
8187         u32 written_so_far;
8188
8189         if (buf_size == 1)      /* ethtool */
8190                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8191
8192         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8193                 DP(BNX2X_MSG_NVM,
8194                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8195                    offset, buf_size);
8196                 return -EINVAL;
8197         }
8198
8199         if (offset + buf_size > bp->common.flash_size) {
8200                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8201                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8202                    offset, buf_size, bp->common.flash_size);
8203                 return -EINVAL;
8204         }
8205
8206         /* request access to nvram interface */
8207         rc = bnx2x_acquire_nvram_lock(bp);
8208         if (rc)
8209                 return rc;
8210
8211         /* enable access to nvram interface */
8212         bnx2x_enable_nvram_access(bp);
8213
8214         written_so_far = 0;
8215         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8216         while ((written_so_far < buf_size) && (rc == 0)) {
8217                 if (written_so_far == (buf_size - sizeof(u32)))
8218                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8219                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8220                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8221                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8222                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8223
8224                 memcpy(&val, data_buf, 4);
8225
8226                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8227
8228                 /* advance to the next dword */
8229                 offset += sizeof(u32);
8230                 data_buf += sizeof(u32);
8231                 written_so_far += sizeof(u32);
8232                 cmd_flags = 0;
8233         }
8234
8235         /* disable access to nvram interface */
8236         bnx2x_disable_nvram_access(bp);
8237         bnx2x_release_nvram_lock(bp);
8238
8239         return rc;
8240 }
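     /* Note on the FIRST/LAST flags above: buffered flash is programmed
      * page by page, so the loop raises LAST on the dword that ends a
      * page (or the buffer) and FIRST on the dword that starts the next
      * one.  Assuming the usual 256-byte NVRAM_PAGE_SIZE, a 512-byte
      * write therefore issues two FIRST...LAST sequences.
      */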
8241
8242 static int bnx2x_set_eeprom(struct net_device *dev,
8243                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8244 {
8245         struct bnx2x *bp = netdev_priv(dev);
8246         int rc;
8247
8248         if (!netif_running(dev))
8249                 return -EAGAIN;
8250
8251         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8252            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8253            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8254            eeprom->len, eeprom->len);
8255
8256         /* parameters already validated in ethtool_set_eeprom */
8257
8258         /* If the magic number is PHY (0x00504859, ASCII "PHY") upgrade the PHY FW */
8259         if (eeprom->magic == 0x00504859)
8260                 if (bp->port.pmf) {
8261
8262                         bnx2x_acquire_phy_lock(bp);
8263                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8264                                              bp->link_params.ext_phy_config,
8265                                              (bp->state != BNX2X_STATE_CLOSED),
8266                                              eebuf, eeprom->len);
8267                         if ((bp->state == BNX2X_STATE_OPEN) ||
8268                             (bp->state == BNX2X_STATE_DISABLED)) {
8269                                 rc |= bnx2x_link_reset(&bp->link_params,
8270                                                        &bp->link_vars);
8271                                 rc |= bnx2x_phy_init(&bp->link_params,
8272                                                      &bp->link_vars);
8273                         }
8274                         bnx2x_release_phy_lock(bp);
8275
8276                 } else /* Only the PMF can access the PHY */
8277                         return -EINVAL;
8278         else
8279                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8280
8281         return rc;
8282 }
8283
8284 static int bnx2x_get_coalesce(struct net_device *dev,
8285                               struct ethtool_coalesce *coal)
8286 {
8287         struct bnx2x *bp = netdev_priv(dev);
8288
8289         memset(coal, 0, sizeof(struct ethtool_coalesce));
8290
8291         coal->rx_coalesce_usecs = bp->rx_ticks;
8292         coal->tx_coalesce_usecs = bp->tx_ticks;
8293
8294         return 0;
8295 }
8296
8297 static int bnx2x_set_coalesce(struct net_device *dev,
8298                               struct ethtool_coalesce *coal)
8299 {
8300         struct bnx2x *bp = netdev_priv(dev);
8301
8302         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8303         if (bp->rx_ticks > 3000)
8304                 bp->rx_ticks = 3000;
8305
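             /* note: the Rx clamp above is decimal 3000 while the Tx
              * clamp below is hex 0x3000 (12288) */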
8306         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8307         if (bp->tx_ticks > 0x3000)
8308                 bp->tx_ticks = 0x3000;
8309
8310         if (netif_running(dev))
8311                 bnx2x_update_coalesce(bp);
8312
8313         return 0;
8314 }
8315
8316 static void bnx2x_get_ringparam(struct net_device *dev,
8317                                 struct ethtool_ringparam *ering)
8318 {
8319         struct bnx2x *bp = netdev_priv(dev);
8320
8321         ering->rx_max_pending = MAX_RX_AVAIL;
8322         ering->rx_mini_max_pending = 0;
8323         ering->rx_jumbo_max_pending = 0;
8324
8325         ering->rx_pending = bp->rx_ring_size;
8326         ering->rx_mini_pending = 0;
8327         ering->rx_jumbo_pending = 0;
8328
8329         ering->tx_max_pending = MAX_TX_AVAIL;
8330         ering->tx_pending = bp->tx_ring_size;
8331 }
8332
8333 static int bnx2x_set_ringparam(struct net_device *dev,
8334                                struct ethtool_ringparam *ering)
8335 {
8336         struct bnx2x *bp = netdev_priv(dev);
8337         int rc = 0;
8338
8339         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8340             (ering->tx_pending > MAX_TX_AVAIL) ||
8341             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8342                 return -EINVAL;
8343
8344         bp->rx_ring_size = ering->rx_pending;
8345         bp->tx_ring_size = ering->tx_pending;
8346
8347         if (netif_running(dev)) {
8348                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8349                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8350         }
8351
8352         return rc;
8353 }
8354
8355 static void bnx2x_get_pauseparam(struct net_device *dev,
8356                                  struct ethtool_pauseparam *epause)
8357 {
8358         struct bnx2x *bp = netdev_priv(dev);
8359
8360         epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8361                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8362
8363         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8364                             BNX2X_FLOW_CTRL_RX);
8365         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8366                             BNX2X_FLOW_CTRL_TX);
8367
8368         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8369            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8370            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8371 }
8372
8373 static int bnx2x_set_pauseparam(struct net_device *dev,
8374                                 struct ethtool_pauseparam *epause)
8375 {
8376         struct bnx2x *bp = netdev_priv(dev);
8377
8378         if (IS_E1HMF(bp))
8379                 return 0;
8380
8381         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8382            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8383            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8384
8385         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8386
8387         if (epause->rx_pause)
8388                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8389
8390         if (epause->tx_pause)
8391                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8392
8393         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8394                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8395
8396         if (epause->autoneg) {
8397                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8398                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
8399                         return -EINVAL;
8400                 }
8401
8402                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8403                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8404         }
8405
8406         DP(NETIF_MSG_LINK,
8407            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8408
8409         if (netif_running(dev)) {
8410                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8411                 bnx2x_link_set(bp);
8412         }
8413
8414         return 0;
8415 }
8416
8417 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8418 {
8419         struct bnx2x *bp = netdev_priv(dev);
8420         int changed = 0;
8421         int rc = 0;
8422
8423         /* TPA requires Rx CSUM offloading */
8424         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8425                 if (!(dev->features & NETIF_F_LRO)) {
8426                         dev->features |= NETIF_F_LRO;
8427                         bp->flags |= TPA_ENABLE_FLAG;
8428                         changed = 1;
8429                 }
8430
8431         } else if (dev->features & NETIF_F_LRO) {
8432                 dev->features &= ~NETIF_F_LRO;
8433                 bp->flags &= ~TPA_ENABLE_FLAG;
8434                 changed = 1;
8435         }
8436
8437         if (changed && netif_running(dev)) {
8438                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8439                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8440         }
8441
8442         return rc;
8443 }
8444
8445 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8446 {
8447         struct bnx2x *bp = netdev_priv(dev);
8448
8449         return bp->rx_csum;
8450 }
8451
8452 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8453 {
8454         struct bnx2x *bp = netdev_priv(dev);
8455         int rc = 0;
8456
8457         bp->rx_csum = data;
8458
8459         /* Disable TPA when Rx CSUM is disabled; otherwise all
8460            TPA'ed packets will be discarded due to a wrong TCP CSUM */
8461         if (!data) {
8462                 u32 flags = ethtool_op_get_flags(dev);
8463
8464                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8465         }
8466
8467         return rc;
8468 }
8469
8470 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8471 {
8472         if (data) {
8473                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8474                 dev->features |= NETIF_F_TSO6;
8475         } else {
8476                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8477                 dev->features &= ~NETIF_F_TSO6;
8478         }
8479
8480         return 0;
8481 }
8482
8483 static const struct {
8484         char string[ETH_GSTRING_LEN];
8485 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8486         { "register_test (offline)" },
8487         { "memory_test (offline)" },
8488         { "loopback_test (offline)" },
8489         { "nvram_test (online)" },
8490         { "interrupt_test (online)" },
8491         { "link_test (online)" },
8492         { "idle check (online)" },
8493         { "MC errors (online)" }
8494 };
8495
8496 static int bnx2x_self_test_count(struct net_device *dev)
8497 {
8498         return BNX2X_NUM_TESTS;
8499 }
8500
8501 static int bnx2x_test_registers(struct bnx2x *bp)
8502 {
8503         int idx, i, rc = -ENODEV;
8504         u32 wr_val = 0;
8505         int port = BP_PORT(bp);
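             /* offset0 is the port 0 register, offset1 the per-port
              * stride (the test reads/writes offset0 + port * offset1),
              * and mask covers the writable bits of the register */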
8506         static const struct {
8507                 u32  offset0;
8508                 u32  offset1;
8509                 u32  mask;
8510         } reg_tbl[] = {
8511 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8512                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8513                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
8514                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8515                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8516                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8517                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8518                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8519                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8520                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8521 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8522                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
8523                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8524                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8525                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8526                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8527                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8528                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8529                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8530                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8531 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8532                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8533                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8534                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8535                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8536                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8537                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8538                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8539                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8540                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8541 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8542                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8543                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8544                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8545                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8546                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8547                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8548                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8549
8550                 { 0xffffffff, 0, 0x00000000 }
8551         };
8552
8553         if (!netif_running(bp->dev))
8554                 return rc;
8555
8556         /* Run the test twice:
8557            first writing 0x00000000, then writing 0xffffffff */
8558         for (idx = 0; idx < 2; idx++) {
8559
8560                 switch (idx) {
8561                 case 0:
8562                         wr_val = 0;
8563                         break;
8564                 case 1:
8565                         wr_val = 0xffffffff;
8566                         break;
8567                 }
8568
8569                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8570                         u32 offset, mask, save_val, val;
8571
8572                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8573                         mask = reg_tbl[i].mask;
8574
8575                         save_val = REG_RD(bp, offset);
8576
8577                         REG_WR(bp, offset, wr_val);
8578                         val = REG_RD(bp, offset);
8579
8580                         /* Restore the original register's value */
8581                         REG_WR(bp, offset, save_val);
8582
8583                         /* verify the masked read-back value matches the written value */
8584                         if ((val & mask) != (wr_val & mask))
8585                                 goto test_reg_exit;
8586                 }
8587         }
8588
8589         rc = 0;
8590
8591 test_reg_exit:
8592         return rc;
8593 }
8594
8595 static int bnx2x_test_memory(struct bnx2x *bp)
8596 {
8597         int i, j, rc = -ENODEV;
8598         u32 val;
8599         static const struct {
8600                 u32 offset;
8601                 int size;
8602         } mem_tbl[] = {
8603                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8604                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8605                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8606                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8607                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8608                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8609                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8610
8611                 { 0xffffffff, 0 }
8612         };
8613         static const struct {
8614                 char *name;
8615                 u32 offset;
8616                 u32 e1_mask;
8617                 u32 e1h_mask;
8618         } prty_tbl[] = {
8619                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
8620                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
8621                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
8622                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
8623                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
8624                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
8625
8626                 { NULL, 0xffffffff, 0, 0 }
8627         };
8628
8629         if (!netif_running(bp->dev))
8630                 return rc;
8631
8632         /* Go through all the memories */
8633         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8634                 for (j = 0; j < mem_tbl[i].size; j++)
8635                         REG_RD(bp, mem_tbl[i].offset + j*4);
8636
8637         /* Check the parity status */
8638         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8639                 val = REG_RD(bp, prty_tbl[i].offset);
8640                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8641                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8642                         DP(NETIF_MSG_HW,
8643                            "%s is 0x%x\n", prty_tbl[i].name, val);
8644                         goto test_mem_exit;
8645                 }
8646         }
8647
8648         rc = 0;
8649
8650 test_mem_exit:
8651         return rc;
8652 }
8653
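     /* poll for up to 10 seconds (1000 iterations x 10 msec) for the
      * link to come back up; gives up silently if it never does */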
8654 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8655 {
8656         int cnt = 1000;
8657
8658         if (link_up)
8659                 while (bnx2x_link_test(bp) && cnt--)
8660                         msleep(10);
8661 }
8662
8663 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8664 {
8665         unsigned int pkt_size, num_pkts, i;
8666         struct sk_buff *skb;
8667         unsigned char *packet;
8668         struct bnx2x_fastpath *fp = &bp->fp[0];
8669         u16 tx_start_idx, tx_idx;
8670         u16 rx_start_idx, rx_idx;
8671         u16 pkt_prod;
8672         struct sw_tx_bd *tx_buf;
8673         struct eth_tx_bd *tx_bd;
8674         dma_addr_t mapping;
8675         union eth_rx_cqe *cqe;
8676         u8 cqe_fp_flags;
8677         struct sw_rx_bd *rx_buf;
8678         u16 len;
8679         int rc = -ENODEV;
8680
8681         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8682                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8683                 bnx2x_acquire_phy_lock(bp);
8684                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8685                 bnx2x_release_phy_lock(bp);
8686
8687         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8688                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8689                 bnx2x_acquire_phy_lock(bp);
8690                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8691                 bnx2x_release_phy_lock(bp);
8692                 /* wait until link state is restored */
8693                 bnx2x_wait_for_link(bp, link_up);
8694
8695         } else
8696                 return -EINVAL;
8697
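             /* build one self-addressed frame: DA = our own MAC, zero SA
              * and ethertype, payload byte i carries the pattern i & 0xff */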
8698         pkt_size = 1514;
8699         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8700         if (!skb) {
8701                 rc = -ENOMEM;
8702                 goto test_loopback_exit;
8703         }
8704         packet = skb_put(skb, pkt_size);
8705         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8706         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8707         for (i = ETH_HLEN; i < pkt_size; i++)
8708                 packet[i] = (unsigned char) (i & 0xff);
8709
8710         num_pkts = 0;
8711         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8712         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8713
8714         pkt_prod = fp->tx_pkt_prod++;
8715         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8716         tx_buf->first_bd = fp->tx_bd_prod;
8717         tx_buf->skb = skb;
8718
8719         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8720         mapping = pci_map_single(bp->pdev, skb->data,
8721                                  skb_headlen(skb), PCI_DMA_TODEVICE);
8722         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8723         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8724         tx_bd->nbd = cpu_to_le16(1);
8725         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8726         tx_bd->vlan = cpu_to_le16(pkt_prod);
8727         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8728                                        ETH_TX_BD_FLAGS_END_BD);
8729         tx_bd->general_data = ((UNICAST_ADDRESS <<
8730                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1); /* 1 = header nbd */
8731
8732         fp->hw_tx_prods->bds_prod =
8733                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8734         mb(); /* FW restriction: must not reorder writing nbd and packets */
8735         fp->hw_tx_prods->packets_prod =
8736                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8737         DOORBELL(bp, FP_IDX(fp), 0);
8738
8739         mmiowb();
8740
8741         num_pkts++;
8742         fp->tx_bd_prod++;
8743         bp->dev->trans_start = jiffies;
8744
8745         udelay(100);
8746
8747         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8748         if (tx_idx != tx_start_idx + num_pkts)
8749                 goto test_loopback_exit;
8750
8751         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8752         if (rx_idx != rx_start_idx + num_pkts)
8753                 goto test_loopback_exit;
8754
8755         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8756         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8757         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8758                 goto test_loopback_rx_exit;
8759
8760         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8761         if (len != pkt_size)
8762                 goto test_loopback_rx_exit;
8763
8764         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8765         skb = rx_buf->skb;
8766         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8767         for (i = ETH_HLEN; i < pkt_size; i++)
8768                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8769                         goto test_loopback_rx_exit;
8770
8771         rc = 0;
8772
8773 test_loopback_rx_exit:
8774
8775         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8776         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8777         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8778         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8779
8780         /* Update producers */
8781         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8782                              fp->rx_sge_prod);
8783         mmiowb(); /* keep prod updates ordered */
8784
8785 test_loopback_exit:
8786         bp->link_params.loopback_mode = LOOPBACK_NONE;
8787
8788         return rc;
8789 }
8790
8791 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8792 {
8793         int rc = 0;
8794
8795         if (!netif_running(bp->dev))
8796                 return BNX2X_LOOPBACK_FAILED;
8797
8798         bnx2x_netif_stop(bp, 1);
8799
8800         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8801                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8802                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8803         }
8804
8805         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8806                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8807                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8808         }
8809
8810         bnx2x_netif_start(bp);
8811
8812         return rc;
8813 }
8814
8815 #define CRC32_RESIDUAL                  0xdebb20e3
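     /* 0xdebb20e3 is the well-known CRC-32 residual: running
      * ether_crc_le() over a block that has its own CRC appended yields
      * this constant as long as the block is intact */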
8816
8817 static int bnx2x_test_nvram(struct bnx2x *bp)
8818 {
8819         static const struct {
8820                 int offset;
8821                 int size;
8822         } nvram_tbl[] = {
8823                 {     0,  0x14 }, /* bootstrap */
8824                 {  0x14,  0xec }, /* dir */
8825                 { 0x100, 0x350 }, /* manuf_info */
8826                 { 0x450,  0xf0 }, /* feature_info */
8827                 { 0x640,  0x64 }, /* upgrade_key_info */
8828                 { 0x6a4,  0x64 },
8829                 { 0x708,  0x70 }, /* manuf_key_info */
8830                 { 0x778,  0x70 },
8831                 {     0,     0 }
8832         };
8833         u32 buf[0x350 / 4]; /* sized for the largest nvram_tbl entry */
8834         u8 *data = (u8 *)buf;
8835         int i, rc;
8836         u32 magic, csum;
8837
8838         rc = bnx2x_nvram_read(bp, 0, data, 4);
8839         if (rc) {
8840                 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8841                 goto test_nvram_exit;
8842         }
8843
8844         magic = be32_to_cpu(buf[0]);
8845         if (magic != 0x669955aa) {
8846                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8847                 rc = -ENODEV;
8848                 goto test_nvram_exit;
8849         }
8850
8851         for (i = 0; nvram_tbl[i].size; i++) {
8852
8853                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8854                                       nvram_tbl[i].size);
8855                 if (rc) {
8856                         DP(NETIF_MSG_PROBE,
8857                            "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8858                         goto test_nvram_exit;
8859                 }
8860
8861                 csum = ether_crc_le(nvram_tbl[i].size, data);
8862                 if (csum != CRC32_RESIDUAL) {
8863                         DP(NETIF_MSG_PROBE,
8864                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8865                         rc = -ENODEV;
8866                         goto test_nvram_exit;
8867                 }
8868         }
8869
8870 test_nvram_exit:
8871         return rc;
8872 }
8873
8874 static int bnx2x_test_intr(struct bnx2x *bp)
8875 {
8876         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8877         int i, rc;
8878
8879         if (!netif_running(bp->dev))
8880                 return -ENODEV;
8881
8882         config->hdr.length_6b = 0;
8883         config->hdr.offset = 0;
8884         config->hdr.client_id = BP_CL_ID(bp);
8885         config->hdr.reserved1 = 0;
8886
8887         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8888                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8889                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8890         if (rc == 0) {
8891                 bp->set_mac_pending++;
8892                 for (i = 0; i < 10; i++) {
8893                         if (!bp->set_mac_pending)
8894                                 break;
8895                         msleep_interruptible(10);
8896                 }
8897                 if (i == 10)
8898                         rc = -ENODEV;
8899         }
8900
8901         return rc;
8902 }
8903
8904 static void bnx2x_self_test(struct net_device *dev,
8905                             struct ethtool_test *etest, u64 *buf)
8906 {
8907         struct bnx2x *bp = netdev_priv(dev);
8908
8909         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8910
8911         if (!netif_running(dev))
8912                 return;
8913
8914         /* offline tests are not supported in MF mode */
8915         if (IS_E1HMF(bp))
8916                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8917
8918         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8919                 u8 link_up;
8920
8921                 link_up = bp->link_vars.link_up;
8922                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8923                 bnx2x_nic_load(bp, LOAD_DIAG);
8924                 /* wait until link state is restored */
8925                 bnx2x_wait_for_link(bp, link_up);
8926
8927                 if (bnx2x_test_registers(bp) != 0) {
8928                         buf[0] = 1;
8929                         etest->flags |= ETH_TEST_FL_FAILED;
8930                 }
8931                 if (bnx2x_test_memory(bp) != 0) {
8932                         buf[1] = 1;
8933                         etest->flags |= ETH_TEST_FL_FAILED;
8934                 }
8935                 buf[2] = bnx2x_test_loopback(bp, link_up);
8936                 if (buf[2] != 0)
8937                         etest->flags |= ETH_TEST_FL_FAILED;
8938
8939                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8940                 bnx2x_nic_load(bp, LOAD_NORMAL);
8941                 /* wait until link state is restored */
8942                 bnx2x_wait_for_link(bp, link_up);
8943         }
8944         if (bnx2x_test_nvram(bp) != 0) {
8945                 buf[3] = 1;
8946                 etest->flags |= ETH_TEST_FL_FAILED;
8947         }
8948         if (bnx2x_test_intr(bp) != 0) {
8949                 buf[4] = 1;
8950                 etest->flags |= ETH_TEST_FL_FAILED;
8951         }
8952         if (bp->port.pmf)
8953                 if (bnx2x_link_test(bp) != 0) {
8954                         buf[5] = 1;
8955                         etest->flags |= ETH_TEST_FL_FAILED;
8956                 }
8957         buf[7] = bnx2x_mc_assert(bp);
8958         if (buf[7] != 0)
8959                 etest->flags |= ETH_TEST_FL_FAILED;
8960
8961 #ifdef BNX2X_EXTRA_DEBUG
8962         bnx2x_panic_dump(bp);
8963 #endif
8964 }
8965
8966 static const struct {
8967         long offset;
8968         int size;
8969         u32 flags;
8970 #define STATS_FLAGS_PORT                1
8971 #define STATS_FLAGS_FUNC                2
8972         u8 string[ETH_GSTRING_LEN];
8973 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8974 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8975                                 8, STATS_FLAGS_FUNC, "rx_bytes" },
8976         { STATS_OFFSET32(error_bytes_received_hi),
8977                                 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8978         { STATS_OFFSET32(total_bytes_transmitted_hi),
8979                                 8, STATS_FLAGS_FUNC, "tx_bytes" },
8980         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8981                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8982         { STATS_OFFSET32(total_unicast_packets_received_hi),
8983                                 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8984         { STATS_OFFSET32(total_multicast_packets_received_hi),
8985                                 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8986         { STATS_OFFSET32(total_broadcast_packets_received_hi),
8987                                 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8988         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8989                                 8, STATS_FLAGS_FUNC, "tx_packets" },
8990         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8991                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8992 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8993                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8994         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8995                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8996         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8997                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
8998         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8999                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9000         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9001                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9002         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9003                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9004         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9005                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9006         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9007                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9008         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9009                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9010         { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9011                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9012 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9013                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9014         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9015                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9016         { STATS_OFFSET32(jabber_packets_received),
9017                                 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9018         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9019                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9020         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9021                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9022         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9023                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9024         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9025                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9026         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9027                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9028         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9029                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9030         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9031                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9032 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9033                                 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9034         { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9035                                 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9036         { STATS_OFFSET32(tx_stat_outxonsent_hi),
9037                                 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9038         { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9039                                 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9040         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9041                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9042         { STATS_OFFSET32(mac_filter_discard),
9043                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9044         { STATS_OFFSET32(no_buff_discard),
9045                                 4, STATS_FLAGS_FUNC, "rx_discards" },
9046         { STATS_OFFSET32(xxoverflow_discard),
9047                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9048         { STATS_OFFSET32(brb_drop_hi),
9049                                 8, STATS_FLAGS_PORT, "brb_discard" },
9050         { STATS_OFFSET32(brb_truncate_hi),
9051                                 8, STATS_FLAGS_PORT, "brb_truncate" },
9052 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9053                                 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9054         { STATS_OFFSET32(rx_skb_alloc_failed),
9055                                 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9056 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9057                                 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9058 };
9059
9060 #define IS_NOT_E1HMF_STAT(bp, i) \
9061                 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9062
9063 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9064 {
9065         struct bnx2x *bp = netdev_priv(dev);
9066         int i, j;
9067
9068         switch (stringset) {
9069         case ETH_SS_STATS:
9070                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9071                         if (IS_NOT_E1HMF_STAT(bp, i))
9072                                 continue;
9073                         strcpy(buf + j*ETH_GSTRING_LEN,
9074                                bnx2x_stats_arr[i].string);
9075                         j++;
9076                 }
9077                 break;
9078
9079         case ETH_SS_TEST:
9080                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9081                 break;
9082         }
9083 }
9084
9085 static int bnx2x_get_stats_count(struct net_device *dev)
9086 {
9087         struct bnx2x *bp = netdev_priv(dev);
9088         int i, num_stats = 0;
9089
9090         for (i = 0; i < BNX2X_NUM_STATS; i++) {
9091                 if (IS_NOT_E1HMF_STAT(bp, i))
9092                         continue;
9093                 num_stats++;
9094         }
9095         return num_stats;
9096 }
9097
9098 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9099                                     struct ethtool_stats *stats, u64 *buf)
9100 {
9101         struct bnx2x *bp = netdev_priv(dev);
9102         u32 *hw_stats = (u32 *)&bp->eth_stats;
9103         int i, j;
9104
9105         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9106                 if (IS_NOT_E1HMF_STAT(bp, i))
9107                         continue;
9108
9109                 if (bnx2x_stats_arr[i].size == 0) {
9110                         /* skip this counter */
9111                         buf[j] = 0;
9112                         j++;
9113                         continue;
9114                 }
9115                 if (bnx2x_stats_arr[i].size == 4) {
9116                         /* 4-byte counter */
9117                         buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9118                         j++;
9119                         continue;
9120                 }
9121                 /* 8-byte counter */
9122                 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9123                                   *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9124                 j++;
9125         }
9126 }
9127
9128 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9129 {
9130         struct bnx2x *bp = netdev_priv(dev);
9131         int port = BP_PORT(bp);
9132         int i;
9133
9134         if (!netif_running(dev))
9135                 return 0;
9136
9137         if (!bp->port.pmf)
9138                 return 0;
9139
9140         if (data == 0)
9141                 data = 2;
9142
9143         for (i = 0; i < (data * 2); i++) {
9144                 if ((i % 2) == 0)
9145                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9146                                       bp->link_params.hw_led_mode,
9147                                       bp->link_params.chip_id);
9148                 else
9149                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9150                                       bp->link_params.hw_led_mode,
9151                                       bp->link_params.chip_id);
9152
9153                 msleep_interruptible(500);
9154                 if (signal_pending(current))
9155                         break;
9156         }
9157
9158         if (bp->link_vars.link_up)
9159                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9160                               bp->link_vars.line_speed,
9161                               bp->link_params.hw_led_mode,
9162                               bp->link_params.chip_id);
9163
9164         return 0;
9165 }
9166
9167 static struct ethtool_ops bnx2x_ethtool_ops = {
9168         .get_settings           = bnx2x_get_settings,
9169         .set_settings           = bnx2x_set_settings,
9170         .get_drvinfo            = bnx2x_get_drvinfo,
9171         .get_wol                = bnx2x_get_wol,
9172         .set_wol                = bnx2x_set_wol,
9173         .get_msglevel           = bnx2x_get_msglevel,
9174         .set_msglevel           = bnx2x_set_msglevel,
9175         .nway_reset             = bnx2x_nway_reset,
9176         .get_link               = ethtool_op_get_link,
9177         .get_eeprom_len         = bnx2x_get_eeprom_len,
9178         .get_eeprom             = bnx2x_get_eeprom,
9179         .set_eeprom             = bnx2x_set_eeprom,
9180         .get_coalesce           = bnx2x_get_coalesce,
9181         .set_coalesce           = bnx2x_set_coalesce,
9182         .get_ringparam          = bnx2x_get_ringparam,
9183         .set_ringparam          = bnx2x_set_ringparam,
9184         .get_pauseparam         = bnx2x_get_pauseparam,
9185         .set_pauseparam         = bnx2x_set_pauseparam,
9186         .get_rx_csum            = bnx2x_get_rx_csum,
9187         .set_rx_csum            = bnx2x_set_rx_csum,
9188         .get_tx_csum            = ethtool_op_get_tx_csum,
9189         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9190         .set_flags              = bnx2x_set_flags,
9191         .get_flags              = ethtool_op_get_flags,
9192         .get_sg                 = ethtool_op_get_sg,
9193         .set_sg                 = ethtool_op_set_sg,
9194         .get_tso                = ethtool_op_get_tso,
9195         .set_tso                = bnx2x_set_tso,
9196         .self_test_count        = bnx2x_self_test_count,
9197         .self_test              = bnx2x_self_test,
9198         .get_strings            = bnx2x_get_strings,
9199         .phys_id                = bnx2x_phys_id,
9200         .get_stats_count        = bnx2x_get_stats_count,
9201         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9202 };
9203
9204 /* end of ethtool_ops */
9205
9206 /****************************************************************************
9207 * General service functions
9208 ****************************************************************************/
9209
9210 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9211 {
9212         u16 pmcsr;
9213
9214         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9215
9216         switch (state) {
9217         case PCI_D0:
9218                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9219                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9220                                        PCI_PM_CTRL_PME_STATUS));
9221
9222                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9223                         /* delay required during transition out of D3hot */
9224                         msleep(20);
9225                 break;
9226
9227         case PCI_D3hot:
9228                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9229                 pmcsr |= 3;     /* PCI_D3hot */
9230
9231                 if (bp->wol)
9232                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9233
9234                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9235                                       pmcsr);
9236
9237                 /* No more memory access after this point until
9238                  * device is brought back to D0.
9239                  */
9240                 break;
9241
9242         default:
9243                 return -EINVAL;
9244         }
9245         return 0;
9246 }
9247
9248 /*
9249  * net_device service functions
9250  */
9251
9252 static int bnx2x_poll(struct napi_struct *napi, int budget)
9253 {
9254         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9255                                                  napi);
9256         struct bnx2x *bp = fp->bp;
9257         int work_done = 0;
9258         u16 rx_cons_sb;
9259
9260 #ifdef BNX2X_STOP_ON_ERROR
9261         if (unlikely(bp->panic))
9262                 goto poll_panic;
9263 #endif
9264
9265         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9266         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9267         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9268
9269         bnx2x_update_fpsb_idx(fp);
9270
9271         if (BNX2X_HAS_TX_WORK(fp))
9272                 bnx2x_tx_int(fp, budget);
9273
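             /* the last RCQ entry of each page is a next-page pointer
              * rather than a real completion, so step over it when the
              * consumer index lands there */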
9274         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9275         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9276                 rx_cons_sb++;
9277         if (BNX2X_HAS_RX_WORK(fp))
9278                 work_done = bnx2x_rx_int(fp, budget);
9279
9280         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9281         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9282         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9283                 rx_cons_sb++;
9284
9285         /* must not complete if we consumed the full budget */
9286         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9287
9288 #ifdef BNX2X_STOP_ON_ERROR
9289 poll_panic:
9290 #endif
9291                 netif_rx_complete(napi);
9292
9293                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9294                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9295                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9296                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9297         }
9298         return work_done;
9299 }
9300
9301
9302 /* we split the first BD into header and data BDs
9303  * to ease the pain of our fellow microcode engineers;
9304  * we use one mapping for both BDs.
9305  * So far this has only been observed to happen
9306  * in Other Operating Systems(TM)
9307  */
9308 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9309                                    struct bnx2x_fastpath *fp,
9310                                    struct eth_tx_bd **tx_bd, u16 hlen,
9311                                    u16 bd_prod, int nbd)
9312 {
9313         struct eth_tx_bd *h_tx_bd = *tx_bd;
9314         struct eth_tx_bd *d_tx_bd;
9315         dma_addr_t mapping;
9316         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9317
9318         /* first fix first BD */
9319         h_tx_bd->nbd = cpu_to_le16(nbd);
9320         h_tx_bd->nbytes = cpu_to_le16(hlen);
9321
9322         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9323            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9324            h_tx_bd->addr_lo, h_tx_bd->nbd);
9325
9326         /* now get a new data BD
9327          * (after the pbd) and fill it */
9328         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9329         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9330
9331         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9332                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9333
9334         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9335         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9336         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9337         d_tx_bd->vlan = 0;
9338         /* this marks the BD as one that has no individual mapping;
9339          * the FW ignores this flag in a BD not marked "start"
9340          */
9341         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9342         DP(NETIF_MSG_TX_QUEUED,
9343            "TSO split data size is %d (%x:%x)\n",
9344            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9345
9346         /* update tx_bd so the caller sets the last-BD flag on the data BD */
9347         *tx_bd = d_tx_bd;
9348
9349         return bd_prod;
9350 }
9351
9352 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9353 {
9354         if (fix > 0)
9355                 csum = (u16) ~csum_fold(csum_sub(csum,
9356                                 csum_partial(t_header - fix, fix, 0)));
9357
9358         else if (fix < 0)
9359                 csum = (u16) ~csum_fold(csum_add(csum,
9360                                 csum_partial(t_header, -fix, 0)));
9361
9362         return swab16(csum);
9363 }
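     /* A sketch of the math in bnx2x_csum_fix(), assuming csum was
      * computed starting "fix" bytes away from the real transport
      * header: for fix > 0 the sum of the extra leading bytes is
      * subtracted out, for fix < 0 the missing bytes are added back,
      * and the result is byte-swapped into the order the parsing BD
      * is assumed to expect.
      */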
9364
9365 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9366 {
9367         u32 rc;
9368
9369         if (skb->ip_summed != CHECKSUM_PARTIAL)
9370                 rc = XMIT_PLAIN;
9371
9372         else {
9373                 if (skb->protocol == htons(ETH_P_IPV6)) {
9374                         rc = XMIT_CSUM_V6;
9375                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9376                                 rc |= XMIT_CSUM_TCP;
9377
9378                 } else {
9379                         rc = XMIT_CSUM_V4;
9380                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9381                                 rc |= XMIT_CSUM_TCP;
9382                 }
9383         }
9384
9385         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9386                 rc |= XMIT_GSO_V4;
9387
9388         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9389                 rc |= XMIT_GSO_V6;
9390
9391         return rc;
9392 }
9393
9394 /* check if packet requires linearization (packet is too fragmented) */
9395 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9396                              u32 xmit_type)
9397 {
9398         int to_copy = 0;
9399         int hlen = 0;
9400         int first_bd_sz = 0;
9401
9402         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9403         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9404
9405                 if (xmit_type & XMIT_GSO) {
9406                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9407                         /* Check if LSO packet needs to be copied:
9408                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9409                         int wnd_size = MAX_FETCH_BD - 3;
9410                         /* Number of windows to check */
9411                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9412                         int wnd_idx = 0;
9413                         int frag_idx = 0;
9414                         u32 wnd_sum = 0;
9415
9416                         /* Headers length */
9417                         hlen = (int)(skb_transport_header(skb) - skb->data) +
9418                                 tcp_hdrlen(skb);
9419
9420                         /* Amount of data (w/o headers) on linear part of SKB */
9421                         first_bd_sz = skb_headlen(skb) - hlen;
9422
9423                         wnd_sum  = first_bd_sz;
9424
9425                         /* Calculate the first sum - it's special */
9426                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9427                                 wnd_sum +=
9428                                         skb_shinfo(skb)->frags[frag_idx].size;
9429
9430                         /* If there was data in the linear part of the skb - check it */
9431                         if (first_bd_sz > 0) {
9432                                 if (unlikely(wnd_sum < lso_mss)) {
9433                                         to_copy = 1;
9434                                         goto exit_lbl;
9435                                 }
9436
9437                                 wnd_sum -= first_bd_sz;
9438                         }
9439
9440                         /* Others are easier: run through the frag list and
9441                            check all windows */
9442                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9443                                 wnd_sum +=
9444                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9445
9446                                 if (unlikely(wnd_sum < lso_mss)) {
9447                                         to_copy = 1;
9448                                         break;
9449                                 }
9450                                 wnd_sum -=
9451                                         skb_shinfo(skb)->frags[wnd_idx].size;
9452                         }
9453
9454                 } else {
9455                         /* a non-LSO packet that is too fragmented must
9456                            always be linearized */
9457                         to_copy = 1;
9458                 }
9459         }
9460
9461 exit_lbl:
9462         if (unlikely(to_copy))
9463                 DP(NETIF_MSG_TX_QUEUED,
9464                    "Linearization IS REQUIRED for %s packet. "
9465                    "num_frags %d  hlen %d  first_bd_sz %d\n",
9466                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9467                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9468
9469         return to_copy;
9470 }
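
/* Worked example (sizes invented; the real window comes from MAX_FETCH_BD):
 * with wnd_size = 10 and an skb carrying 12 frags plus first_bd_sz bytes of
 * payload in the linear part, the loop slides a 10-BD window over
 * {linear remainder, frag0, frag1, ...} and requires every window to hold
 * at least lso_mss bytes.  If any window falls short, the FW could have to
 * build a segment from BDs that do not contain a full MSS, so the packet
 * is linearized instead.
 */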
9471
9472 /* called with netif_tx_lock
9473  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9474  * netif_wake_queue()
9475  */
9476 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9477 {
9478         struct bnx2x *bp = netdev_priv(dev);
9479         struct bnx2x_fastpath *fp;
9480         struct sw_tx_bd *tx_buf;
9481         struct eth_tx_bd *tx_bd;
9482         struct eth_tx_parse_bd *pbd = NULL;
9483         u16 pkt_prod, bd_prod;
9484         int nbd, fp_index;
9485         dma_addr_t mapping;
9486         u32 xmit_type = bnx2x_xmit_type(bp, skb);
9487         int vlan_off = (bp->e1hov ? 4 : 0);
9488         int i;
9489         u8 hlen = 0;
9490
9491 #ifdef BNX2X_STOP_ON_ERROR
9492         if (unlikely(bp->panic))
9493                 return NETDEV_TX_BUSY;
9494 #endif
9495
9496         fp_index = (smp_processor_id() % bp->num_queues);
9497         fp = &bp->fp[fp_index];
9498
9499         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9500                 bp->eth_stats.driver_xoff++;
9501                 netif_stop_queue(dev);
9502                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9503                 return NETDEV_TX_BUSY;
9504         }
9505
9506         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9507            "  gso type %x  xmit_type %x\n",
9508            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9509            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9510
9511         /* First, check if we need to linearize the skb
9512            (due to FW restrictions) */
9513         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9514                 /* Statistics of linearization */
9515                 bp->lin_cnt++;
9516                 if (skb_linearize(skb) != 0) {
9517                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9518                            "silently dropping this SKB\n");
9519                         dev_kfree_skb_any(skb);
9520                         return NETDEV_TX_OK;
9521                 }
9522         }
9523
9524         /*
9525          * Please read carefully. First we use one BD which we mark as start,
9526          * then for TSO or xsum we have a parsing info BD,
9527          * and only then we have the rest of the TSO BDs.
9528          * (don't forget to mark the last one as last,
9529          * and to unmap only AFTER you write to the BD ...)
9530          * And above all, all pbd sizes are in words - NOT DWORDS!
9531          */
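
        /* Illustrative chain for a checksummed TSO packet (a sketch of the
         * ordering described above, not a spec drawing):
         *
         *   [start BD: headers] [parsing BD] [data BD] ... [data BD | END]
         *
         * nbd counts the BDs of the packet, and tx_buf->first_bd records
         * where the chain begins so bnx2x_tx_int() can unwind it on
         * completion.
         */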
9532
9533         pkt_prod = fp->tx_pkt_prod++;
9534         bd_prod = TX_BD(fp->tx_bd_prod);
9535
9536         /* get a tx_buf and first BD */
9537         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9538         tx_bd = &fp->tx_desc_ring[bd_prod];
9539
9540         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9541         tx_bd->general_data = (UNICAST_ADDRESS <<
9542                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9543         /* header nbd */
9544         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9545
9546         /* remember the first BD of the packet */
9547         tx_buf->first_bd = fp->tx_bd_prod;
9548         tx_buf->skb = skb;
9549
9550         DP(NETIF_MSG_TX_QUEUED,
9551            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9552            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9553
9554         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9555                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9556                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9557                 vlan_off += 4;
9558         } else
9559                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9560
9561         if (xmit_type) {
9562                 /* turn on parsing and get a BD */
9563                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9564                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9565
9566                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9567         }
9568
9569         if (xmit_type & XMIT_CSUM) {
9570                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9571
9572                 /* for now NS flag is not used in Linux */
9573                 pbd->global_data = (hlen |
9574                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
9575                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9576
9577                 pbd->ip_hlen = (skb_transport_header(skb) -
9578                                 skb_network_header(skb)) / 2;
9579
9580                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9581
9582                 pbd->total_hlen = cpu_to_le16(hlen);
9583                 hlen = hlen*2 - vlan_off;
9584
9585                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9586
9587                 if (xmit_type & XMIT_CSUM_V4)
9588                         tx_bd->bd_flags.as_bitfield |=
9589                                                 ETH_TX_BD_FLAGS_IP_CSUM;
9590                 else
9591                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9592
9593                 if (xmit_type & XMIT_CSUM_TCP) {
9594                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9595
9596                 } else {
9597                         s8 fix = SKB_CS_OFF(skb); /* signed! */
9598
9599                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9600                         pbd->cs_offset = fix / 2;
9601
9602                         DP(NETIF_MSG_TX_QUEUED,
9603                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
9604                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9605                            SKB_CS(skb));
9606
9607                         /* HW bug: fixup the CSUM */
9608                         pbd->tcp_pseudo_csum =
9609                                 bnx2x_csum_fix(skb_transport_header(skb),
9610                                                SKB_CS(skb), fix);
9611
9612                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9613                            pbd->tcp_pseudo_csum);
9614                 }
9615         }
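
        /* Worked example of the header-length arithmetic above (assuming an
         * untagged TCPv4 frame, so vlan_off == 0): Ethernet header 14 bytes
         * -> hlen = 7 words, ip_hlen = 20 bytes -> 10 words, tcp_hdrlen =
         * 20 bytes -> 10 words, hence total_hlen = 27 words, and the byte
         * value handed to the TSO path is hlen = 27 * 2 - 0 = 54.
         */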
9616
9617         mapping = pci_map_single(bp->pdev, skb->data,
9618                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9619
9620         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9621         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9622         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9623         tx_bd->nbd = cpu_to_le16(nbd);
9624         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9625
9626         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9627            "  nbytes %d  flags %x  vlan %x\n",
9628            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9629            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9630            le16_to_cpu(tx_bd->vlan));
9631
9632         if (xmit_type & XMIT_GSO) {
9633
9634                 DP(NETIF_MSG_TX_QUEUED,
9635                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
9636                    skb->len, hlen, skb_headlen(skb),
9637                    skb_shinfo(skb)->gso_size);
9638
9639                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9640
9641                 if (unlikely(skb_headlen(skb) > hlen))
9642                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9643                                                  bd_prod, ++nbd);
9644
9645                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9646                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9647                 pbd->tcp_flags = pbd_tcp_flags(skb);
9648
9649                 if (xmit_type & XMIT_GSO_V4) {
9650                         pbd->ip_id = swab16(ip_hdr(skb)->id);
9651                         pbd->tcp_pseudo_csum =
9652                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9653                                                           ip_hdr(skb)->daddr,
9654                                                           0, IPPROTO_TCP, 0));
9655
9656                 } else
9657                         pbd->tcp_pseudo_csum =
9658                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9659                                                         &ipv6_hdr(skb)->daddr,
9660                                                         0, IPPROTO_TCP, 0));
9661
9662                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9663         }
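
        /* Note on the pseudo checksum above: csum_tcpudp_magic() and
         * csum_ipv6_magic() are called with len = 0, i.e. what is stored is
         * a pseudo-header checksum *without* the length field - matching
         * ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN, the FW being expected to
         * fold the proper length into each segment it generates.
         */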
9664
9665         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9666                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9667
9668                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9669                 tx_bd = &fp->tx_desc_ring[bd_prod];
9670
9671                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9672                                        frag->size, PCI_DMA_TODEVICE);
9673
9674                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9675                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9676                 tx_bd->nbytes = cpu_to_le16(frag->size);
9677                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9678                 tx_bd->bd_flags.as_bitfield = 0;
9679
9680                 DP(NETIF_MSG_TX_QUEUED,
9681                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9682                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9683                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9684         }
9685
9686         /* now at last mark the BD as the last BD */
9687         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9688
9689         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9690            tx_bd, tx_bd->bd_flags.as_bitfield);
9691
9692         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9693
9694         /* now send a tx doorbell, counting the next BD
9695          * if the packet contains or ends with it
9696          */
9697         if (TX_BD_POFF(bd_prod) < nbd)
9698                 nbd++;
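
        /* Example (illustrative): when the chain wraps past the end of a BD
         * page, the "next page" link BD sits inside the produced range; the
         * page offset of the new producer is then smaller than nbd, and the
         * check above counts that link BD in the doorbell as well.
         */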
9699
9700         if (pbd)
9701                 DP(NETIF_MSG_TX_QUEUED,
9702                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9703                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9704                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9705                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9706                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9707
9708         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9709
9710         fp->hw_tx_prods->bds_prod =
9711                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9712         mb(); /* FW restriction: must not reorder writing nbd and packets */
9713         fp->hw_tx_prods->packets_prod =
9714                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9715         DOORBELL(bp, FP_IDX(fp), 0);
9716
9717         mmiowb();
9718
9719         fp->tx_bd_prod += nbd;
9720         dev->trans_start = jiffies;
9721
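        /* The recheck below closes the usual stop/wake race: completions in
         * bnx2x_tx_int() may have freed BDs after the availability test but
         * while the queue was still awake, in which case no wake is issued;
         * without the recheck the queue could stall stopped although there
         * is room again.
         */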
9722         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9723                 netif_stop_queue(dev);
9724                 bp->eth_stats.driver_xoff++;
9725                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9726                         netif_wake_queue(dev);
9727         }
9728         fp->tx_pkt++;
9729
9730         return NETDEV_TX_OK;
9731 }
9732
9733 /* called with rtnl_lock */
9734 static int bnx2x_open(struct net_device *dev)
9735 {
9736         struct bnx2x *bp = netdev_priv(dev);
9737
9738         bnx2x_set_power_state(bp, PCI_D0);
9739
9740         return bnx2x_nic_load(bp, LOAD_OPEN);
9741 }
9742
9743 /* called with rtnl_lock */
9744 static int bnx2x_close(struct net_device *dev)
9745 {
9746         struct bnx2x *bp = netdev_priv(dev);
9747
9748         /* Unload the driver, release IRQs */
9749         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9750         if (atomic_read(&bp->pdev->enable_cnt) == 1)
9751                 if (!CHIP_REV_IS_SLOW(bp))
9752                         bnx2x_set_power_state(bp, PCI_D3hot);
9753
9754         return 0;
9755 }
9756
9757 /* called with netif_tx_lock from set_multicast */
9758 static void bnx2x_set_rx_mode(struct net_device *dev)
9759 {
9760         struct bnx2x *bp = netdev_priv(dev);
9761         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9762         int port = BP_PORT(bp);
9763
9764         if (bp->state != BNX2X_STATE_OPEN) {
9765                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9766                 return;
9767         }
9768
9769         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9770
9771         if (dev->flags & IFF_PROMISC)
9772                 rx_mode = BNX2X_RX_MODE_PROMISC;
9773
9774         else if ((dev->flags & IFF_ALLMULTI) ||
9775                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9776                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9777
9778         else { /* some multicasts */
9779                 if (CHIP_IS_E1(bp)) {
9780                         int i, old, offset;
9781                         struct dev_mc_list *mclist;
9782                         struct mac_configuration_cmd *config =
9783                                                 bnx2x_sp(bp, mcast_config);
9784
9785                         for (i = 0, mclist = dev->mc_list;
9786                              mclist && (i < dev->mc_count);
9787                              i++, mclist = mclist->next) {
9788
9789                                 config->config_table[i].
9790                                         cam_entry.msb_mac_addr =
9791                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
9792                                 config->config_table[i].
9793                                         cam_entry.middle_mac_addr =
9794                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
9795                                 config->config_table[i].
9796                                         cam_entry.lsb_mac_addr =
9797                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
9798                                 config->config_table[i].cam_entry.flags =
9799                                                         cpu_to_le16(port);
9800                                 config->config_table[i].
9801                                         target_table_entry.flags = 0;
9802                                 config->config_table[i].
9803                                         target_table_entry.client_id = 0;
9804                                 config->config_table[i].
9805                                         target_table_entry.vlan_id = 0;
9806
9807                                 DP(NETIF_MSG_IFUP,
9808                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9809                                    config->config_table[i].
9810                                                 cam_entry.msb_mac_addr,
9811                                    config->config_table[i].
9812                                                 cam_entry.middle_mac_addr,
9813                                    config->config_table[i].
9814                                                 cam_entry.lsb_mac_addr);
9815                         }
9816                         old = config->hdr.length_6b;
9817                         if (old > i) {
9818                                 for (; i < old; i++) {
9819                                         if (CAM_IS_INVALID(config->
9820                                                            config_table[i])) {
9821                                                 i--; /* already invalidated */
9822                                                 break;
9823                                         }
9824                                         /* invalidate */
9825                                         CAM_INVALIDATE(config->
9826                                                        config_table[i]);
9827                                 }
9828                         }
9829
9830                         if (CHIP_REV_IS_SLOW(bp))
9831                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9832                         else
9833                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
9834
9835                         config->hdr.length_6b = i;
9836                         config->hdr.offset = offset;
9837                         config->hdr.client_id = BP_CL_ID(bp);
9838                         config->hdr.reserved1 = 0;
9839
9840                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9841                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9842                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9843                                       0);
9844                 } else { /* E1H */
9845                         /* Accept one or more multicasts */
9846                         struct dev_mc_list *mclist;
9847                         u32 mc_filter[MC_HASH_SIZE];
9848                         u32 crc, bit, regidx;
9849                         int i;
9850
9851                         memset(mc_filter, 0, sizeof(mc_filter));
9852
9853                         for (i = 0, mclist = dev->mc_list;
9854                              mclist && (i < dev->mc_count);
9855                              i++, mclist = mclist->next) {
9856
9857                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9858                                    mclist->dmi_addr);
9859
9860                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9861                                 bit = (crc >> 24) & 0xff;
9862                                 regidx = bit >> 5;
9863                                 bit &= 0x1f;
9864                                 mc_filter[regidx] |= (1 << bit);
9865                         }
9866
9867                         for (i = 0; i < MC_HASH_SIZE; i++)
9868                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9869                                        mc_filter[i]);
9870                 }
9871         }
9872
9873         bp->rx_mode = rx_mode;
9874         bnx2x_set_storm_rx_mode(bp);
9875 }
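
/* Worked example for the E1H multicast hash above (MAC value invented for
 * illustration): if crc32c_le() over the address yields 0xb3xxxxxx, then
 * bit = 0xb3 = 179, regidx = 179 >> 5 = 5 and bit & 0x1f = 19, so bit 19
 * of mc_filter[5] is set - one bit in a 256-bit imperfect filter spread
 * over eight 32-bit MC_HASH registers.
 */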
9876
9877 /* called with rtnl_lock */
9878 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9879 {
9880         struct sockaddr *addr = p;
9881         struct bnx2x *bp = netdev_priv(dev);
9882
9883         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9884                 return -EINVAL;
9885
9886         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9887         if (netif_running(dev)) {
9888                 if (CHIP_IS_E1(bp))
9889                         bnx2x_set_mac_addr_e1(bp, 1);
9890                 else
9891                         bnx2x_set_mac_addr_e1h(bp, 1);
9892         }
9893
9894         return 0;
9895 }
9896
9897 /* called with rtnl_lock */
9898 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9899 {
9900         struct mii_ioctl_data *data = if_mii(ifr);
9901         struct bnx2x *bp = netdev_priv(dev);
9902         int port = BP_PORT(bp);
9903         int err;
9904
9905         switch (cmd) {
9906         case SIOCGMIIPHY:
9907                 data->phy_id = bp->port.phy_addr;
9908
9909                 /* fallthrough */
9910
9911         case SIOCGMIIREG: {
9912                 u16 mii_regval;
9913
9914                 if (!netif_running(dev))
9915                         return -EAGAIN;
9916
9917                 mutex_lock(&bp->port.phy_mutex);
9918                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9919                                       DEFAULT_PHY_DEV_ADDR,
9920                                       (data->reg_num & 0x1f), &mii_regval);
9921                 data->val_out = mii_regval;
9922                 mutex_unlock(&bp->port.phy_mutex);
9923                 return err;
9924         }
9925
9926         case SIOCSMIIREG:
9927                 if (!capable(CAP_NET_ADMIN))
9928                         return -EPERM;
9929
9930                 if (!netif_running(dev))
9931                         return -EAGAIN;
9932
9933                 mutex_lock(&bp->port.phy_mutex);
9934                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9935                                        DEFAULT_PHY_DEV_ADDR,
9936                                        (data->reg_num & 0x1f), data->val_in);
9937                 mutex_unlock(&bp->port.phy_mutex);
9938                 return err;
9939
9940         default:
9941                 /* do nothing */
9942                 break;
9943         }
9944
9945         return -EOPNOTSUPP;
9946 }
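
/* Usage sketch (userspace, illustrative only - not part of the driver):
 * reading a PHY register through the ioctls above.
 *
 *   struct ifreq ifr;
 *   struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *   memset(&ifr, 0, sizeof(ifr));
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ioctl(fd, SIOCGMIIPHY, &ifr);     - fills mii->phy_id
 *   mii->reg_num = 1;                 - only the low 5 bits are used
 *   ioctl(fd, SIOCGMIIREG, &ifr);     - result lands in mii->val_out
 *
 * The read is forwarded as a clause 45 access at DEFAULT_PHY_DEV_ADDR.
 */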
9947
9948 /* called with rtnl_lock */
9949 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9950 {
9951         struct bnx2x *bp = netdev_priv(dev);
9952         int rc = 0;
9953
9954         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9955             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9956                 return -EINVAL;
9957
9958         /* This does not race with packet allocation
9959          * because the actual alloc size is
9960          * only updated as part of load
9961          */
9962         dev->mtu = new_mtu;
9963
9964         if (netif_running(dev)) {
9965                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9966                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9967         }
9968
9969         return rc;
9970 }
9971
9972 static void bnx2x_tx_timeout(struct net_device *dev)
9973 {
9974         struct bnx2x *bp = netdev_priv(dev);
9975
9976 #ifdef BNX2X_STOP_ON_ERROR
9977         if (!bp->panic)
9978                 bnx2x_panic();
9979 #endif
9980         /* This allows the netif to be shut down gracefully before resetting */
9981         schedule_work(&bp->reset_task);
9982 }
9983
9984 #ifdef BCM_VLAN
9985 /* called with rtnl_lock */
9986 static void bnx2x_vlan_rx_register(struct net_device *dev,
9987                                    struct vlan_group *vlgrp)
9988 {
9989         struct bnx2x *bp = netdev_priv(dev);
9990
9991         bp->vlgrp = vlgrp;
9992         if (netif_running(dev))
9993                 bnx2x_set_client_config(bp);
9994 }
9995
9996 #endif
9997
9998 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9999 static void poll_bnx2x(struct net_device *dev)
10000 {
10001         struct bnx2x *bp = netdev_priv(dev);
10002
10003         disable_irq(bp->pdev->irq);
10004         bnx2x_interrupt(bp->pdev->irq, dev);
10005         enable_irq(bp->pdev->irq);
10006 }
10007 #endif
10008
10009 static const struct net_device_ops bnx2x_netdev_ops = {
10010         .ndo_open               = bnx2x_open,
10011         .ndo_stop               = bnx2x_close,
10012         .ndo_start_xmit         = bnx2x_start_xmit,
10013         .ndo_set_multicast_list = bnx2x_set_rx_mode,
10014         .ndo_set_mac_address    = bnx2x_change_mac_addr,
10015         .ndo_validate_addr      = eth_validate_addr,
10016         .ndo_do_ioctl           = bnx2x_ioctl,
10017         .ndo_change_mtu         = bnx2x_change_mtu,
10018         .ndo_tx_timeout         = bnx2x_tx_timeout,
10019 #ifdef BCM_VLAN
10020         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
10021 #endif
10022 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10023         .ndo_poll_controller    = poll_bnx2x,
10024 #endif
10025 };
10026
10027
10028 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10029                                     struct net_device *dev)
10030 {
10031         struct bnx2x *bp;
10032         int rc;
10033
10034         SET_NETDEV_DEV(dev, &pdev->dev);
10035         bp = netdev_priv(dev);
10036
10037         bp->dev = dev;
10038         bp->pdev = pdev;
10039         bp->flags = 0;
10040         bp->func = PCI_FUNC(pdev->devfn);
10041
10042         rc = pci_enable_device(pdev);
10043         if (rc) {
10044                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10045                 goto err_out;
10046         }
10047
10048         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10049                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10050                        " aborting\n");
10051                 rc = -ENODEV;
10052                 goto err_out_disable;
10053         }
10054
10055         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10056                 printk(KERN_ERR PFX "Cannot find second PCI device"
10057                        " base address, aborting\n");
10058                 rc = -ENODEV;
10059                 goto err_out_disable;
10060         }
10061
10062         if (atomic_read(&pdev->enable_cnt) == 1) {
10063                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10064                 if (rc) {
10065                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10066                                " aborting\n");
10067                         goto err_out_disable;
10068                 }
10069
10070                 pci_set_master(pdev);
10071                 pci_save_state(pdev);
10072         }
10073
10074         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10075         if (bp->pm_cap == 0) {
10076                 printk(KERN_ERR PFX "Cannot find power management"
10077                        " capability, aborting\n");
10078                 rc = -EIO;
10079                 goto err_out_release;
10080         }
10081
10082         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10083         if (bp->pcie_cap == 0) {
10084                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10085                        " aborting\n");
10086                 rc = -EIO;
10087                 goto err_out_release;
10088         }
10089
10090         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10091                 bp->flags |= USING_DAC_FLAG;
10092                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10093                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10094                                " failed, aborting\n");
10095                         rc = -EIO;
10096                         goto err_out_release;
10097                 }
10098
10099         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10100                 printk(KERN_ERR PFX "System does not support DMA,"
10101                        " aborting\n");
10102                 rc = -EIO;
10103                 goto err_out_release;
10104         }
10105
10106         dev->mem_start = pci_resource_start(pdev, 0);
10107         dev->base_addr = dev->mem_start;
10108         dev->mem_end = pci_resource_end(pdev, 0);
10109
10110         dev->irq = pdev->irq;
10111
10112         bp->regview = pci_ioremap_bar(pdev, 0);
10113         if (!bp->regview) {
10114                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10115                 rc = -ENOMEM;
10116                 goto err_out_release;
10117         }
10118
10119         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10120                                         min_t(u64, BNX2X_DB_SIZE,
10121                                               pci_resource_len(pdev, 2)));
10122         if (!bp->doorbells) {
10123                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10124                 rc = -ENOMEM;
10125                 goto err_out_unmap;
10126         }
10127
10128         bnx2x_set_power_state(bp, PCI_D0);
10129
10130         /* clean indirect addresses */
10131         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10132                                PCICFG_VENDOR_ID_OFFSET);
10133         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10134         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10135         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10136         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10137
10138         dev->watchdog_timeo = TX_TIMEOUT;
10139
10140         dev->netdev_ops = &bnx2x_netdev_ops;
10141         dev->ethtool_ops = &bnx2x_ethtool_ops;
10142         dev->features |= NETIF_F_SG;
10143         dev->features |= NETIF_F_HW_CSUM;
10144         if (bp->flags & USING_DAC_FLAG)
10145                 dev->features |= NETIF_F_HIGHDMA;
10146 #ifdef BCM_VLAN
10147         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10148 #endif
10149         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10150         dev->features |= NETIF_F_TSO6;
10151
10152         return 0;
10153
10154 err_out_unmap:
10155         if (bp->regview) {
10156                 iounmap(bp->regview);
10157                 bp->regview = NULL;
10158         }
10159         if (bp->doorbells) {
10160                 iounmap(bp->doorbells);
10161                 bp->doorbells = NULL;
10162         }
10163
10164 err_out_release:
10165         if (atomic_read(&pdev->enable_cnt) == 1)
10166                 pci_release_regions(pdev);
10167
10168 err_out_disable:
10169         pci_disable_device(pdev);
10170         pci_set_drvdata(pdev, NULL);
10171
10172 err_out:
10173         return rc;
10174 }
10175
10176 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10177 {
10178         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10179
10180         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10181         return val;
10182 }
10183
10184 /* return value: 1=2.5GHz, 2=5GHz */
10185 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10186 {
10187         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10188
10189         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10190         return val;
10191 }
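
/* Example (illustrative): on a x8 link trained to 5GT/s the two helpers
 * above return 8 and 2 respectively, which bnx2x_init_one() below prints
 * as "PCI-E x8 5GHz (Gen2)".
 */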
10192
10193 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10194                                     const struct pci_device_id *ent)
10195 {
10196         static int version_printed;
10197         struct net_device *dev = NULL;
10198         struct bnx2x *bp;
10199         int rc;
10200
10201         if (version_printed++ == 0)
10202                 printk(KERN_INFO "%s", version);
10203
10204         /* dev zeroed in init_etherdev */
10205         dev = alloc_etherdev(sizeof(*bp));
10206         if (!dev) {
10207                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10208                 return -ENOMEM;
10209         }
10210
10211         bp = netdev_priv(dev);
10212         bp->msglevel = debug;
10213
10214         rc = bnx2x_init_dev(pdev, dev);
10215         if (rc < 0) {
10216                 free_netdev(dev);
10217                 return rc;
10218         }
10219
10220         rc = register_netdev(dev);
10221         if (rc) {
10222                 dev_err(&pdev->dev, "Cannot register net device\n");
10223                 goto init_one_exit;
10224         }
10225
10226         pci_set_drvdata(pdev, dev);
10227
10228         rc = bnx2x_init_bp(bp);
10229         if (rc) {
10230                 unregister_netdev(dev);
10231                 goto init_one_exit;
10232         }
10233
10234         netif_carrier_off(dev);
10235
10236         bp->common.name = board_info[ent->driver_data].name;
10237         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10238                " IRQ %d, ", dev->name, bp->common.name,
10239                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10240                bnx2x_get_pcie_width(bp),
10241                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10242                dev->base_addr, bp->pdev->irq);
10243         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10244         return 0;
10245
10246 init_one_exit:
10247         if (bp->regview)
10248                 iounmap(bp->regview);
10249
10250         if (bp->doorbells)
10251                 iounmap(bp->doorbells);
10252
10253         free_netdev(dev);
10254
10255         if (atomic_read(&pdev->enable_cnt) == 1)
10256                 pci_release_regions(pdev);
10257
10258         pci_disable_device(pdev);
10259         pci_set_drvdata(pdev, NULL);
10260
10261         return rc;
10262 }
10263
10264 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10265 {
10266         struct net_device *dev = pci_get_drvdata(pdev);
10267         struct bnx2x *bp;
10268
10269         if (!dev) {
10270                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10271                 return;
10272         }
10273         bp = netdev_priv(dev);
10274
10275         unregister_netdev(dev);
10276
10277         if (bp->regview)
10278                 iounmap(bp->regview);
10279
10280         if (bp->doorbells)
10281                 iounmap(bp->doorbells);
10282
10283         free_netdev(dev);
10284
10285         if (atomic_read(&pdev->enable_cnt) == 1)
10286                 pci_release_regions(pdev);
10287
10288         pci_disable_device(pdev);
10289         pci_set_drvdata(pdev, NULL);
10290 }
10291
10292 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10293 {
10294         struct net_device *dev = pci_get_drvdata(pdev);
10295         struct bnx2x *bp;
10296
10297         if (!dev) {
10298                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10299                 return -ENODEV;
10300         }
10301         bp = netdev_priv(dev);
10302
10303         rtnl_lock();
10304
10305         pci_save_state(pdev);
10306
10307         if (!netif_running(dev)) {
10308                 rtnl_unlock();
10309                 return 0;
10310         }
10311
10312         netif_device_detach(dev);
10313
10314         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10315
10316         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10317
10318         rtnl_unlock();
10319
10320         return 0;
10321 }
10322
10323 static int bnx2x_resume(struct pci_dev *pdev)
10324 {
10325         struct net_device *dev = pci_get_drvdata(pdev);
10326         struct bnx2x *bp;
10327         int rc;
10328
10329         if (!dev) {
10330                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10331                 return -ENODEV;
10332         }
10333         bp = netdev_priv(dev);
10334
10335         rtnl_lock();
10336
10337         pci_restore_state(pdev);
10338
10339         if (!netif_running(dev)) {
10340                 rtnl_unlock();
10341                 return 0;
10342         }
10343
10344         bnx2x_set_power_state(bp, PCI_D0);
10345         netif_device_attach(dev);
10346
10347         rc = bnx2x_nic_load(bp, LOAD_OPEN);
10348
10349         rtnl_unlock();
10350
10351         return rc;
10352 }
10353
10354 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10355 {
10356         int i;
10357
10358         bp->state = BNX2X_STATE_ERROR;
10359
10360         bp->rx_mode = BNX2X_RX_MODE_NONE;
10361
10362         bnx2x_netif_stop(bp, 0);
10363
10364         del_timer_sync(&bp->timer);
10365         bp->stats_state = STATS_STATE_DISABLED;
10366         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10367
10368         /* Release IRQs */
10369         bnx2x_free_irq(bp);
10370
10371         if (CHIP_IS_E1(bp)) {
10372                 struct mac_configuration_cmd *config =
10373                                                 bnx2x_sp(bp, mcast_config);
10374
10375                 for (i = 0; i < config->hdr.length_6b; i++)
10376                         CAM_INVALIDATE(config->config_table[i]);
10377         }
10378
10379         /* Free SKBs, SGEs, TPA pool and driver internals */
10380         bnx2x_free_skbs(bp);
10381         for_each_queue(bp, i)
10382                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10383         bnx2x_free_mem(bp);
10384
10385         bp->state = BNX2X_STATE_CLOSED;
10386
10387         netif_carrier_off(bp->dev);
10388
10389         return 0;
10390 }
10391
10392 static void bnx2x_eeh_recover(struct bnx2x *bp)
10393 {
10394         u32 val;
10395
10396         mutex_init(&bp->port.phy_mutex);
10397
10398         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10399         bp->link_params.shmem_base = bp->common.shmem_base;
10400         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10401
10402         if (!bp->common.shmem_base ||
10403             (bp->common.shmem_base < 0xA0000) ||
10404             (bp->common.shmem_base >= 0xC0000)) {
10405                 BNX2X_DEV_INFO("MCP not active\n");
10406                 bp->flags |= NO_MCP_FLAG;
10407                 return;
10408         }
10409
10410         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10411         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10412                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10413                 BNX2X_ERR("BAD MCP validity signature\n");
10414
10415         if (!BP_NOMCP(bp)) {
10416                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10417                               & DRV_MSG_SEQ_NUMBER_MASK);
10418                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10419         }
10420 }
10421
10422 /**
10423  * bnx2x_io_error_detected - called when PCI error is detected
10424  * @pdev: Pointer to PCI device
10425  * @state: The current pci connection state
10426  *
10427  * This function is called after a PCI bus error affecting
10428  * this device has been detected.
10429  */
10430 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10431                                                 pci_channel_state_t state)
10432 {
10433         struct net_device *dev = pci_get_drvdata(pdev);
10434         struct bnx2x *bp = netdev_priv(dev);
10435
10436         rtnl_lock();
10437
10438         netif_device_detach(dev);
10439
10440         if (netif_running(dev))
10441                 bnx2x_eeh_nic_unload(bp);
10442
10443         pci_disable_device(pdev);
10444
10445         rtnl_unlock();
10446
10447         /* Request a slot reset */
10448         return PCI_ERS_RESULT_NEED_RESET;
10449 }
10450
10451 /**
10452  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10453  * @pdev: Pointer to PCI device
10454  *
10455  * Restart the card from scratch, as if from a cold-boot.
10456  */
10457 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10458 {
10459         struct net_device *dev = pci_get_drvdata(pdev);
10460         struct bnx2x *bp = netdev_priv(dev);
10461
10462         rtnl_lock();
10463
10464         if (pci_enable_device(pdev)) {
10465                 dev_err(&pdev->dev,
10466                         "Cannot re-enable PCI device after reset\n");
10467                 rtnl_unlock();
10468                 return PCI_ERS_RESULT_DISCONNECT;
10469         }
10470
10471         pci_set_master(pdev);
10472         pci_restore_state(pdev);
10473
10474         if (netif_running(dev))
10475                 bnx2x_set_power_state(bp, PCI_D0);
10476
10477         rtnl_unlock();
10478
10479         return PCI_ERS_RESULT_RECOVERED;
10480 }
10481
10482 /**
10483  * bnx2x_io_resume - called when traffic can start flowing again
10484  * @pdev: Pointer to PCI device
10485  *
10486  * This callback is called when the error recovery driver tells us that
10487  * it's OK to resume normal operation.
10488  */
10489 static void bnx2x_io_resume(struct pci_dev *pdev)
10490 {
10491         struct net_device *dev = pci_get_drvdata(pdev);
10492         struct bnx2x *bp = netdev_priv(dev);
10493
10494         rtnl_lock();
10495
10496         bnx2x_eeh_recover(bp);
10497
10498         if (netif_running(dev))
10499                 bnx2x_nic_load(bp, LOAD_NORMAL);
10500
10501         netif_device_attach(dev);
10502
10503         rtnl_unlock();
10504 }
10505
10506 static struct pci_error_handlers bnx2x_err_handler = {
10507         .error_detected = bnx2x_io_error_detected,
10508         .slot_reset = bnx2x_io_slot_reset,
10509         .resume = bnx2x_io_resume,
10510 };
10511
10512 static struct pci_driver bnx2x_pci_driver = {
10513         .name        = DRV_MODULE_NAME,
10514         .id_table    = bnx2x_pci_tbl,
10515         .probe       = bnx2x_init_one,
10516         .remove      = __devexit_p(bnx2x_remove_one),
10517         .suspend     = bnx2x_suspend,
10518         .resume      = bnx2x_resume,
10519         .err_handler = &bnx2x_err_handler,
10520 };
10521
10522 static int __init bnx2x_init(void)
10523 {
10524         bnx2x_wq = create_singlethread_workqueue("bnx2x");
10525         if (bnx2x_wq == NULL) {
10526                 printk(KERN_ERR PFX "Cannot create workqueue\n");
10527                 return -ENOMEM;
10528         }
10529
10530         return pci_register_driver(&bnx2x_pci_driver);
10531 }
10532
10533 static void __exit bnx2x_cleanup(void)
10534 {
10535         pci_unregister_driver(&bnx2x_pci_driver);
10536
10537         destroy_workqueue(bnx2x_wq);
10538 }
10539
10540 module_init(bnx2x_init);
10541 module_exit(bnx2x_cleanup);
10542