/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.24"
#define DRV_MODULE_RELDATE      "2009/01/14"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

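/* Indirect register access: the target GRC address is written into the
 * PCICFG_GRC_ADDRESS window in PCI config space and the data is then
 * transferred through PCICFG_GRC_DATA; the window is parked back at
 * PCICFG_VENDOR_ID_OFFSET when done.
 */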
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

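/* DMAE "GO" doorbell registers, one per DMAE command channel */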
static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

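        /* Poll the write-back completion word until the DMAE engine
         * signals that the transfer is done (or the retry budget runs out).
         */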
        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

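        /* Each of the four STORM RISC processors (X, T, C and U) keeps its
         * own assert list in internal memory; walk each list and print
         * every valid entry.
         */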
        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

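        /* Dump the MCP scratchpad log: 'mark' is treated as the wrap point,
         * so print from mark to the end of the buffer, then from the start
         * of the buffer back up to mark.
         */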
        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

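/* Quiesce interrupt processing: bump intr_sem so the ISRs bail out early,
 * optionally mask interrupts at the HC, then synchronize every vector and
 * flush the slowpath work so nothing is left running.
 */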
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

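/* Acknowledge a status-block update to the IGU: the status block id, storm
 * id, new index and interrupt mode are packed into a single
 * igu_ack_register write on the HC command register.
 */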
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 tx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
        return ((fp->tx_pkt_prod != tx_cons_sb) ||
                (fp->tx_pkt_prod != fp->tx_pkt_cons));
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries;
           it will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}


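/* Slowpath event handler: a ramrod completion arrived on the RX completion
 * queue; advance the matching fastpath/driver state machine accordingly.
 */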
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* Note that we are not allocating a new skb here,
 * just moving one from cons to prod.  Since no new
 * mapping is created, there is no need to check for
 * dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

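/* The last two SGE slots of every ring page are "next page" pointers rather
 * than real entries; keep their mask bits permanently cleared so they are
 * never treated as consumable elements.
 */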
static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the mask bits of the last two indices in each page:
           these correspond to the "next page" element, hence will never
           be indicated by the FW and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

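/* TPA (LRO) start: the FW opened an aggregation on 'queue'.  The empty skb
 * parked in the tpa_pool takes over the producer slot, while the skb holding
 * the first segment moves into the pool until the aggregation ends.
 */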
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we're going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take VLAN tag into an account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers, since the FW might read the BD/SGE right after the
         * producer is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64.  The following barrier is also mandatory since the FW
         * assumes BDs always have buffers.
         */
        wmb();

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}

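/* Main RX polling loop: walk the completion queue until the HW completion
 * index is reached or the budget is exhausted, dispatching slowpath events
 * and TPA start/stop handling along the way.
 */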
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
        u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
#endif

        /* CQ "next element" is of the size of the regular element,
           that's why it's ok here */
        hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
        if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                hw_comp_cons++;

        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        bd_prod_fw = bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
           FP_IDX(fp), hw_comp_cons, sw_comp_cons);

        while (sw_comp_cons != hw_comp_cons) {
                struct sw_rx_bd *rx_buf = NULL;
                struct sk_buff *skb;
                union eth_rx_cqe *cqe;
                u8 cqe_fp_flags;
                u16 len, pad;

                comp_ring_cons = RCQ_BD(sw_comp_cons);
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);

                cqe = &fp->rx_comp_ring[comp_ring_cons];
                cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

                DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
                   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
                   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
                   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
                   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
                   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

                /* is this a slowpath msg? */
                if (unlikely(CQE_TYPE(cqe_fp_flags))) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;

                /* this is an rx packet */
                } else {
                        rx_buf = &fp->rx_buf_ring[bd_cons];
                        skb = rx_buf->skb;
                        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
                        pad = cqe->fast_path_cqe.placement_offset;

                        /* If CQE is marked both TPA_START and TPA_END
                           it is a non-TPA CQE */
                        if ((!fp->disable_tpa) &&
                            (TPA_TYPE(cqe_fp_flags) !=
                                        (TPA_TYPE_START | TPA_TYPE_END))) {
                                u16 queue = cqe->fast_path_cqe.queue_index;

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_start on queue %d\n",
                                           queue);

                                        bnx2x_tpa_start(fp, queue, skb,
                                                        bd_cons, bd_prod);
                                        goto next_rx;
                                }

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_stop on queue %d\n",
                                           queue);

1490                                         if (!BNX2X_RX_SUM_FIX(cqe))
1491                                                 BNX2X_ERR("STOP on non-TCP "
1492                                                           "data\n");
1493
1494                                         /* This is the size of the linear
1495                                            data on this skb */
1496                                         len = le16_to_cpu(cqe->fast_path_cqe.
1497                                                                 len_on_bd);
1498                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1499                                                     len, cqe, comp_ring_cons);
1500 #ifdef BNX2X_STOP_ON_ERROR
1501                                         if (bp->panic)
1502                                                 return -EINVAL;
1503 #endif
1504
1505                                         bnx2x_update_sge_prod(fp,
1506                                                         &cqe->fast_path_cqe);
1507                                         goto next_cqe;
1508                                 }
1509                         }
1510
1511                         pci_dma_sync_single_for_device(bp->pdev,
1512                                         pci_unmap_addr(rx_buf, mapping),
1513                                                        pad + RX_COPY_THRESH,
1514                                                        PCI_DMA_FROMDEVICE);
1515                         prefetch(skb);
1516                         prefetch(((char *)(skb)) + 128);
1517
1518                         /* is this an error packet? (note: "FALGS" is the fw macro's actual spelling) */
1519                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1520                                 DP(NETIF_MSG_RX_ERR,
1521                                    "ERROR  flags %x  rx packet %u\n",
1522                                    cqe_fp_flags, sw_comp_cons);
1523                                 bp->eth_stats.rx_err_discard_pkt++;
1524                                 goto reuse_rx;
1525                         }
1526
1527                         /* Since we don't have a jumbo ring,
1528                          * copy small packets if the MTU > 1500
1529                          */
1530                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1531                             (len <= RX_COPY_THRESH)) {
1532                                 struct sk_buff *new_skb;
1533
1534                                 new_skb = netdev_alloc_skb(bp->dev,
1535                                                            len + pad);
1536                                 if (new_skb == NULL) {
1537                                         DP(NETIF_MSG_RX_ERR,
1538                                            "ERROR  packet dropped "
1539                                            "because of alloc failure\n");
1540                                         bp->eth_stats.rx_skb_alloc_failed++;
1541                                         goto reuse_rx;
1542                                 }
1543
1544                                 /* aligned copy */
1545                                 skb_copy_from_linear_data_offset(skb, pad,
1546                                                     new_skb->data + pad, len);
1547                                 skb_reserve(new_skb, pad);
1548                                 skb_put(new_skb, len);
1549
1550                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1551
1552                                 skb = new_skb;
1553
1554                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1555                                 pci_unmap_single(bp->pdev,
1556                                         pci_unmap_addr(rx_buf, mapping),
1557                                                  bp->rx_buf_size,
1558                                                  PCI_DMA_FROMDEVICE);
1559                                 skb_reserve(skb, pad);
1560                                 skb_put(skb, len);
1561
1562                         } else {
1563                                 DP(NETIF_MSG_RX_ERR,
1564                                    "ERROR  packet dropped because "
1565                                    "of alloc failure\n");
1566                                 bp->eth_stats.rx_skb_alloc_failed++;
1567 reuse_rx:
1568                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1569                                 goto next_rx;
1570                         }
1571
1572                         skb->protocol = eth_type_trans(skb, bp->dev);
1573
1574                         skb->ip_summed = CHECKSUM_NONE;
1575                         if (bp->rx_csum) {
1576                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1577                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1578                                 else
1579                                         bp->eth_stats.hw_csum_err++;
1580                         }
1581                 }
1582
1583 #ifdef BCM_VLAN
1584                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1585                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1586                      PARSING_FLAGS_VLAN))
1587                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1588                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1589                 else
1590 #endif
1591                         netif_receive_skb(skb);
1592
1593
1594 next_rx:
1595                 rx_buf->skb = NULL;
1596
1597                 bd_cons = NEXT_RX_IDX(bd_cons);
1598                 bd_prod = NEXT_RX_IDX(bd_prod);
1599                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1600                 rx_pkt++;
1601 next_cqe:
1602                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1603                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1604
1605                 if (rx_pkt == budget)
1606                         break;
1607         } /* while */
1608
1609         fp->rx_bd_cons = bd_cons;
1610         fp->rx_bd_prod = bd_prod_fw;
1611         fp->rx_comp_cons = sw_comp_cons;
1612         fp->rx_comp_prod = sw_comp_prod;
1613
1614         /* Update producers */
1615         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1616                              fp->rx_sge_prod);
1617
1618         fp->rx_pkt += rx_pkt;
1619         fp->rx_calls++;
1620
1621         return rx_pkt;
1622 }
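/*
 * Editor's note: bnx2x_rx_int() follows the usual NAPI consumer shape;
 * an illustrative outline of the loop above:
 *
 *	hw = le16_to_cpu(*fp->rx_cons_sb);    snapshot HW completion index
 *	rmb();                                order it before the CQE reads
 *	while (sw != hw && done < budget)     one CQE per iteration:
 *		done += handle_cqe(sw++);     slowpath event, TPA, or skb
 *	write back consumers/producers;       bnx2x_update_rx_prod()
 *	return done;                          the NAPI poller re-arms the
 *	                                      IRQ when done < budget
 */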
1623
1624 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1625 {
1626         struct bnx2x_fastpath *fp = fp_cookie;
1627         struct bnx2x *bp = fp->bp;
1628         int index = FP_IDX(fp);
1629
1630         /* Return here if interrupt is disabled */
1631         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1632                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1633                 return IRQ_HANDLED;
1634         }
1635
1636         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1637            index, FP_SB_ID(fp));
1638         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1639
1640 #ifdef BNX2X_STOP_ON_ERROR
1641         if (unlikely(bp->panic))
1642                 return IRQ_HANDLED;
1643 #endif
1644
1645         prefetch(fp->rx_cons_sb);
1646         prefetch(fp->tx_cons_sb);
1647         prefetch(&fp->status_blk->c_status_block.status_block_index);
1648         prefetch(&fp->status_blk->u_status_block.status_block_index);
1649
1650         netif_rx_schedule(&bnx2x_fp(bp, index, napi));
1651
1652         return IRQ_HANDLED;
1653 }
1654
1655 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1656 {
1657         struct net_device *dev = dev_instance;
1658         struct bnx2x *bp = netdev_priv(dev);
1659         u16 status = bnx2x_ack_int(bp);
1660         u16 mask;
1661
1662         /* Return here if interrupt is shared and it's not for us */
1663         if (unlikely(status == 0)) {
1664                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1665                 return IRQ_NONE;
1666         }
1667         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1668
1669         /* Return here if interrupt is disabled */
1670         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1671                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1672                 return IRQ_HANDLED;
1673         }
1674
1675 #ifdef BNX2X_STOP_ON_ERROR
1676         if (unlikely(bp->panic))
1677                 return IRQ_HANDLED;
1678 #endif
1679
1680         mask = 0x2 << bp->fp[0].sb_id;
1681         if (status & mask) {
1682                 struct bnx2x_fastpath *fp = &bp->fp[0];
1683
1684                 prefetch(fp->rx_cons_sb);
1685                 prefetch(fp->tx_cons_sb);
1686                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1687                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1688
1689                 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
1690
1691                 status &= ~mask;
1692         }
1693
1694
1695         if (unlikely(status & 0x1)) {
1696                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1697
1698                 status &= ~0x1;
1699                 if (!status)
1700                         return IRQ_HANDLED;
1701         }
1702
1703         if (status)
1704                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1705                    status);
1706
1707         return IRQ_HANDLED;
1708 }
1709
1710 /* end of fast path */
1711
1712 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1713
1714 /* Link */
1715
1716 /*
1717  * General service functions
1718  */
1719
1720 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1721 {
1722         u32 lock_status;
1723         u32 resource_bit = (1 << resource);
1724         int func = BP_FUNC(bp);
1725         u32 hw_lock_control_reg;
1726         int cnt;
1727
1728         /* Validating that the resource is within range */
1729         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1730                 DP(NETIF_MSG_HW,
1731                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1732                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1733                 return -EINVAL;
1734         }
1735
1736         if (func <= 5) {
1737                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1738         } else {
1739                 hw_lock_control_reg =
1740                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1741         }
1742
1743         /* Validating that the resource is not already taken */
1744         lock_status = REG_RD(bp, hw_lock_control_reg);
1745         if (lock_status & resource_bit) {
1746                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1747                    lock_status, resource_bit);
1748                 return -EEXIST;
1749         }
1750
1751         /* Try for 5 seconds, polling every 5ms */
1752         for (cnt = 0; cnt < 1000; cnt++) {
1753                 /* Try to acquire the lock */
1754                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1755                 lock_status = REG_RD(bp, hw_lock_control_reg);
1756                 if (lock_status & resource_bit)
1757                         return 0;
1758
1759                 msleep(5);
1760         }
1761         DP(NETIF_MSG_HW, "Timeout\n");
1762         return -EAGAIN;
1763 }
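/*
 * Editor's note: a minimal usage sketch of the lock pair (hypothetical
 * surrounding code; the GPIO resource id is the one used further below):
 */
#if 0
	rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	if (rc)		/* -EINVAL, -EEXIST or -EAGAIN from above */
		return rc;
	/* ... touch the shared register(s) ... */
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
#endif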
1764
1765 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1766 {
1767         u32 lock_status;
1768         u32 resource_bit = (1 << resource);
1769         int func = BP_FUNC(bp);
1770         u32 hw_lock_control_reg;
1771
1772         /* Validating that the resource is within range */
1773         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1774                 DP(NETIF_MSG_HW,
1775                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1776                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1777                 return -EINVAL;
1778         }
1779
1780         if (func <= 5) {
1781                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1782         } else {
1783                 hw_lock_control_reg =
1784                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1785         }
1786
1787         /* Validating that the resource is currently taken */
1788         lock_status = REG_RD(bp, hw_lock_control_reg);
1789         if (!(lock_status & resource_bit)) {
1790                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1791                    lock_status, resource_bit);
1792                 return -EFAULT;
1793         }
1794
1795         REG_WR(bp, hw_lock_control_reg, resource_bit);
1796         return 0;
1797 }
1798
1799 /* HW Lock for shared dual port PHYs */
1800 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1801 {
1802         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1803
1804         mutex_lock(&bp->port.phy_mutex);
1805
1806         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1807             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1808                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1809 }
1810
1811 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1812 {
1813         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1814
1815         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1816             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1817                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1818
1819         mutex_unlock(&bp->port.phy_mutex);
1820 }
1821
1822 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1823 {
1824         /* The GPIO should be swapped if swap register is set and active */
1825         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1826                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1827         int gpio_shift = gpio_num +
1828                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1829         u32 gpio_mask = (1 << gpio_shift);
1830         u32 gpio_reg;
1831
1832         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1833                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1834                 return -EINVAL;
1835         }
1836
1837         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1838         /* read GPIO and mask out everything except the float bits */
1839         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1840
1841         switch (mode) {
1842         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1843                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1844                    gpio_num, gpio_shift);
1845                 /* clear FLOAT and set CLR */
1846                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1847                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1848                 break;
1849
1850         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1851                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1852                    gpio_num, gpio_shift);
1853                 /* clear FLOAT and set SET */
1854                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1855                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1856                 break;
1857
1858         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1859                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1860                    gpio_num, gpio_shift);
1861                 /* set FLOAT */
1862                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1863                 break;
1864
1865         default:
1866                 break;
1867         }
1868
1869         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1870         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1871
1872         return 0;
1873 }
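/*
 * Editor's note: MISC_REG_GPIO packs three control groups per pin
 * (FLOAT / SET / CLR, at the *_POS shifts used above).  Driving a pin
 * means clearing its FLOAT bit and raising exactly one of SET or CLR;
 * raising FLOAT tristates it again, as the three switch cases show.
 */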
1874
1875 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1876 {
1877         u32 spio_mask = (1 << spio_num);
1878         u32 spio_reg;
1879
1880         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1881             (spio_num > MISC_REGISTERS_SPIO_7)) {
1882                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1883                 return -EINVAL;
1884         }
1885
1886         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1887         /* read SPIO and mask out everything except the float bits */
1888         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1889
1890         switch (mode) {
1891         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1892                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1893                 /* clear FLOAT and set CLR */
1894                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1895                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1896                 break;
1897
1898         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1899                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1900                 /* clear FLOAT and set SET */
1901                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1902                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1903                 break;
1904
1905         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1906                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1907                 /* set FLOAT */
1908                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1909                 break;
1910
1911         default:
1912                 break;
1913         }
1914
1915         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1916         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1917
1918         return 0;
1919 }
1920
1921 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1922 {
1923         switch (bp->link_vars.ieee_fc &
1924                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1925         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1926                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1927                                           ADVERTISED_Pause);
1928                 break;
1929         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1930                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1931                                          ADVERTISED_Pause);
1932                 break;
1933         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1934                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1935                 break;
1936         default:
1937                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1938                                           ADVERTISED_Pause);
1939                 break;
1940         }
1941 }
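/*
 * Editor's note: the switch above is the standard IEEE 802.3 pause
 * advertisement encoding:
 *
 *   PAUSE_NONE (and default)  clears both ADVERTISED_Pause and _Asym_Pause
 *   PAUSE_BOTH                sets both
 *   PAUSE_ASYMMETRIC          sets ADVERTISED_Asym_Pause only
 */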
1942
1943 static void bnx2x_link_report(struct bnx2x *bp)
1944 {
1945         if (bp->link_vars.link_up) {
1946                 if (bp->state == BNX2X_STATE_OPEN)
1947                         netif_carrier_on(bp->dev);
1948                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1949
1950                 printk("%d Mbps ", bp->link_vars.line_speed);
1951
1952                 if (bp->link_vars.duplex == DUPLEX_FULL)
1953                         printk("full duplex");
1954                 else
1955                         printk("half duplex");
1956
1957                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1958                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1959                                 printk(", receive ");
1960                                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1961                                         printk("& transmit ");
1962                         } else {
1963                                 printk(", transmit ");
1964                         }
1965                         printk("flow control ON");
1966                 }
1967                 printk("\n");
1968
1969         } else { /* link_down */
1970                 netif_carrier_off(bp->dev);
1971                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1972         }
1973 }
1974
1975 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1976 {
1977         if (!BP_NOMCP(bp)) {
1978                 u8 rc;
1979
1980                 /* Initialize link parameters structure variables */
1981                 /* It is recommended to turn off RX FC for jumbo frames
1982                    for better performance */
1983                 if (IS_E1HMF(bp))
1984                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1985                 else if (bp->dev->mtu > 5000)
1986                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1987                 else
1988                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1989
1990                 bnx2x_acquire_phy_lock(bp);
1991                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1992                 bnx2x_release_phy_lock(bp);
1993
1994                 bnx2x_calc_fc_adv(bp);
1995
1996                 if (bp->link_vars.link_up)
1997                         bnx2x_link_report(bp);
1998
1999
2000                 return rc;
2001         }
2002         BNX2X_ERR("Bootcode is missing - not initializing link\n");
2003         return -EINVAL;
2004 }
2005
2006 static void bnx2x_link_set(struct bnx2x *bp)
2007 {
2008         if (!BP_NOMCP(bp)) {
2009                 bnx2x_acquire_phy_lock(bp);
2010                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2011                 bnx2x_release_phy_lock(bp);
2012
2013                 bnx2x_calc_fc_adv(bp);
2014         } else
2015                 BNX2X_ERR("Bootcode is missing - not setting link\n");
2016 }
2017
2018 static void bnx2x__link_reset(struct bnx2x *bp)
2019 {
2020         if (!BP_NOMCP(bp)) {
2021                 bnx2x_acquire_phy_lock(bp);
2022                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2023                 bnx2x_release_phy_lock(bp);
2024         } else
2025                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
2026 }
2027
2028 static u8 bnx2x_link_test(struct bnx2x *bp)
2029 {
2030         u8 rc;
2031
2032         bnx2x_acquire_phy_lock(bp);
2033         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2034         bnx2x_release_phy_lock(bp);
2035
2036         return rc;
2037 }
2038
2039 /* Calculates the sum of vn_min_rates.
2040    It's needed for further normalizing of the min_rates.
2041
2042    Returns:
2043      sum of vn_min_rates
2044        or
2045      0 - if all the min_rates are 0.
2046      In the latter case the fairness algorithm should be deactivated.
2047      If not all min_rates are zero, then any that are zero will
2048      be set to 1.
2049  */
2050 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2051 {
2052         int i, port = BP_PORT(bp);
2053         u32 wsum = 0;
2054         int all_zero = 1;
2055
2056         for (i = 0; i < E1HVN_MAX; i++) {
2057                 u32 vn_cfg =
2058                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2059                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2060                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2061                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2062                         /* If min rate is zero - set it to 1 */
2063                         if (!vn_min_rate)
2064                                 vn_min_rate = DEF_MIN_RATE;
2065                         else
2066                                 all_zero = 0;
2067
2068                         wsum += vn_min_rate;
2069                 }
2070         }
2071
2072         /* ... only if all the min rates are zero - disable FAIRNESS */
2073         if (all_zero)
2074                 return 0;
2075
2076         return wsum;
2077 }
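/*
 * Editor's note: a worked example of the sum above, assuming
 * DEF_MIN_RATE is 100.  If the four VNs on a port carry MIN_BW config
 * values 0, 30, 0, 70 and none are hidden, each is scaled by 100 and
 * the zeroes are bumped to DEF_MIN_RATE, giving
 * wsum = 100 + 3000 + 100 + 7000 = 10200.  Only when every MIN_BW is
 * zero does the function return 0 and fairness get disabled.
 */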
2078
2079 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2080                                    int en_fness,
2081                                    u16 port_rate,
2082                                    struct cmng_struct_per_port *m_cmng_port)
2083 {
2084         u32 r_param = port_rate / 8;
2085         int port = BP_PORT(bp);
2086         int i;
2087
2088         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2089
2090         /* Enable minmax only if we are in e1hmf mode */
2091         if (IS_E1HMF(bp)) {
2092                 u32 fair_periodic_timeout_usec;
2093                 u32 t_fair;
2094
2095                 /* Enable rate shaping and fairness */
2096                 m_cmng_port->flags.cmng_vn_enable = 1;
2097                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2098                 m_cmng_port->flags.rate_shaping_enable = 1;
2099
2100                 if (!en_fness)
2101                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
2102                            " fairness will be disabled\n");
2103
2104                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2105                 m_cmng_port->rs_vars.rs_periodic_timeout =
2106                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2107
2108                 /* this is the threshold below which no timer arming will occur.
2109                    The 1.25 coefficient makes the threshold a little bigger
2110                    than the real time, to compensate for timer inaccuracy */
2111                 m_cmng_port->rs_vars.rs_threshold =
2112                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2113
2114                 /* resolution of fairness timer */
2115                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2116                 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2117                 t_fair = T_FAIR_COEF / port_rate;
2118
2119                 /* this is the threshold below which we won't arm
2120                    the timer anymore */
2121                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2122
2123                 /* we multiply by 1e3/8 to get bytes/msec.
2124                    We don't want the credits to exceed
2125                    T_FAIR*FAIR_MEM (the algorithm resolution) */
2126                 m_cmng_port->fair_vars.upper_bound =
2127                                                 r_param * t_fair * FAIR_MEM;
2128                 /* since each tick is 4 usec */
2129                 m_cmng_port->fair_vars.fairness_timeout =
2130                                                 fair_periodic_timeout_usec / 4;
2131
2132         } else {
2133                 /* Disable rate shaping and fairness */
2134                 m_cmng_port->flags.cmng_vn_enable = 0;
2135                 m_cmng_port->flags.fairness_enable = 0;
2136                 m_cmng_port->flags.rate_shaping_enable = 0;
2137
2138                 DP(NETIF_MSG_IFUP,
2139                    "Single function mode, minmax will be disabled\n");
2140         }
2141
2142         /* Store it to internal memory */
2143         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2144                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2145                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2146                        ((u32 *)(m_cmng_port))[i]);
2147 }
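/*
 * Editor's note: plugging in a 10G port as a worked example:
 * r_param = 10000/8 = 1250 bytes/usec, so with the 100usec periodic
 * timeout rs_threshold = 100 * 1250 * 5/4 = 156250 bytes, and (per the
 * comment above) t_fair = 1000usec, giving
 * upper_bound = 1250 * 1000 * FAIR_MEM bytes.
 */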
2148
2149 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2150                                    u32 wsum, u16 port_rate,
2151                                  struct cmng_struct_per_port *m_cmng_port)
2152 {
2153         struct rate_shaping_vars_per_vn m_rs_vn;
2154         struct fairness_vars_per_vn m_fair_vn;
2155         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2156         u16 vn_min_rate, vn_max_rate;
2157         int i;
2158
2159         /* If the function is hidden, set its min and max rates to zero */
2160         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2161                 vn_min_rate = 0;
2162                 vn_max_rate = 0;
2163
2164         } else {
2165                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2166                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2167                 /* If FAIRNESS is enabled (not all min rates are zero) and
2168                    the current min rate is zero, set it to 1.
2169                    This is a requirement of the algorithm. */
2170                 if ((vn_min_rate == 0) && wsum)
2171                         vn_min_rate = DEF_MIN_RATE;
2172                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2173                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2174         }
2175
2176         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2177            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2178
2179         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2180         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2181
2182         /* global vn counter - maximal Mbps for this vn */
2183         m_rs_vn.vn_counter.rate = vn_max_rate;
2184
2185         /* quota - number of bytes transmitted in this period */
2186         m_rs_vn.vn_counter.quota =
2187                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2188
2189 #ifdef BNX2X_PER_PROT_QOS
2190         /* per protocol counter */
2191         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2192                 /* maximal Mbps for this protocol */
2193                 m_rs_vn.protocol_counters[protocol].rate =
2194                                                 protocol_max_rate[protocol];
2195                 /* the quota in each timer period -
2196                    number of bytes transmitted in this period */
2197                 m_rs_vn.protocol_counters[protocol].quota =
2198                         (u32)(rs_periodic_timeout_usec *
2199                           ((double)m_rs_vn.
2200                                    protocol_counters[protocol].rate/8));
2201         }
2202 #endif
2203
2204         if (wsum) {
2205                 /* credit for each period of the fairness algorithm:
2206                    number of bytes in T_FAIR (the VNs share the port rate).
2207                    wsum should not be larger than 10000, thus
2208                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2209                 m_fair_vn.vn_credit_delta =
2210                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2211                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2212                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2213                    m_fair_vn.vn_credit_delta);
2214         }
2215
2216 #ifdef BNX2X_PER_PROT_QOS
2217         do {
2218                 u32 protocolWeightSum = 0;
2219
2220                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2221                         protocolWeightSum +=
2222                                         drvInit.protocol_min_rate[protocol];
2223                 /* per protocol counter -
2224                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2225                 if (protocolWeightSum > 0) {
2226                         for (protocol = 0;
2227                              protocol < NUM_OF_PROTOCOLS; protocol++)
2228                                 /* credit for each period of the
2229                                    fairness algorithm - number of bytes in
2230                                    T_FAIR (the protocols share the vn rate) */
2231                                 m_fair_vn.protocol_credit_delta[protocol] =
2232                                         (u32)((vn_min_rate / 8) * t_fair *
2233                                         protocol_min_rate / protocolWeightSum);
2234                 }
2235         } while (0);
2236 #endif
2237
2238         /* Store it to internal memory */
2239         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2240                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2241                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2242                        ((u32 *)(&m_rs_vn))[i]);
2243
2244         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2245                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2246                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2247                        ((u32 *)(&m_fair_vn))[i]);
2248 }
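/*
 * Editor's note: continuing that example for the VN with min rate 3000
 * and wsum 10200, the per-period fairness credit computed above is
 *
 *	vn_credit_delta = max(3000 * (T_FAIR_COEF / (8 * 10200)),
 *	                      2 * fair_threshold)
 *
 * i.e. the VN's share of the port's T_FAIR byte budget, floored at
 * twice the arming threshold so the credit stays meaningful.
 */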
2249
2250 /* This function is called upon link interrupt */
2251 static void bnx2x_link_attn(struct bnx2x *bp)
2252 {
2253         int vn;
2254
2255         /* Make sure that we are synced with the current statistics */
2256         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2257
2258         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2259
2260         if (bp->link_vars.link_up) {
2261
2262                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2263                         struct host_port_stats *pstats;
2264
2265                         pstats = bnx2x_sp(bp, port_stats);
2266                         /* reset old bmac stats */
2267                         memset(&(pstats->mac_stx[0]), 0,
2268                                sizeof(struct mac_stx));
2269                 }
2270                 if ((bp->state == BNX2X_STATE_OPEN) ||
2271                     (bp->state == BNX2X_STATE_DISABLED))
2272                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2273         }
2274
2275         /* indicate link status */
2276         bnx2x_link_report(bp);
2277
2278         if (IS_E1HMF(bp)) {
2279                 int func;
2280
2281                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2282                         if (vn == BP_E1HVN(bp))
2283                                 continue;
2284
2285                         func = ((vn << 1) | BP_PORT(bp));
2286
2287                         /* Set the attention towards other drivers
2288                            on the same port */
2289                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2290                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2291                 }
2292         }
2293
2294         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2295                 struct cmng_struct_per_port m_cmng_port;
2296                 u32 wsum;
2297                 int port = BP_PORT(bp);
2298
2299                 /* Init RATE SHAPING and FAIRNESS contexts */
2300                 wsum = bnx2x_calc_vn_wsum(bp);
2301                 bnx2x_init_port_minmax(bp, (int)wsum,
2302                                         bp->link_vars.line_speed,
2303                                         &m_cmng_port);
2304                 if (IS_E1HMF(bp))
2305                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2306                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2307                                         wsum, bp->link_vars.line_speed,
2308                                                      &m_cmng_port);
2309         }
2310 }
2311
2312 static void bnx2x__link_status_update(struct bnx2x *bp)
2313 {
2314         if (bp->state != BNX2X_STATE_OPEN)
2315                 return;
2316
2317         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2318
2319         if (bp->link_vars.link_up)
2320                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2321         else
2322                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2323
2324         /* indicate link status */
2325         bnx2x_link_report(bp);
2326 }
2327
2328 static void bnx2x_pmf_update(struct bnx2x *bp)
2329 {
2330         int port = BP_PORT(bp);
2331         u32 val;
2332
2333         bp->port.pmf = 1;
2334         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2335
2336         /* enable nig attention */
2337         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2338         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2339         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2340
2341         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2342 }
2343
2344 /* end of Link */
2345
2346 /* slow path */
2347
2348 /*
2349  * General service functions
2350  */
2351
2352 /* the slow path queue is odd since completions arrive on the fastpath ring */
2353 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2354                          u32 data_hi, u32 data_lo, int common)
2355 {
2356         int func = BP_FUNC(bp);
2357
2358         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2359            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2360            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2361            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2362            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2363
2364 #ifdef BNX2X_STOP_ON_ERROR
2365         if (unlikely(bp->panic))
2366                 return -EIO;
2367 #endif
2368
2369         spin_lock_bh(&bp->spq_lock);
2370
2371         if (!bp->spq_left) {
2372                 BNX2X_ERR("BUG! SPQ ring full!\n");
2373                 spin_unlock_bh(&bp->spq_lock);
2374                 bnx2x_panic();
2375                 return -EBUSY;
2376         }
2377
2378         /* CID needs the port number to be encoded in it */
2379         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2380                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2381                                      HW_CID(bp, cid)));
2382         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2383         if (common)
2384                 bp->spq_prod_bd->hdr.type |=
2385                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2386
2387         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2388         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2389
2390         bp->spq_left--;
2391
2392         if (bp->spq_prod_bd == bp->spq_last_bd) {
2393                 bp->spq_prod_bd = bp->spq;
2394                 bp->spq_prod_idx = 0;
2395                 DP(NETIF_MSG_TIMER, "end of spq\n");
2396
2397         } else {
2398                 bp->spq_prod_bd++;
2399                 bp->spq_prod_idx++;
2400         }
2401
2402         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2403                bp->spq_prod_idx);
2404
2405         spin_unlock_bh(&bp->spq_lock);
2406         return 0;
2407 }
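/*
 * Editor's note: an illustrative call (mirroring how the leading-queue
 * setup path in this driver posts its ramrod; treat this as a sketch,
 * not a new API):
 */
#if 0
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
#endif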
2408
2409 /* acquire split MCP access lock register */
2410 static int bnx2x_acquire_alr(struct bnx2x *bp)
2411 {
2412         u32 i, j, val;
2413         int rc = 0;
2414
2415         might_sleep();
2416         i = 100;
2417         for (j = 0; j < i*10; j++) {
2418                 val = (1UL << 31);
2419                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2420                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2421                 if (val & (1L << 31))
2422                         break;
2423
2424                 msleep(5);
2425         }
2426         if (!(val & (1L << 31))) {
2427                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2428                 rc = -EBUSY;
2429         }
2430
2431         return rc;
2432 }
2433
2434 /* release split MCP access lock register */
2435 static void bnx2x_release_alr(struct bnx2x *bp)
2436 {
2437         u32 val = 0;
2438
2439         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2440 }
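/*
 * Editor's note: the "split MCP access lock" is a set-and-test register
 * at GRCBASE_MCP + 0x9c: the driver writes bit 31 and owns the lock once
 * the read-back shows the bit set (polling every 5ms, up to ~5s);
 * writing 0 releases it.
 */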
2441
2442 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2443 {
2444         struct host_def_status_block *def_sb = bp->def_status_blk;
2445         u16 rc = 0;
2446
2447         barrier(); /* status block is written to by the chip */
2448         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2449                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2450                 rc |= 1;
2451         }
2452         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2453                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2454                 rc |= 2;
2455         }
2456         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2457                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2458                 rc |= 4;
2459         }
2460         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2461                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2462                 rc |= 8;
2463         }
2464         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2465                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2466                 rc |= 16;
2467         }
2468         return rc;
2469 }
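/*
 * Editor's note: the return value is a bitmask of which default status
 * block indices changed: bit 0 = attention bits, bit 1 = CSTORM,
 * bit 2 = USTORM, bit 3 = XSTORM, bit 4 = TSTORM.  The slowpath task
 * uses bit 0 to decide whether bnx2x_attn_int() needs to run.
 */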
2470
2471 /*
2472  * slow path service functions
2473  */
2474
2475 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2476 {
2477         int port = BP_PORT(bp);
2478         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2479                        COMMAND_REG_ATTN_BITS_SET);
2480         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2481                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2482         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2483                                        NIG_REG_MASK_INTERRUPT_PORT0;
2484         u32 aeu_mask;
2485
2486         if (bp->attn_state & asserted)
2487                 BNX2X_ERR("IGU ERROR\n");
2488
2489         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2490         aeu_mask = REG_RD(bp, aeu_addr);
2491
2492         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2493            aeu_mask, asserted);
2494         aeu_mask &= ~(asserted & 0xff);
2495         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2496
2497         REG_WR(bp, aeu_addr, aeu_mask);
2498         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2499
2500         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2501         bp->attn_state |= asserted;
2502         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2503
2504         if (asserted & ATTN_HARD_WIRED_MASK) {
2505                 if (asserted & ATTN_NIG_FOR_FUNC) {
2506
2507                         bnx2x_acquire_phy_lock(bp);
2508
2509                         /* save nig interrupt mask */
2510                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2511                         REG_WR(bp, nig_int_mask_addr, 0);
2512
2513                         bnx2x_link_attn(bp);
2514
2515                         /* handle unicore attn? */
2516                 }
2517                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2518                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2519
2520                 if (asserted & GPIO_2_FUNC)
2521                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2522
2523                 if (asserted & GPIO_3_FUNC)
2524                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2525
2526                 if (asserted & GPIO_4_FUNC)
2527                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2528
2529                 if (port == 0) {
2530                         if (asserted & ATTN_GENERAL_ATTN_1) {
2531                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2532                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2533                         }
2534                         if (asserted & ATTN_GENERAL_ATTN_2) {
2535                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2536                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2537                         }
2538                         if (asserted & ATTN_GENERAL_ATTN_3) {
2539                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2540                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2541                         }
2542                 } else {
2543                         if (asserted & ATTN_GENERAL_ATTN_4) {
2544                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2545                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2546                         }
2547                         if (asserted & ATTN_GENERAL_ATTN_5) {
2548                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2549                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2550                         }
2551                         if (asserted & ATTN_GENERAL_ATTN_6) {
2552                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2553                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2554                         }
2555                 }
2556
2557         } /* if hardwired */
2558
2559         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2560            asserted, hc_addr);
2561         REG_WR(bp, hc_addr, asserted);
2562
2563         /* now set back the mask */
2564         if (asserted & ATTN_NIG_FOR_FUNC) {
2565                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2566                 bnx2x_release_phy_lock(bp);
2567         }
2568 }
2569
2570 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2571 {
2572         int port = BP_PORT(bp);
2573         int reg_offset;
2574         u32 val;
2575
2576         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2577                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2578
2579         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2580
2581                 val = REG_RD(bp, reg_offset);
2582                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2583                 REG_WR(bp, reg_offset, val);
2584
2585                 BNX2X_ERR("SPIO5 hw attention\n");
2586
2587                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2588                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2589                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2590                         /* Fan failure attention */
2591
2592                         /* The PHY reset is controlled by GPIO 1 */
2593                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2594                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2595                         /* Low power mode is controlled by GPIO 2 */
2596                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2597                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2598                         /* mark the failure */
2599                         bp->link_params.ext_phy_config &=
2600                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2601                         bp->link_params.ext_phy_config |=
2602                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2603                         SHMEM_WR(bp,
2604                                  dev_info.port_hw_config[port].
2605                                                         external_phy_config,
2606                                  bp->link_params.ext_phy_config);
2607                         /* log the failure */
2608                         printk(KERN_ERR PFX "Fan Failure on Network"
2609                                " Controller %s has caused the driver to"
2610                                " shutdown the card to prevent permanent"
2611                                " damage.  Please contact Dell Support for"
2612                                " assistance\n", bp->dev->name);
2613                         break;
2614
2615                 default:
2616                         break;
2617                 }
2618         }
2619
2620         if (attn & HW_INTERRUT_ASSERT_SET_0) {  /* "INTERRUT" is the macro's actual spelling */
2621
2622                 val = REG_RD(bp, reg_offset);
2623                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2624                 REG_WR(bp, reg_offset, val);
2625
2626                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2627                           (attn & HW_INTERRUT_ASSERT_SET_0));
2628                 bnx2x_panic();
2629         }
2630 }
2631
2632 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2633 {
2634         u32 val;
2635
2636         if (attn & BNX2X_DOORQ_ASSERT) {
2637
2638                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2639                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2640                 /* DORQ discard attention */
2641                 if (val & 0x2)
2642                         BNX2X_ERR("FATAL error from DORQ\n");
2643         }
2644
2645         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2646
2647                 int port = BP_PORT(bp);
2648                 int reg_offset;
2649
2650                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2651                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2652
2653                 val = REG_RD(bp, reg_offset);
2654                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2655                 REG_WR(bp, reg_offset, val);
2656
2657                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2658                           (attn & HW_INTERRUT_ASSERT_SET_1));
2659                 bnx2x_panic();
2660         }
2661 }
2662
2663 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2664 {
2665         u32 val;
2666
2667         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2668
2669                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2670                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2671                 /* CFC error attention */
2672                 if (val & 0x2)
2673                         BNX2X_ERR("FATAL error from CFC\n");
2674         }
2675
2676         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2677
2678                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2679                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2680                 /* RQ_USDMDP_FIFO_OVERFLOW */
2681                 if (val & 0x18000)
2682                         BNX2X_ERR("FATAL error from PXP\n");
2683         }
2684
2685         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2686
2687                 int port = BP_PORT(bp);
2688                 int reg_offset;
2689
2690                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2691                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2692
2693                 val = REG_RD(bp, reg_offset);
2694                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2695                 REG_WR(bp, reg_offset, val);
2696
2697                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2698                           (attn & HW_INTERRUT_ASSERT_SET_2));
2699                 bnx2x_panic();
2700         }
2701 }
2702
2703 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2704 {
2705         u32 val;
2706
2707         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2708
2709                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2710                         int func = BP_FUNC(bp);
2711
2712                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2713                         bnx2x__link_status_update(bp);
2714                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2715                                                         DRV_STATUS_PMF)
2716                                 bnx2x_pmf_update(bp);
2717
2718                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2719
2720                         BNX2X_ERR("MC assert!\n");
2721                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2722                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2723                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2724                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2725                         bnx2x_panic();
2726
2727                 } else if (attn & BNX2X_MCP_ASSERT) {
2728
2729                         BNX2X_ERR("MCP assert!\n");
2730                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2731                         bnx2x_fw_dump(bp);
2732
2733                 } else
2734                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2735         }
2736
2737         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2738                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2739                 if (attn & BNX2X_GRC_TIMEOUT) {
2740                         val = CHIP_IS_E1H(bp) ?
2741                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2742                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2743                 }
2744                 if (attn & BNX2X_GRC_RSV) {
2745                         val = CHIP_IS_E1H(bp) ?
2746                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2747                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2748                 }
2749                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2750         }
2751 }
2752
2753 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2754 {
2755         struct attn_route attn;
2756         struct attn_route group_mask;
2757         int port = BP_PORT(bp);
2758         int index;
2759         u32 reg_addr;
2760         u32 val;
2761         u32 aeu_mask;
2762
2763         /* need to take the HW lock because the MCP or the other port
2764            might also try to handle this event */
2765         bnx2x_acquire_alr(bp);
2766
2767         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2768         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2769         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2770         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2771         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2772            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2773
2774         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2775                 if (deasserted & (1 << index)) {
2776                         group_mask = bp->attn_group[index];
2777
2778                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2779                            index, group_mask.sig[0], group_mask.sig[1],
2780                            group_mask.sig[2], group_mask.sig[3]);
2781
2782                         bnx2x_attn_int_deasserted3(bp,
2783                                         attn.sig[3] & group_mask.sig[3]);
2784                         bnx2x_attn_int_deasserted1(bp,
2785                                         attn.sig[1] & group_mask.sig[1]);
2786                         bnx2x_attn_int_deasserted2(bp,
2787                                         attn.sig[2] & group_mask.sig[2]);
2788                         bnx2x_attn_int_deasserted0(bp,
2789                                         attn.sig[0] & group_mask.sig[0]);
2790
2791                         if ((attn.sig[0] & group_mask.sig[0] &
2792                                                 HW_PRTY_ASSERT_SET_0) ||
2793                             (attn.sig[1] & group_mask.sig[1] &
2794                                                 HW_PRTY_ASSERT_SET_1) ||
2795                             (attn.sig[2] & group_mask.sig[2] &
2796                                                 HW_PRTY_ASSERT_SET_2))
2797                                 BNX2X_ERR("FATAL HW block parity attention\n");
2798                 }
2799         }
2800
2801         bnx2x_release_alr(bp);
2802
2803         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2804
2805         val = ~deasserted;
2806         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2807            val, reg_addr);
2808         REG_WR(bp, reg_addr, val);
2809
2810         if (~bp->attn_state & deasserted)
2811                 BNX2X_ERR("IGU ERROR\n");
2812
2813         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2814                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2815
2816         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2817         aeu_mask = REG_RD(bp, reg_addr);
2818
2819         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2820            aeu_mask, deasserted);
2821         aeu_mask |= (deasserted & 0xff);
2822         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2823
2824         REG_WR(bp, reg_addr, aeu_mask);
2825         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2826
2827         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2828         bp->attn_state &= ~deasserted;
2829         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2830 }
2831
2832 static void bnx2x_attn_int(struct bnx2x *bp)
2833 {
2834         /* read local copy of bits */
2835         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2836                                                                 attn_bits);
2837         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2838                                                                 attn_bits_ack);
2839         u32 attn_state = bp->attn_state;
2840
2841         /* look for changed bits */
2842         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2843         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2844
2845         DP(NETIF_MSG_HW,
2846            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2847            attn_bits, attn_ack, asserted, deasserted);
2848
2849         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2850                 BNX2X_ERR("BAD attention state\n");
2851
2852         /* handle bits that were raised */
2853         if (asserted)
2854                 bnx2x_attn_int_asserted(bp, asserted);
2855
2856         if (deasserted)
2857                 bnx2x_attn_int_deasserted(bp, deasserted);
2858 }
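/* Worked example (illustrative, not part of the driver): with
 * attn_bits = 0x6, attn_ack = 0x2 and attn_state = 0x2,
 *
 *   asserted   =  0x6 & ~0x2 & ~0x2 = 0x4   (bit 2 newly raised)
 *   deasserted = ~0x6 &  0x2 &  0x2 = 0x0   (nothing cleared yet)
 *
 * so only bnx2x_attn_int_asserted() runs here.  A bit counts as
 * deasserted only once it is clear in attn_bits while still acked and
 * still recorded in the local attn_state.
 */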
2859
2860 static void bnx2x_sp_task(struct work_struct *work)
2861 {
2862         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2863         u16 status;
2864
2865
2866         /* Return here if interrupt is disabled */
2867         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2868                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2869                 return;
2870         }
2871
2872         status = bnx2x_update_dsb_idx(bp);
2873 /*      if (status == 0)                                     */
2874 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2875
2876         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2877
2878         /* HW attentions */
2879         if (status & 0x1)
2880                 bnx2x_attn_int(bp);
2881
2882         /* CStorm events: query_stats, port delete ramrod */
2883         if (status & 0x2)
2884                 bp->stats_pending = 0;
2885
2886         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2887                      IGU_INT_NOP, 1);
2888         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2889                      IGU_INT_NOP, 1);
2890         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2891                      IGU_INT_NOP, 1);
2892         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2893                      IGU_INT_NOP, 1);
2894         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2895                      IGU_INT_ENABLE, 1);
2896
2897 }
2898
2899 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2900 {
2901         struct net_device *dev = dev_instance;
2902         struct bnx2x *bp = netdev_priv(dev);
2903
2904         /* Return here if interrupt is disabled */
2905         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2906                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2907                 return IRQ_HANDLED;
2908         }
2909
2910         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2911
2912 #ifdef BNX2X_STOP_ON_ERROR
2913         if (unlikely(bp->panic))
2914                 return IRQ_HANDLED;
2915 #endif
2916
2917         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2918
2919         return IRQ_HANDLED;
2920 }
2921
2922 /* end of slow path */
2923
2924 /* Statistics */
2925
2926 /****************************************************************************
2927 * Macros
2928 ****************************************************************************/
2929
2930 /* sum[hi:lo] += add[hi:lo] */
2931 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2932         do { \
2933                 s_lo += a_lo; \
2934                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2935         } while (0)
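/* Illustrative sketch (not in the source): ADD_64 detects a carry by
 * noticing that the unsigned low-word sum wrapped, i.e. (s_lo < a_lo)
 * after the addition.  For example:
 *
 *   u32 hi = 0, lo = 0xffffffff;
 *   ADD_64(hi, 0, lo, 1);    /@ lo wraps to 0, (0 < 1) carries: hi == 1 @/
 */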
2936
2937 /* difference = minuend - subtrahend */
2938 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2939         do { \
2940                 if (m_lo < s_lo) { \
2941                         /* underflow */ \
2942                         d_hi = m_hi - s_hi; \
2943                         if (d_hi > 0) { \
2944                                 /* we can 'loan' 1 */ \
2945                                 d_hi--; \
2946                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2947                         } else { \
2948                                 /* m_hi <= s_hi */ \
2949                                 d_hi = 0; \
2950                                 d_lo = 0; \
2951                         } \
2952                 } else { \
2953                         /* m_lo >= s_lo */ \
2954                         if (m_hi < s_hi) { \
2955                                 d_hi = 0; \
2956                                 d_lo = 0; \
2957                         } else { \
2958                                 /* m_hi >= s_hi */ \
2959                                 d_hi = m_hi - s_hi; \
2960                                 d_lo = m_lo - s_lo; \
2961                         } \
2962                 } \
2963         } while (0)
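/* Illustrative sketch (not in the source): DIFF_64 borrows from the
 * high word on low-word underflow and clamps to zero whenever the
 * subtrahend is the larger value.  E.g. 0x1_00000000 - 0x0_00000001:
 *
 *   m_lo (0) < s_lo (1), and d_hi = 1 - 0 > 0, so loan 1:
 *   d_hi = 0, d_lo = 0 + (0xffffffff - 1) + 1 = 0xffffffff
 */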
2964
2965 #define UPDATE_STAT64(s, t) \
2966         do { \
2967                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2968                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2969                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2970                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2971                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2972                        pstats->mac_stx[1].t##_lo, diff.lo); \
2973         } while (0)
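/* Note (editorial reading of the macro above): mac_stx[0] holds the
 * previous raw MAC counter snapshot and mac_stx[1] the accumulated
 * total, so only the delta between consecutive DMAE reads is added;
 * DIFF_64's clamp-to-zero keeps a MAC counter reset from injecting a
 * bogus huge delta into the total.
 */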
2974
2975 #define UPDATE_STAT64_NIG(s, t) \
2976         do { \
2977                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2978                         diff.lo, new->s##_lo, old->s##_lo); \
2979                 ADD_64(estats->t##_hi, diff.hi, \
2980                        estats->t##_lo, diff.lo); \
2981         } while (0)
2982
2983 /* sum[hi:lo] += add */
2984 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2985         do { \
2986                 s_lo += a; \
2987                 s_hi += (s_lo < a) ? 1 : 0; \
2988         } while (0)
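/* Illustrative sketch (not in the source): extending a 32-bit addend
 * into a 64-bit accumulator with the same wrap test:
 *
 *   u32 hi = 0, lo = 0xfffffffe;
 *   ADD_EXTEND_64(hi, lo, 5);  /@ lo wraps to 3, (3 < 5) carries: hi == 1 @/
 */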
2989
2990 #define UPDATE_EXTEND_STAT(s) \
2991         do { \
2992                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2993                               pstats->mac_stx[1].s##_lo, \
2994                               new->s); \
2995         } while (0)
2996
2997 #define UPDATE_EXTEND_TSTAT(s, t) \
2998         do { \
2999                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3000                 old_tclient->s = le32_to_cpu(tclient->s); \
3001                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3002         } while (0)
3003
3004 #define UPDATE_EXTEND_XSTAT(s, t) \
3005         do { \
3006                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3007                 old_xclient->s = le32_to_cpu(xclient->s); \
3008                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3009         } while (0)
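/* Illustrative sketch (not in the source): the u32 subtraction inside
 * UPDATE_EXTEND_TSTAT/XSTAT yields the correct delta even when the
 * firmware's 32-bit counter wraps between reads:
 *
 *   u32 old = 0xfffffff0, new = 0x00000010;
 *   u32 diff = new - old;      /@ 0x20 == 32 packets, modulo 2^32 @/
 */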
3010
3011 /*
3012  * General service functions
3013  */
3014
3015 static inline long bnx2x_hilo(u32 *hiref)
3016 {
3017         u32 lo = *(hiref + 1);
3018 #if (BITS_PER_LONG == 64)
3019         u32 hi = *hiref;
3020
3021         return HILO_U64(hi, lo);
3022 #else
3023         return lo;
3024 #endif
3025 }
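/* Illustrative note (not in the source): with BITS_PER_LONG == 64 the
 * {hi, lo} pair is folded into one long, e.g. hi = 0x1, lo = 0x2 gives
 * 0x100000002 assuming HILO_U64() concatenates hi:lo; on 32-bit
 * kernels a long cannot hold both words, so only the low 32 bits are
 * reported.
 */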
3026
3027 /*
3028  * Init service functions
3029  */
3030
3031 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3032 {
3033         if (!bp->stats_pending) {
3034                 struct eth_query_ramrod_data ramrod_data = {0};
3035                 int rc;
3036
3037                 ramrod_data.drv_counter = bp->stats_counter++;
3038                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3039                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3040
3041                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3042                                    ((u32 *)&ramrod_data)[1],
3043                                    ((u32 *)&ramrod_data)[0], 0);
3044                 if (rc == 0) {
3045                         /* stats ramrod has its own slot on the spq */
3046                         bp->spq_left++;
3047                         bp->stats_pending = 1;
3048                 }
3049         }
3050 }
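/* Note (editorial reading of the code above): the 8-byte
 * eth_query_ramrod_data travels as the two u32 words handed to
 * bnx2x_sp_post(), and spq_left is incremented back on success because
 * the stats query owns a dedicated slow-path-queue slot and must not
 * consume one of the shared ones.
 */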
3051
3052 static void bnx2x_stats_init(struct bnx2x *bp)
3053 {
3054         int port = BP_PORT(bp);
3055
3056         bp->executer_idx = 0;
3057         bp->stats_counter = 0;
3058
3059         /* port stats */
3060         if (!BP_NOMCP(bp))
3061                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3062         else
3063                 bp->port.port_stx = 0;
3064         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3065
3066         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3067         bp->port.old_nig_stats.brb_discard =
3068                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3069         bp->port.old_nig_stats.brb_truncate =
3070                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3071         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3072                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3073         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3074                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3075
3076         /* function stats */
3077         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3078         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3079         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3080         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3081
3082         bp->stats_state = STATS_STATE_DISABLED;
3083         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3084                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3085 }
3086
3087 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3088 {
3089         struct dmae_command *dmae = &bp->stats_dmae;
3090         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3091
3092         *stats_comp = DMAE_COMP_VAL;
3093
3094         /* loader */
3095         if (bp->executer_idx) {
3096                 int loader_idx = PMF_DMAE_C(bp);
3097
3098                 memset(dmae, 0, sizeof(struct dmae_command));
3099
3100                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3101                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3102                                 DMAE_CMD_DST_RESET |
3103 #ifdef __BIG_ENDIAN
3104                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3105 #else
3106                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3107 #endif
3108                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3109                                                DMAE_CMD_PORT_0) |
3110                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3111                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3112                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3113                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3114                                      sizeof(struct dmae_command) *
3115                                      (loader_idx + 1)) >> 2;
3116                 dmae->dst_addr_hi = 0;
3117                 dmae->len = sizeof(struct dmae_command) >> 2;
3118                 if (CHIP_IS_E1(bp))
3119                         dmae->len--;
3120                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3121                 dmae->comp_addr_hi = 0;
3122                 dmae->comp_val = 1;
3123
3124                 *stats_comp = 0;
3125                 bnx2x_post_dmae(bp, dmae, loader_idx);
3126
3127         } else if (bp->func_stx) {
3128                 *stats_comp = 0;
3129                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3130         }
3131 }
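/* Note (editorial reading): when DMAE commands have been queued
 * (executer_idx != 0), the "loader" built above is itself a DMAE
 * command: it copies the first queued command from host memory into
 * DMAE command-memory slot loader_idx + 1, and its completion write to
 * dmae_reg_go_c[loader_idx + 1] launches that freshly loaded command.
 * With nothing queued, only the pre-built stats_dmae command for the
 * function statistics is posted directly.
 */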
3132
3133 static int bnx2x_stats_comp(struct bnx2x *bp)
3134 {
3135         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3136         int cnt = 10;
3137
3138         might_sleep();
3139         while (*stats_comp != DMAE_COMP_VAL) {
3140                 if (!cnt) {
3141                         BNX2X_ERR("timeout waiting for stats to finish\n");
3142                         break;
3143                 }
3144                 cnt--;
3145                 msleep(1);
3146         }
3147         return 1;
3148 }
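/* Note (editorial reading): the loop above gives the pending DMAE
 * roughly 10 ms (10 x msleep(1)) to write DMAE_COMP_VAL before logging
 * a timeout; the function returns 1 either way, so callers use it as a
 * completion barrier rather than as an error check.
 */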
3149
3150 /*
3151  * Statistics service functions
3152  */
3153
3154 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3155 {
3156         struct dmae_command *dmae;
3157         u32 opcode;
3158         int loader_idx = PMF_DMAE_C(bp);
3159         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3160
3161         /* sanity */
3162         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3163                 BNX2X_ERR("BUG!\n");
3164                 return;
3165         }
3166
3167         bp->executer_idx = 0;
3168
3169         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3170                   DMAE_CMD_C_ENABLE |
3171                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3172 #ifdef __BIG_ENDIAN
3173                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3174 #else
3175                   DMAE_CMD_ENDIANITY_DW_SWAP |
3176 #endif
3177                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3178                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3179
3180         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3181         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3182         dmae->src_addr_lo = bp->port.port_stx >> 2;
3183         dmae->src_addr_hi = 0;
3184         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3185         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3186         dmae->len = DMAE_LEN32_RD_MAX;
3187         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3188         dmae->comp_addr_hi = 0;
3189         dmae->comp_val = 1;
3190
3191         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3192         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3193         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3194         dmae->src_addr_hi = 0;
3195         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3196                                    DMAE_LEN32_RD_MAX * 4);
3197         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3198                                    DMAE_LEN32_RD_MAX * 4);
3199         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3200         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3201         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3202         dmae->comp_val = DMAE_COMP_VAL;
3203
3204         *stats_comp = 0;
3205         bnx2x_hw_stats_post(bp);
3206         bnx2x_stats_comp(bp);
3207 }
3208
3209 static void bnx2x_port_stats_init(struct bnx2x *bp)
3210 {
3211         struct dmae_command *dmae;
3212         int port = BP_PORT(bp);
3213         int vn = BP_E1HVN(bp);
3214         u32 opcode;
3215         int loader_idx = PMF_DMAE_C(bp);
3216         u32 mac_addr;
3217         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3218
3219         /* sanity */
3220         if (!bp->link_vars.link_up || !bp->port.pmf) {
3221                 BNX2X_ERR("BUG!\n");
3222                 return;
3223         }
3224
3225         bp->executer_idx = 0;
3226
3227         /* MCP */
3228         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3229                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3230                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3231 #ifdef __BIG_ENDIAN
3232                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3233 #else
3234                   DMAE_CMD_ENDIANITY_DW_SWAP |
3235 #endif
3236                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3237                   (vn << DMAE_CMD_E1HVN_SHIFT));
3238
3239         if (bp->port.port_stx) {
3240
3241                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3242                 dmae->opcode = opcode;
3243                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3244                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3245                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3246                 dmae->dst_addr_hi = 0;
3247                 dmae->len = sizeof(struct host_port_stats) >> 2;
3248                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3249                 dmae->comp_addr_hi = 0;
3250                 dmae->comp_val = 1;
3251         }
3252
3253         if (bp->func_stx) {
3254
3255                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3256                 dmae->opcode = opcode;
3257                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3258                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3259                 dmae->dst_addr_lo = bp->func_stx >> 2;
3260                 dmae->dst_addr_hi = 0;
3261                 dmae->len = sizeof(struct host_func_stats) >> 2;
3262                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3263                 dmae->comp_addr_hi = 0;
3264                 dmae->comp_val = 1;
3265         }
3266
3267         /* MAC */
3268         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3269                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3270                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3271 #ifdef __BIG_ENDIAN
3272                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3273 #else
3274                   DMAE_CMD_ENDIANITY_DW_SWAP |
3275 #endif
3276                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3277                   (vn << DMAE_CMD_E1HVN_SHIFT));
3278
3279         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3280
3281                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3282                                    NIG_REG_INGRESS_BMAC0_MEM);
3283
3284                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3285                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3286                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3287                 dmae->opcode = opcode;
3288                 dmae->src_addr_lo = (mac_addr +
3289                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3290                 dmae->src_addr_hi = 0;
3291                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3292                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3293                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3294                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3295                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3296                 dmae->comp_addr_hi = 0;
3297                 dmae->comp_val = 1;
3298
3299                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3300                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3301                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3302                 dmae->opcode = opcode;
3303                 dmae->src_addr_lo = (mac_addr +
3304                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3305                 dmae->src_addr_hi = 0;
3306                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3307                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3308                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3309                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3310                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3311                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3312                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3313                 dmae->comp_addr_hi = 0;
3314                 dmae->comp_val = 1;
3315
3316         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3317
3318                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3319
3320                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3321                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3322                 dmae->opcode = opcode;
3323                 dmae->src_addr_lo = (mac_addr +
3324                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3325                 dmae->src_addr_hi = 0;
3326                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3327                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3328                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3329                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3330                 dmae->comp_addr_hi = 0;
3331                 dmae->comp_val = 1;
3332
3333                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3334                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3335                 dmae->opcode = opcode;
3336                 dmae->src_addr_lo = (mac_addr +
3337                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3338                 dmae->src_addr_hi = 0;
3339                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3340                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3341                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3342                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3343                 dmae->len = 1;
3344                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3345                 dmae->comp_addr_hi = 0;
3346                 dmae->comp_val = 1;
3347
3348                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3349                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3350                 dmae->opcode = opcode;
3351                 dmae->src_addr_lo = (mac_addr +
3352                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3353                 dmae->src_addr_hi = 0;
3354                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3355                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3356                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3357                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3358                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3359                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3360                 dmae->comp_addr_hi = 0;
3361                 dmae->comp_val = 1;
3362         }
3363
3364         /* NIG */
3365         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3366         dmae->opcode = opcode;
3367         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3368                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3369         dmae->src_addr_hi = 0;
3370         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3371         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3372         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3373         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3374         dmae->comp_addr_hi = 0;
3375         dmae->comp_val = 1;
3376
3377         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3378         dmae->opcode = opcode;
3379         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3380                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3381         dmae->src_addr_hi = 0;
3382         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3383                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3384         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3385                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3386         dmae->len = (2*sizeof(u32)) >> 2;
3387         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3388         dmae->comp_addr_hi = 0;
3389         dmae->comp_val = 1;
3390
3391         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3392         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3393                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3394                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3395 #ifdef __BIG_ENDIAN
3396                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3397 #else
3398                         DMAE_CMD_ENDIANITY_DW_SWAP |
3399 #endif
3400                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3401                         (vn << DMAE_CMD_E1HVN_SHIFT));
3402         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3403                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3404         dmae->src_addr_hi = 0;
3405         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3406                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3407         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3408                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3409         dmae->len = (2*sizeof(u32)) >> 2;
3410         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3411         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3412         dmae->comp_val = DMAE_COMP_VAL;
3413
3414         *stats_comp = 0;
3415 }
3416
3417 static void bnx2x_func_stats_init(struct bnx2x *bp)
3418 {
3419         struct dmae_command *dmae = &bp->stats_dmae;
3420         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3421
3422         /* sanity */
3423         if (!bp->func_stx) {
3424                 BNX2X_ERR("BUG!\n");
3425                 return;
3426         }
3427
3428         bp->executer_idx = 0;
3429         memset(dmae, 0, sizeof(struct dmae_command));
3430
3431         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3432                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3433                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3434 #ifdef __BIG_ENDIAN
3435                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3436 #else
3437                         DMAE_CMD_ENDIANITY_DW_SWAP |
3438 #endif
3439                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3440                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3441         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3442         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3443         dmae->dst_addr_lo = bp->func_stx >> 2;
3444         dmae->dst_addr_hi = 0;
3445         dmae->len = sizeof(struct host_func_stats) >> 2;
3446         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3447         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3448         dmae->comp_val = DMAE_COMP_VAL;
3449
3450         *stats_comp = 0;
3451 }
3452
3453 static void bnx2x_stats_start(struct bnx2x *bp)
3454 {
3455         if (bp->port.pmf)
3456                 bnx2x_port_stats_init(bp);
3457
3458         else if (bp->func_stx)
3459                 bnx2x_func_stats_init(bp);
3460
3461         bnx2x_hw_stats_post(bp);
3462         bnx2x_storm_stats_post(bp);
3463 }
3464
3465 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3466 {
3467         bnx2x_stats_comp(bp);
3468         bnx2x_stats_pmf_update(bp);
3469         bnx2x_stats_start(bp);
3470 }
3471
3472 static void bnx2x_stats_restart(struct bnx2x *bp)
3473 {
3474         bnx2x_stats_comp(bp);
3475         bnx2x_stats_start(bp);
3476 }
3477
3478 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3479 {
3480         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3481         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3482         struct regpair diff;
3483
3484         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3485         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3486         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3487         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3488         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3489         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3490         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3491         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3492         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3493         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3494         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3495         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3496         UPDATE_STAT64(tx_stat_gt127,
3497                                 tx_stat_etherstatspkts65octetsto127octets);
3498         UPDATE_STAT64(tx_stat_gt255,
3499                                 tx_stat_etherstatspkts128octetsto255octets);
3500         UPDATE_STAT64(tx_stat_gt511,
3501                                 tx_stat_etherstatspkts256octetsto511octets);
3502         UPDATE_STAT64(tx_stat_gt1023,
3503                                 tx_stat_etherstatspkts512octetsto1023octets);
3504         UPDATE_STAT64(tx_stat_gt1518,
3505                                 tx_stat_etherstatspkts1024octetsto1522octets);
3506         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3507         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3508         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3509         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3510         UPDATE_STAT64(tx_stat_gterr,
3511                                 tx_stat_dot3statsinternalmactransmiterrors);
3512         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3513 }
3514
3515 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3516 {
3517         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3518         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3519
3520         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3521         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3522         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3523         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3524         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3525         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3526         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3527         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3528         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3529         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3530         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3531         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3532         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3533         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3534         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3535         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3536         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3537         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3538         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3539         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3540         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3541         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3542         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3543         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3544         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3545         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3546         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3547         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3548         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3549         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3550         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3551 }
3552
3553 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3554 {
3555         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3556         struct nig_stats *old = &(bp->port.old_nig_stats);
3557         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3558         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3559         struct regpair diff;
3560
3561         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3562                 bnx2x_bmac_stats_update(bp);
3563
3564         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3565                 bnx2x_emac_stats_update(bp);
3566
3567         else { /* unreached */
3568                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3569                 return -1;
3570         }
3571
3572         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3573                       new->brb_discard - old->brb_discard);
3574         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3575                       new->brb_truncate - old->brb_truncate);
3576
3577         UPDATE_STAT64_NIG(egress_mac_pkt0,
3578                                         etherstatspkts1024octetsto1522octets);
3579         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3580
3581         memcpy(old, new, sizeof(struct nig_stats));
3582
3583         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3584                sizeof(struct mac_stx));
3585         estats->brb_drop_hi = pstats->brb_drop_hi;
3586         estats->brb_drop_lo = pstats->brb_drop_lo;
3587
3588         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3589
3590         return 0;
3591 }
3592
3593 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3594 {
3595         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3596         int cl_id = BP_CL_ID(bp);
3597         struct tstorm_per_port_stats *tport =
3598                                 &stats->tstorm_common.port_statistics;
3599         struct tstorm_per_client_stats *tclient =
3600                         &stats->tstorm_common.client_statistics[cl_id];
3601         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3602         struct xstorm_per_client_stats *xclient =
3603                         &stats->xstorm_common.client_statistics[cl_id];
3604         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3605         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3606         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3607         u32 diff;
3608
3609         /* are storm stats valid? */
3610         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3611                                                         bp->stats_counter) {
3612                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3613                    "  tstorm counter (%d) != stats_counter (%d)\n",
3614                    tclient->stats_counter, bp->stats_counter);
3615                 return -1;
3616         }
3617         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3618                                                         bp->stats_counter) {
3619                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3620                    "  xstorm counter (%d) != stats_counter (%d)\n",
3621                    xclient->stats_counter, bp->stats_counter);
3622                 return -2;
3623         }
3624
3625         fstats->total_bytes_received_hi =
3626         fstats->valid_bytes_received_hi =
3627                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3628         fstats->total_bytes_received_lo =
3629         fstats->valid_bytes_received_lo =
3630                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3631
3632         estats->error_bytes_received_hi =
3633                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3634         estats->error_bytes_received_lo =
3635                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3636         ADD_64(estats->error_bytes_received_hi,
3637                estats->rx_stat_ifhcinbadoctets_hi,
3638                estats->error_bytes_received_lo,
3639                estats->rx_stat_ifhcinbadoctets_lo);
3640
3641         ADD_64(fstats->total_bytes_received_hi,
3642                estats->error_bytes_received_hi,
3643                fstats->total_bytes_received_lo,
3644                estats->error_bytes_received_lo);
3645
3646         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3647         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3648                                 total_multicast_packets_received);
3649         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3650                                 total_broadcast_packets_received);
3651
3652         fstats->total_bytes_transmitted_hi =
3653                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3654         fstats->total_bytes_transmitted_lo =
3655                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3656
3657         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3658                                 total_unicast_packets_transmitted);
3659         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3660                                 total_multicast_packets_transmitted);
3661         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3662                                 total_broadcast_packets_transmitted);
3663
3664         memcpy(estats, &(fstats->total_bytes_received_hi),
3665                sizeof(struct host_func_stats) - 2*sizeof(u32));
3666
3667         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3668         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3669         estats->brb_truncate_discard =
3670                                 le32_to_cpu(tport->brb_truncate_discard);
3671         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3672
3673         old_tclient->rcv_unicast_bytes.hi =
3674                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3675         old_tclient->rcv_unicast_bytes.lo =
3676                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3677         old_tclient->rcv_broadcast_bytes.hi =
3678                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3679         old_tclient->rcv_broadcast_bytes.lo =
3680                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3681         old_tclient->rcv_multicast_bytes.hi =
3682                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3683         old_tclient->rcv_multicast_bytes.lo =
3684                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3685         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3686
3687         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3688         old_tclient->packets_too_big_discard =
3689                                 le32_to_cpu(tclient->packets_too_big_discard);
3690         estats->no_buff_discard =
3691         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3692         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3693
3694         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3695         old_xclient->unicast_bytes_sent.hi =
3696                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3697         old_xclient->unicast_bytes_sent.lo =
3698                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3699         old_xclient->multicast_bytes_sent.hi =
3700                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3701         old_xclient->multicast_bytes_sent.lo =
3702                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3703         old_xclient->broadcast_bytes_sent.hi =
3704                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3705         old_xclient->broadcast_bytes_sent.lo =
3706                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3707
3708         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3709
3710         return 0;
3711 }
3712
3713 static void bnx2x_net_stats_update(struct bnx2x *bp)
3714 {
3715         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3716         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3717         struct net_device_stats *nstats = &bp->dev->stats;
3718
3719         nstats->rx_packets =
3720                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3721                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3722                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3723
3724         nstats->tx_packets =
3725                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3726                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3727                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3728
3729         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3730
3731         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3732
3733         nstats->rx_dropped = old_tclient->checksum_discard +
3734                              estats->mac_discard;
3735         nstats->tx_dropped = 0;
3736
3737         nstats->multicast =
3738                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3739
3740         nstats->collisions =
3741                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3742                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3743                         estats->tx_stat_dot3statslatecollisions_lo +
3744                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3745
3746         estats->jabber_packets_received =
3747                                 old_tclient->packets_too_big_discard +
3748                                 estats->rx_stat_dot3statsframestoolong_lo;
3749
3750         nstats->rx_length_errors =
3751                                 estats->rx_stat_etherstatsundersizepkts_lo +
3752                                 estats->jabber_packets_received;
3753         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3754         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3755         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3756         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3757         nstats->rx_missed_errors = estats->xxoverflow_discard;
3758
3759         nstats->rx_errors = nstats->rx_length_errors +
3760                             nstats->rx_over_errors +
3761                             nstats->rx_crc_errors +
3762                             nstats->rx_frame_errors +
3763                             nstats->rx_fifo_errors +
3764                             nstats->rx_missed_errors;
3765
3766         nstats->tx_aborted_errors =
3767                         estats->tx_stat_dot3statslatecollisions_lo +
3768                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3769         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3770         nstats->tx_fifo_errors = 0;
3771         nstats->tx_heartbeat_errors = 0;
3772         nstats->tx_window_errors = 0;
3773
3774         nstats->tx_errors = nstats->tx_aborted_errors +
3775                             nstats->tx_carrier_errors;
3776 }
3777
3778 static void bnx2x_stats_update(struct bnx2x *bp)
3779 {
3780         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3781         int update = 0;
3782
3783         if (*stats_comp != DMAE_COMP_VAL)
3784                 return;
3785
3786         if (bp->port.pmf)
3787                 update = (bnx2x_hw_stats_update(bp) == 0);
3788
3789         update |= (bnx2x_storm_stats_update(bp) == 0);
3790
3791         if (update)
3792                 bnx2x_net_stats_update(bp);
3793
3794         else {
3795                 if (bp->stats_pending) {
3796                         bp->stats_pending++;
3797                         if (bp->stats_pending == 3) {
3798                                 BNX2X_ERR("stats not updated 3 times in a row\n");
3799                                 bnx2x_panic();
3800                                 return;
3801                         }
3802                 }
3803         }
3804
3805         if (bp->msglevel & NETIF_MSG_TIMER) {
3806                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3807                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3808                 struct net_device_stats *nstats = &bp->dev->stats;
3809                 int i;
3810
3811                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3812                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3813                                   "  tx pkt (%lx)\n",
3814                        bnx2x_tx_avail(bp->fp),
3815                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3816                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3817                                   "  rx pkt (%lx)\n",
3818                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3819                              bp->fp->rx_comp_cons),
3820                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3821                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3822                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3823                        estats->driver_xoff, estats->brb_drop_lo);
3824                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3825                         "packets_too_big_discard %u  no_buff_discard %u  "
3826                         "mac_discard %u  mac_filter_discard %u  "
3827                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3828                         "ttl0_discard %u\n",
3829                        old_tclient->checksum_discard,
3830                        old_tclient->packets_too_big_discard,
3831                        old_tclient->no_buff_discard, estats->mac_discard,
3832                        estats->mac_filter_discard, estats->xxoverflow_discard,
3833                        estats->brb_truncate_discard,
3834                        old_tclient->ttl0_discard);
3835
3836                 for_each_queue(bp, i) {
3837                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3838                                bnx2x_fp(bp, i, tx_pkt),
3839                                bnx2x_fp(bp, i, rx_pkt),
3840                                bnx2x_fp(bp, i, rx_calls));
3841                 }
3842         }
3843
3844         bnx2x_hw_stats_post(bp);
3845         bnx2x_storm_stats_post(bp);
3846 }
3847
3848 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3849 {
3850         struct dmae_command *dmae;
3851         u32 opcode;
3852         int loader_idx = PMF_DMAE_C(bp);
3853         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3854
3855         bp->executer_idx = 0;
3856
3857         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3858                   DMAE_CMD_C_ENABLE |
3859                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3860 #ifdef __BIG_ENDIAN
3861                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3862 #else
3863                   DMAE_CMD_ENDIANITY_DW_SWAP |
3864 #endif
3865                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3866                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3867
3868         if (bp->port.port_stx) {
3869
3870                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3871                 if (bp->func_stx)
3872                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3873                 else
3874                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3875                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3876                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3877                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3878                 dmae->dst_addr_hi = 0;
3879                 dmae->len = sizeof(struct host_port_stats) >> 2;
3880                 if (bp->func_stx) {
3881                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3882                         dmae->comp_addr_hi = 0;
3883                         dmae->comp_val = 1;
3884                 } else {
3885                         dmae->comp_addr_lo =
3886                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3887                         dmae->comp_addr_hi =
3888                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3889                         dmae->comp_val = DMAE_COMP_VAL;
3890
3891                         *stats_comp = 0;
3892                 }
3893         }
3894
3895         if (bp->func_stx) {
3896
3897                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3898                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3899                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3900                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3901                 dmae->dst_addr_lo = bp->func_stx >> 2;
3902                 dmae->dst_addr_hi = 0;
3903                 dmae->len = sizeof(struct host_func_stats) >> 2;
3904                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3905                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3906                 dmae->comp_val = DMAE_COMP_VAL;
3907
3908                 *stats_comp = 0;
3909         }
3910 }
3911
3912 static void bnx2x_stats_stop(struct bnx2x *bp)
3913 {
3914         int update = 0;
3915
3916         bnx2x_stats_comp(bp);
3917
3918         if (bp->port.pmf)
3919                 update = (bnx2x_hw_stats_update(bp) == 0);
3920
3921         update |= (bnx2x_storm_stats_update(bp) == 0);
3922
3923         if (update) {
3924                 bnx2x_net_stats_update(bp);
3925
3926                 if (bp->port.pmf)
3927                         bnx2x_port_stats_stop(bp);
3928
3929                 bnx2x_hw_stats_post(bp);
3930                 bnx2x_stats_comp(bp);
3931         }
3932 }
3933
3934 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3935 {
3936 }
3937
3938 static const struct {
3939         void (*action)(struct bnx2x *bp);
3940         enum bnx2x_stats_state next_state;
3941 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3942 /* state        event   */
3943 {
3944 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3945 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3946 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3947 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3948 },
3949 {
3950 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3951 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3952 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3953 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3954 }
3955 };
3956
3957 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3958 {
3959         enum bnx2x_stats_state state = bp->stats_state;
3960
3961         bnx2x_stats_stm[state][event].action(bp);
3962         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3963
3964         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3965                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3966                    state, event, bp->stats_state);
3967 }
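/* Illustrative walk through the table above (not in the source): a
 * LINK_UP event arriving in STATS_STATE_DISABLED runs
 * bnx2x_stats_start() and moves the machine to STATS_STATE_ENABLED;
 * the same event while already ENABLED runs bnx2x_stats_restart() and
 * the state stays ENABLED.
 */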
3968
3969 static void bnx2x_timer(unsigned long data)
3970 {
3971         struct bnx2x *bp = (struct bnx2x *) data;
3972
3973         if (!netif_running(bp->dev))
3974                 return;
3975
3976         if (atomic_read(&bp->intr_sem) != 0)
3977                 goto timer_restart;
3978
3979         if (poll) {
3980                 struct bnx2x_fastpath *fp = &bp->fp[0];
3981                 int rc;
3982
3983                 bnx2x_tx_int(fp, 1000);
3984                 rc = bnx2x_rx_int(fp, 1000);
3985         }
3986
3987         if (!BP_NOMCP(bp)) {
3988                 int func = BP_FUNC(bp);
3989                 u32 drv_pulse;
3990                 u32 mcp_pulse;
3991
3992                 ++bp->fw_drv_pulse_wr_seq;
3993                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3994                 /* TBD - add SYSTEM_TIME */
3995                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3996                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3997
3998                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3999                              MCP_PULSE_SEQ_MASK);
4000                 /* The delta between driver pulse and mcp response
4001                  * should be 1 (before mcp response) or 0 (after mcp response)
4002                  */
4003                 if ((drv_pulse != mcp_pulse) &&
4004                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4005                         /* someone lost a heartbeat... */
4006                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4007                                   drv_pulse, mcp_pulse);
4008                 }
4009         }
4010
4011         if ((bp->state == BNX2X_STATE_OPEN) ||
4012             (bp->state == BNX2X_STATE_DISABLED))
4013                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4014
4015 timer_restart:
4016         mod_timer(&bp->timer, jiffies + bp->current_interval);
4017 }
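
/* Worked example for the heartbeat check above (assuming the pulse masks are
 * the usual 2^n - 1 form): the driver is "in sync" when its pulse equals the
 * MCP pulse, or is exactly one ahead modulo the mask -- so drv_pulse == 0
 * still matches mcp_pulse == MCP_PULSE_SEQ_MASK across the wrap, because
 * ((MCP_PULSE_SEQ_MASK + 1) & MCP_PULSE_SEQ_MASK) == 0.  A minimal sketch of
 * the same predicate:
 *
 *	static inline int pulse_in_sync(u32 drv, u32 mcp, u32 mask)
 *	{
 *		return (drv == mcp) || (drv == ((mcp + 1) & mask));
 *	}
 */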
4018
4019 /* end of Statistics */
4020
4021 /* nic init */
4022
4023 /*
4024  * nic init service functions
4025  */
4026
4027 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4028 {
4029         int port = BP_PORT(bp);
4030
4031         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4032                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4033                         sizeof(struct ustorm_status_block)/4);
4034         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4035                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4036                         sizeof(struct cstorm_status_block)/4);
4037 }
4038
4039 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4040                           dma_addr_t mapping, int sb_id)
4041 {
4042         int port = BP_PORT(bp);
4043         int func = BP_FUNC(bp);
4044         int index;
4045         u64 section;
4046
4047         /* USTORM */
4048         section = ((u64)mapping) + offsetof(struct host_status_block,
4049                                             u_status_block);
4050         sb->u_status_block.status_block_id = sb_id;
4051
4052         REG_WR(bp, BAR_USTRORM_INTMEM +
4053                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4054         REG_WR(bp, BAR_USTRORM_INTMEM +
4055                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4056                U64_HI(section));
4057         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4058                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4059
4060         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4061                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4062                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4063
4064         /* CSTORM */
4065         section = ((u64)mapping) + offsetof(struct host_status_block,
4066                                             c_status_block);
4067         sb->c_status_block.status_block_id = sb_id;
4068
4069         REG_WR(bp, BAR_CSTRORM_INTMEM +
4070                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4071         REG_WR(bp, BAR_CSTRORM_INTMEM +
4072                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4073                U64_HI(section));
4074         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4075                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4076
4077         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4078                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4079                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4080
4081         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4082 }
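
/* Illustration: the status block address is a 64-bit DMA address, but the
 * storm memories are written 32 bits at a time, hence the U64_LO()/U64_HI()
 * pair of REG_WR() calls at offsets +0 and +4.  Sketch macros showing the
 * split (the driver's own U64_LO/U64_HI are assumed to be equivalent):
 *
 *	#define U64_LO_SKETCH(x)	((u32)((u64)(x) & 0xffffffff))
 *	#define U64_HI_SKETCH(x)	((u32)((u64)(x) >> 32))
 */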
4083
4084 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4085 {
4086         int func = BP_FUNC(bp);
4087
4088         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4089                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4090                         sizeof(struct ustorm_def_status_block)/4);
4091         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4092                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4093                         sizeof(struct cstorm_def_status_block)/4);
4094         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4095                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4096                         sizeof(struct xstorm_def_status_block)/4);
4097         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4098                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4099                         sizeof(struct tstorm_def_status_block)/4);
4100 }
4101
4102 static void bnx2x_init_def_sb(struct bnx2x *bp,
4103                               struct host_def_status_block *def_sb,
4104                               dma_addr_t mapping, int sb_id)
4105 {
4106         int port = BP_PORT(bp);
4107         int func = BP_FUNC(bp);
4108         int index, val, reg_offset;
4109         u64 section;
4110
4111         /* ATTN */
4112         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4113                                             atten_status_block);
4114         def_sb->atten_status_block.status_block_id = sb_id;
4115
4116         bp->attn_state = 0;
4117
4118         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4119                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4120
4121         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4122                 bp->attn_group[index].sig[0] = REG_RD(bp,
4123                                                      reg_offset + 0x10*index);
4124                 bp->attn_group[index].sig[1] = REG_RD(bp,
4125                                                reg_offset + 0x4 + 0x10*index);
4126                 bp->attn_group[index].sig[2] = REG_RD(bp,
4127                                                reg_offset + 0x8 + 0x10*index);
4128                 bp->attn_group[index].sig[3] = REG_RD(bp,
4129                                                reg_offset + 0xc + 0x10*index);
4130         }
4131
4132         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4133                              HC_REG_ATTN_MSG0_ADDR_L);
4134
4135         REG_WR(bp, reg_offset, U64_LO(section));
4136         REG_WR(bp, reg_offset + 4, U64_HI(section));
4137
4138         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4139
4140         val = REG_RD(bp, reg_offset);
4141         val |= sb_id;
4142         REG_WR(bp, reg_offset, val);
4143
4144         /* USTORM */
4145         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4146                                             u_def_status_block);
4147         def_sb->u_def_status_block.status_block_id = sb_id;
4148
4149         REG_WR(bp, BAR_USTRORM_INTMEM +
4150                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4151         REG_WR(bp, BAR_USTRORM_INTMEM +
4152                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4153                U64_HI(section));
4154         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4155                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4156
4157         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4158                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4159                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4160
4161         /* CSTORM */
4162         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4163                                             c_def_status_block);
4164         def_sb->c_def_status_block.status_block_id = sb_id;
4165
4166         REG_WR(bp, BAR_CSTRORM_INTMEM +
4167                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4168         REG_WR(bp, BAR_CSTRORM_INTMEM +
4169                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4170                U64_HI(section));
4171         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4172                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4173
4174         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4175                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4176                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4177
4178         /* TSTORM */
4179         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4180                                             t_def_status_block);
4181         def_sb->t_def_status_block.status_block_id = sb_id;
4182
4183         REG_WR(bp, BAR_TSTRORM_INTMEM +
4184                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4185         REG_WR(bp, BAR_TSTRORM_INTMEM +
4186                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4187                U64_HI(section));
4188         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4189                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4190
4191         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4192                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4193                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4194
4195         /* XSTORM */
4196         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4197                                             x_def_status_block);
4198         def_sb->x_def_status_block.status_block_id = sb_id;
4199
4200         REG_WR(bp, BAR_XSTRORM_INTMEM +
4201                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4202         REG_WR(bp, BAR_XSTRORM_INTMEM +
4203                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4204                U64_HI(section));
4205         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4206                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4207
4208         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4209                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4210                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4211
4212         bp->stats_pending = 0;
4213         bp->set_mac_pending = 0;
4214
4215         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4216 }
4217
4218 static void bnx2x_update_coalesce(struct bnx2x *bp)
4219 {
4220         int port = BP_PORT(bp);
4221         int i;
4222
4223         for_each_queue(bp, i) {
4224                 int sb_id = bp->fp[i].sb_id;
4225
4226                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4227                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4228                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4229                                                     U_SB_ETH_RX_CQ_INDEX),
4230                         bp->rx_ticks/12);
4231                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4232                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4233                                                      U_SB_ETH_RX_CQ_INDEX),
4234                          bp->rx_ticks ? 0 : 1);
4235                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4236                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4237                                                      U_SB_ETH_RX_BD_INDEX),
4238                          bp->rx_ticks ? 0 : 1);
4239
4240                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4241                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4242                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4243                                                     C_SB_ETH_TX_CQ_INDEX),
4244                         bp->tx_ticks/12);
4245                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4246                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4247                                                      C_SB_ETH_TX_CQ_INDEX),
4248                          bp->tx_ticks ? 0 : 1);
4249         }
4250 }
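
/* Illustration (assumption: the /12 above means the HC timeout field counts
 * in 12-usec units): a requested rx_ticks of 25 usec would be programmed as
 * 25 / 12 = 2 units, i.e. an effective ~24 usec, while rx_ticks == 0 skips
 * the timeout entirely by setting the HC_DISABLE flag to 1:
 *
 *	static inline u8 hc_timeout_units(u16 usec)
 *	{
 *		return usec / 12;	// truncating division, as above
 *	}
 */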
4251
4252 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4253                                        struct bnx2x_fastpath *fp, int last)
4254 {
4255         int i;
4256
4257         for (i = 0; i < last; i++) {
4258                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4259                 struct sk_buff *skb = rx_buf->skb;
4260
4261                 if (skb == NULL) {
4262                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4263                         continue;
4264                 }
4265
4266                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4267                         pci_unmap_single(bp->pdev,
4268                                          pci_unmap_addr(rx_buf, mapping),
4269                                          bp->rx_buf_size,
4270                                          PCI_DMA_FROMDEVICE);
4271
4272                 dev_kfree_skb(skb);
4273                 rx_buf->skb = NULL;
4274         }
4275 }
4276
4277 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4278 {
4279         int func = BP_FUNC(bp);
4280         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4281                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4282         u16 ring_prod, cqe_ring_prod;
4283         int i, j;
4284
4285         bp->rx_buf_size = bp->dev->mtu;
4286         bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4287                 BCM_RX_ETH_PAYLOAD_ALIGN;
4288
4289         if (bp->flags & TPA_ENABLE_FLAG) {
4290                 DP(NETIF_MSG_IFUP,
4291                    "rx_buf_size %d  effective_mtu %d\n",
4292                    bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4293
4294                 for_each_queue(bp, j) {
4295                         struct bnx2x_fastpath *fp = &bp->fp[j];
4296
4297                         for (i = 0; i < max_agg_queues; i++) {
4298                                 fp->tpa_pool[i].skb =
4299                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4300                                 if (!fp->tpa_pool[i].skb) {
4301                                         BNX2X_ERR("Failed to allocate TPA "
4302                                                   "skb pool for queue[%d] - "
4303                                                   "disabling TPA on this "
4304                                                   "queue!\n", j);
4305                                         bnx2x_free_tpa_pool(bp, fp, i);
4306                                         fp->disable_tpa = 1;
4307                                         break;
4308                                 }
4309                                 /* use this queue's pool (fp), not fp[0]'s */
4310                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4311                                                    mapping, 0);
4312                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4313                         }
4314                 }
4315         }
4316
4317         for_each_queue(bp, j) {
4318                 struct bnx2x_fastpath *fp = &bp->fp[j];
4319
4320                 fp->rx_bd_cons = 0;
4321                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4322                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4323
4324                 /* "next page" elements initialization */
4325                 /* SGE ring */
4326                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4327                         struct eth_rx_sge *sge;
4328
4329                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4330                         sge->addr_hi =
4331                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4332                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4333                         sge->addr_lo =
4334                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4335                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4336                 }
4337
4338                 bnx2x_init_sge_ring_bit_mask(fp);
4339
4340                 /* RX BD ring */
4341                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4342                         struct eth_rx_bd *rx_bd;
4343
4344                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4345                         rx_bd->addr_hi =
4346                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4347                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4348                         rx_bd->addr_lo =
4349                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4350                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4351                 }
4352
4353                 /* CQ ring */
4354                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4355                         struct eth_rx_cqe_next_page *nextpg;
4356
4357                         nextpg = (struct eth_rx_cqe_next_page *)
4358                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4359                         nextpg->addr_hi =
4360                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4361                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4362                         nextpg->addr_lo =
4363                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4364                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4365                 }
4366
4367                 /* Allocate SGEs and initialize the ring elements */
4368                 for (i = 0, ring_prod = 0;
4369                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4370
4371                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4372                                 BNX2X_ERR("was only able to allocate "
4373                                           "%d rx sges\n", i);
4374                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4375                                 /* Cleanup already allocated elements */
4376                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4377                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4378                                 fp->disable_tpa = 1;
4379                                 ring_prod = 0;
4380                                 break;
4381                         }
4382                         ring_prod = NEXT_SGE_IDX(ring_prod);
4383                 }
4384                 fp->rx_sge_prod = ring_prod;
4385
4386                 /* Allocate BDs and initialize BD ring */
4387                 fp->rx_comp_cons = 0;
4388                 cqe_ring_prod = ring_prod = 0;
4389                 for (i = 0; i < bp->rx_ring_size; i++) {
4390                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4391                                 BNX2X_ERR("was only able to allocate "
4392                                           "%d rx skbs\n", i);
4393                                 bp->eth_stats.rx_skb_alloc_failed++;
4394                                 break;
4395                         }
4396                         ring_prod = NEXT_RX_IDX(ring_prod);
4397                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4398                         WARN_ON(ring_prod <= i);
4399                 }
4400
4401                 fp->rx_bd_prod = ring_prod;
4402                 /* must not have more available CQEs than BDs */
4403                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4404                                        cqe_ring_prod);
4405                 fp->rx_pkt = fp->rx_calls = 0;
4406
4407                 /* Warning!
4408                  * This will generate an interrupt (to the TSTORM),
4409                  * so it must only be done after the chip is initialized.
4410                  */
4411                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4412                                      fp->rx_sge_prod);
4413                 if (j != 0)
4414                         continue;
4415
4416                 REG_WR(bp, BAR_USTRORM_INTMEM +
4417                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4418                        U64_LO(fp->rx_comp_mapping));
4419                 REG_WR(bp, BAR_USTRORM_INTMEM +
4420                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4421                        U64_HI(fp->rx_comp_mapping));
4422         }
4423 }
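
/* Illustration of the "next page" chaining above: the last element(s) of
 * every BD/SGE/CQE page are reserved as a pointer to the following page, so
 * page (i - 1) links to page (i % num_pages) and the final page wraps back
 * to page 0, turning N physically separate pages into one logical ring.
 * A sketch of the link address computed in those loops:
 *
 *	static inline dma_addr_t next_page_addr(dma_addr_t ring_base,
 *						int i, int num_pages)
 *	{
 *		return ring_base + BCM_PAGE_SIZE * (i % num_pages);
 *	}
 */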
4424
4425 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4426 {
4427         int i, j;
4428
4429         for_each_queue(bp, j) {
4430                 struct bnx2x_fastpath *fp = &bp->fp[j];
4431
4432                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4433                         struct eth_tx_bd *tx_bd =
4434                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4435
4436                         tx_bd->addr_hi =
4437                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4438                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4439                         tx_bd->addr_lo =
4440                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4441                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4442                 }
4443
4444                 fp->tx_pkt_prod = 0;
4445                 fp->tx_pkt_cons = 0;
4446                 fp->tx_bd_prod = 0;
4447                 fp->tx_bd_cons = 0;
4448                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4449                 fp->tx_pkt = 0;
4450         }
4451 }
4452
4453 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4454 {
4455         int func = BP_FUNC(bp);
4456
4457         spin_lock_init(&bp->spq_lock);
4458
4459         bp->spq_left = MAX_SPQ_PENDING;
4460         bp->spq_prod_idx = 0;
4461         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4462         bp->spq_prod_bd = bp->spq;
4463         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4464
4465         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4466                U64_LO(bp->spq_mapping));
4467         REG_WR(bp,
4468                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4469                U64_HI(bp->spq_mapping));
4470
4471         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4472                bp->spq_prod_idx);
4473 }
4474
4475 static void bnx2x_init_context(struct bnx2x *bp)
4476 {
4477         int i;
4478
4479         for_each_queue(bp, i) {
4480                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4481                 struct bnx2x_fastpath *fp = &bp->fp[i];
4482                 u8 sb_id = FP_SB_ID(fp);
4483
4484                 context->xstorm_st_context.tx_bd_page_base_hi =
4485                                                 U64_HI(fp->tx_desc_mapping);
4486                 context->xstorm_st_context.tx_bd_page_base_lo =
4487                                                 U64_LO(fp->tx_desc_mapping);
4488                 context->xstorm_st_context.db_data_addr_hi =
4489                                                 U64_HI(fp->tx_prods_mapping);
4490                 context->xstorm_st_context.db_data_addr_lo =
4491                                                 U64_LO(fp->tx_prods_mapping);
4492                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4493                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4494
4495                 context->ustorm_st_context.common.sb_index_numbers =
4496                                                 BNX2X_RX_SB_INDEX_NUM;
4497                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4498                 context->ustorm_st_context.common.status_block_id = sb_id;
4499                 context->ustorm_st_context.common.flags =
4500                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4501                 context->ustorm_st_context.common.mc_alignment_size =
4502                         BCM_RX_ETH_PAYLOAD_ALIGN;
4503                 context->ustorm_st_context.common.bd_buff_size =
4504                                                 bp->rx_buf_size;
4505                 context->ustorm_st_context.common.bd_page_base_hi =
4506                                                 U64_HI(fp->rx_desc_mapping);
4507                 context->ustorm_st_context.common.bd_page_base_lo =
4508                                                 U64_LO(fp->rx_desc_mapping);
4509                 if (!fp->disable_tpa) {
4510                         context->ustorm_st_context.common.flags |=
4511                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4512                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4513                         context->ustorm_st_context.common.sge_buff_size =
4514                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4515                         context->ustorm_st_context.common.sge_page_base_hi =
4516                                                 U64_HI(fp->rx_sge_mapping);
4517                         context->ustorm_st_context.common.sge_page_base_lo =
4518                                                 U64_LO(fp->rx_sge_mapping);
4519                 }
4520
4521                 context->cstorm_st_context.sb_index_number =
4522                                                 C_SB_ETH_TX_CQ_INDEX;
4523                 context->cstorm_st_context.status_block_id = sb_id;
4524
4525                 context->xstorm_ag_context.cdu_reserved =
4526                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4527                                                CDU_REGION_NUMBER_XCM_AG,
4528                                                ETH_CONNECTION_TYPE);
4529                 context->ustorm_ag_context.cdu_usage =
4530                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4531                                                CDU_REGION_NUMBER_UCM_AG,
4532                                                ETH_CONNECTION_TYPE);
4533         }
4534 }
4535
4536 static void bnx2x_init_ind_table(struct bnx2x *bp)
4537 {
4538         int func = BP_FUNC(bp);
4539         int i;
4540
4541         if (!is_multi(bp))
4542                 return;
4543
4544         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4545         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4546                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4547                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4548                         BP_CL_ID(bp) + (i % bp->num_queues));
4549 }
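
/* Illustration: the RSS indirection table above is filled round-robin --
 * entry i maps to client (BP_CL_ID(bp) + i % num_queues).  With a base
 * client ID of 0 and 4 queues, entries 0..7 resolve to clients
 * 0, 1, 2, 3, 0, 1, 2, 3:
 *
 *	static inline u8 ind_table_entry(u8 base_cl_id, int i, int num_queues)
 *	{
 *		return base_cl_id + (i % num_queues);
 *	}
 */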
4550
4551 static void bnx2x_set_client_config(struct bnx2x *bp)
4552 {
4553         struct tstorm_eth_client_config tstorm_client = {0};
4554         int port = BP_PORT(bp);
4555         int i;
4556
4557         tstorm_client.mtu = bp->dev->mtu;
4558         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4559         tstorm_client.config_flags =
4560                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4561 #ifdef BCM_VLAN
4562         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4563                 tstorm_client.config_flags |=
4564                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4565                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4566         }
4567 #endif
4568
4569         if (bp->flags & TPA_ENABLE_FLAG) {
4570                 tstorm_client.max_sges_for_packet =
4571                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4572                 tstorm_client.max_sges_for_packet =
4573                         ((tstorm_client.max_sges_for_packet +
4574                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4575                         PAGES_PER_SGE_SHIFT;
4576
4577                 tstorm_client.config_flags |=
4578                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4579         }
4580
4581         for_each_queue(bp, i) {
4582                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4583                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4584                        ((u32 *)&tstorm_client)[0]);
4585                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4586                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4587                        ((u32 *)&tstorm_client)[1]);
4588         }
4589
4590         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4591            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4592 }
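
/* Worked example for max_sges_for_packet (assuming 4 KiB SGE pages and
 * PAGES_PER_SGE == 1; both are build-dependent): for mtu = 9000,
 * SGE_PAGE_ALIGN(9000) = 12288, and 12288 >> 12 = 3 pages.  Rounding 3 up to
 * a multiple of PAGES_PER_SGE and shifting by PAGES_PER_SGE_SHIFT leaves 3,
 * so one aggregated MTU-sized chunk may occupy up to 3 SGE entries.
 */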
4593
4594 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4595 {
4596         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4597         int mode = bp->rx_mode;
4598         int mask = (1 << BP_L_ID(bp));
4599         int func = BP_FUNC(bp);
4600         int i;
4601
4602         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4603
4604         switch (mode) {
4605         case BNX2X_RX_MODE_NONE: /* no Rx */
4606                 tstorm_mac_filter.ucast_drop_all = mask;
4607                 tstorm_mac_filter.mcast_drop_all = mask;
4608                 tstorm_mac_filter.bcast_drop_all = mask;
4609                 break;
4610         case BNX2X_RX_MODE_NORMAL:
4611                 tstorm_mac_filter.bcast_accept_all = mask;
4612                 break;
4613         case BNX2X_RX_MODE_ALLMULTI:
4614                 tstorm_mac_filter.mcast_accept_all = mask;
4615                 tstorm_mac_filter.bcast_accept_all = mask;
4616                 break;
4617         case BNX2X_RX_MODE_PROMISC:
4618                 tstorm_mac_filter.ucast_accept_all = mask;
4619                 tstorm_mac_filter.mcast_accept_all = mask;
4620                 tstorm_mac_filter.bcast_accept_all = mask;
4621                 break;
4622         default:
4623                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4624                 break;
4625         }
4626
4627         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4628                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4629                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4630                        ((u32 *)&tstorm_mac_filter)[i]);
4631
4632 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4633                    ((u32 *)&tstorm_mac_filter)[i]); */
4634         }
4635
4636         if (mode != BNX2X_RX_MODE_NONE)
4637                 bnx2x_set_client_config(bp);
4638 }
4639
4640 static void bnx2x_init_internal_common(struct bnx2x *bp)
4641 {
4642         int i;
4643
4644         if (bp->flags & TPA_ENABLE_FLAG) {
4645                 struct tstorm_eth_tpa_exist tpa = {0};
4646
4647                 tpa.tpa_exist = 1;
4648
4649                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4650                        ((u32 *)&tpa)[0]);
4651                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4652                        ((u32 *)&tpa)[1]);
4653         }
4654
4655         /* Zero this manually as its initialization is
4656            currently missing in the initTool */
4657         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4658                 REG_WR(bp, BAR_USTRORM_INTMEM +
4659                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4660 }
4661
4662 static void bnx2x_init_internal_port(struct bnx2x *bp)
4663 {
4664         int port = BP_PORT(bp);
4665
4666         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4667         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4668         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4669         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4670 }
4671
4672 static void bnx2x_init_internal_func(struct bnx2x *bp)
4673 {
4674         struct tstorm_eth_function_common_config tstorm_config = {0};
4675         struct stats_indication_flags stats_flags = {0};
4676         int port = BP_PORT(bp);
4677         int func = BP_FUNC(bp);
4678         int i;
4679         u16 max_agg_size;
4680
4681         if (is_multi(bp)) {
4682                 tstorm_config.config_flags = MULTI_FLAGS;
4683                 tstorm_config.rss_result_mask = MULTI_MASK;
4684         }
4685
4686         tstorm_config.leading_client_id = BP_L_ID(bp);
4687
4688         REG_WR(bp, BAR_TSTRORM_INTMEM +
4689                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4690                (*(u32 *)&tstorm_config));
4691
4692         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4693         bnx2x_set_storm_rx_mode(bp);
4694
4695         /* reset xstorm per client statistics */
4696         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4697                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4698                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4699                        i*4, 0);
4700         }
4701         /* reset tstorm per client statistics */
4702         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4703                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4704                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4705                        i*4, 0);
4706         }
4707
4708         /* Init statistics related context */
4709         stats_flags.collect_eth = 1;
4710
4711         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4712                ((u32 *)&stats_flags)[0]);
4713         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4714                ((u32 *)&stats_flags)[1]);
4715
4716         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4717                ((u32 *)&stats_flags)[0]);
4718         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4719                ((u32 *)&stats_flags)[1]);
4720
4721         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4722                ((u32 *)&stats_flags)[0]);
4723         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4724                ((u32 *)&stats_flags)[1]);
4725
4726         REG_WR(bp, BAR_XSTRORM_INTMEM +
4727                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4728                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4729         REG_WR(bp, BAR_XSTRORM_INTMEM +
4730                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4731                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4732
4733         REG_WR(bp, BAR_TSTRORM_INTMEM +
4734                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4735                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4736         REG_WR(bp, BAR_TSTRORM_INTMEM +
4737                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4738                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4739
4740         if (CHIP_IS_E1H(bp)) {
4741                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4742                         IS_E1HMF(bp));
4743                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4744                         IS_E1HMF(bp));
4745                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4746                         IS_E1HMF(bp));
4747                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4748                         IS_E1HMF(bp));
4749
4750                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4751                          bp->e1hov);
4752         }
4753
4754         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
4755         max_agg_size =
4756                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4757                           SGE_PAGE_SIZE * PAGES_PER_SGE),
4758                     (u32)0xffff);
4759         for_each_queue(bp, i) {
4760                 struct bnx2x_fastpath *fp = &bp->fp[i];
4761
4762                 REG_WR(bp, BAR_USTRORM_INTMEM +
4763                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4764                        U64_LO(fp->rx_comp_mapping));
4765                 REG_WR(bp, BAR_USTRORM_INTMEM +
4766                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4767                        U64_HI(fp->rx_comp_mapping));
4768
4769                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4770                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4771                          max_agg_size);
4772         }
4773 }
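
/* Worked numbers for max_agg_size (assuming 4 KiB SGE pages and
 * PAGES_PER_SGE == 1; both vary by build): min(8, MAX_SKB_FRAGS) = 8 frags,
 * 8 * 4096 * 1 = 32768, and min(32768, 0xffff) = 32768 -- so the FW's
 * 8-fragment limit binds before the 16-bit register cap of 65535 does.
 */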
4774
4775 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4776 {
4777         switch (load_code) {
4778         case FW_MSG_CODE_DRV_LOAD_COMMON:
4779                 bnx2x_init_internal_common(bp);
4780                 /* no break */
4781
4782         case FW_MSG_CODE_DRV_LOAD_PORT:
4783                 bnx2x_init_internal_port(bp);
4784                 /* no break */
4785
4786         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4787                 bnx2x_init_internal_func(bp);
4788                 break;
4789
4790         default:
4791                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4792                 break;
4793         }
4794 }
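
/* Illustration: the deliberate fall-through above encodes the nesting
 * COMMON > PORT > FUNCTION -- a COMMON load runs all three init levels, a
 * PORT load the lower two, and a FUNCTION load only its own.  The same
 * cascade in miniature (SCOPE_* and the init_*() helpers are hypothetical):
 *
 *	switch (scope) {
 *	case SCOPE_COMMON:
 *		init_common();	// no break: fall through
 *	case SCOPE_PORT:
 *		init_port();	// no break: fall through
 *	case SCOPE_FUNC:
 *		init_func();
 *		break;
 *	}
 */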
4795
4796 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4797 {
4798         int i;
4799
4800         for_each_queue(bp, i) {
4801                 struct bnx2x_fastpath *fp = &bp->fp[i];
4802
4803                 fp->bp = bp;
4804                 fp->state = BNX2X_FP_STATE_CLOSED;
4805                 fp->index = i;
4806                 fp->cl_id = BP_L_ID(bp) + i;
4807                 fp->sb_id = fp->cl_id;
4808                 DP(NETIF_MSG_IFUP,
4809                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4810                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4811                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4812                               FP_SB_ID(fp));
4813                 bnx2x_update_fpsb_idx(fp);
4814         }
4815
4816         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4817                           DEF_SB_ID);
4818         bnx2x_update_dsb_idx(bp);
4819         bnx2x_update_coalesce(bp);
4820         bnx2x_init_rx_rings(bp);
4821         bnx2x_init_tx_ring(bp);
4822         bnx2x_init_sp_ring(bp);
4823         bnx2x_init_context(bp);
4824         bnx2x_init_internal(bp, load_code);
4825         bnx2x_init_ind_table(bp);
4826         bnx2x_stats_init(bp);
4827
4828         /* At this point, we are ready for interrupts */
4829         atomic_set(&bp->intr_sem, 0);
4830
4831         /* flush all before enabling interrupts */
4832         mb();
4833         mmiowb();
4834
4835         bnx2x_int_enable(bp);
4836 }
4837
4838 /* end of nic init */
4839
4840 /*
4841  * gzip service functions
4842  */
4843
4844 static int bnx2x_gunzip_init(struct bnx2x *bp)
4845 {
4846         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4847                                               &bp->gunzip_mapping);
4848         if (bp->gunzip_buf == NULL)
4849                 goto gunzip_nomem1;
4850
4851         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4852         if (bp->strm == NULL)
4853                 goto gunzip_nomem2;
4854
4855         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4856                                       GFP_KERNEL);
4857         if (bp->strm->workspace == NULL)
4858                 goto gunzip_nomem3;
4859
4860         return 0;
4861
4862 gunzip_nomem3:
4863         kfree(bp->strm);
4864         bp->strm = NULL;
4865
4866 gunzip_nomem2:
4867         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4868                             bp->gunzip_mapping);
4869         bp->gunzip_buf = NULL;
4870
4871 gunzip_nomem1:
4872         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4873                " decompression\n", bp->dev->name);
4874         return -ENOMEM;
4875 }
4876
4877 static void bnx2x_gunzip_end(struct bnx2x *bp)
4878 {
4879         kfree(bp->strm->workspace);
4880
4881         kfree(bp->strm);
4882         bp->strm = NULL;
4883
4884         if (bp->gunzip_buf) {
4885                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4886                                     bp->gunzip_mapping);
4887                 bp->gunzip_buf = NULL;
4888         }
4889 }
4890
4891 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4892 {
4893         int n, rc;
4894
4895         /* check gzip header */
4896         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4897                 return -EINVAL;
4898
4899         n = 10;
4900
4901 #define FNAME                           0x8
4902
4903         if (zbuf[3] & FNAME)
4904                 while ((zbuf[n++] != 0) && (n < len));
4905
4906         bp->strm->next_in = zbuf + n;
4907         bp->strm->avail_in = len - n;
4908         bp->strm->next_out = bp->gunzip_buf;
4909         bp->strm->avail_out = FW_BUF_SIZE;
4910
4911         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4912         if (rc != Z_OK)
4913                 return rc;
4914
4915         rc = zlib_inflate(bp->strm, Z_FINISH);
4916         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4917                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4918                        bp->dev->name, bp->strm->msg);
4919
4920         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4921         if (bp->gunzip_outlen & 0x3)
4922                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4923                                     " gunzip_outlen (%d) not aligned\n",
4924                        bp->dev->name, bp->gunzip_outlen);
4925         bp->gunzip_outlen >>= 2;
4926
4927         zlib_inflateEnd(bp->strm);
4928
4929         if (rc == Z_STREAM_END)
4930                 return 0;
4931
4932         return rc;
4933 }
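
/* Illustration: the firmware blobs are gzip members, but zlib_inflateInit2()
 * is invoked with -MAX_WBITS (raw deflate), so the routine above must skip
 * the gzip wrapper by hand: a 10-byte fixed header (magic 0x1f 0x8b, method,
 * flags, mtime, xfl, os) plus a NUL-terminated original file name when FNAME
 * (bit 3 of the flags byte) is set.  A sketch of that header walk, under the
 * same assumption the driver makes that FEXTRA/FHCRC/FCOMMENT are absent:
 *
 *	static int gzip_payload_offset(const u8 *buf, int len)
 *	{
 *		int n = 10;			// fixed gzip header
 *
 *		if (len < n || buf[0] != 0x1f || buf[1] != 0x8b)
 *			return -1;
 *		if (buf[3] & 0x08)		// FNAME flag
 *			while (n < len && buf[n++] != 0)
 *				;		// skip the stored file name
 *		return n;
 *	}
 */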
4934
4935 /* nic load/unload */
4936
4937 /*
4938  * General service functions
4939  */
4940
4941 /* send a NIG loopback debug packet */
4942 static void bnx2x_lb_pckt(struct bnx2x *bp)
4943 {
4944         u32 wb_write[3];
4945
4946         /* Ethernet source and destination addresses */
4947         wb_write[0] = 0x55555555;
4948         wb_write[1] = 0x55555555;
4949         wb_write[2] = 0x20;             /* SOP */
4950         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4951
4952         /* NON-IP protocol */
4953         wb_write[0] = 0x09000000;
4954         wb_write[1] = 0x55555555;
4955         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4956         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4957 }
4958
4959 /* Some of the internal memories
4960  * are not directly readable from the driver;
4961  * to test them we send debug packets.
4962  */
4963 static int bnx2x_int_mem_test(struct bnx2x *bp)
4964 {
4965         int factor;
4966         int count, i;
4967         u32 val = 0;
4968
4969         if (CHIP_REV_IS_FPGA(bp))
4970                 factor = 120;
4971         else if (CHIP_REV_IS_EMUL(bp))
4972                 factor = 200;
4973         else
4974                 factor = 1;
4975
4976         DP(NETIF_MSG_HW, "start part1\n");
4977
4978         /* Disable inputs of parser neighbor blocks */
4979         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4980         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4981         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4982         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4983
4984         /*  Write 0 to parser credits for CFC search request */
4985         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4986
4987         /* send Ethernet packet */
4988         bnx2x_lb_pckt(bp);
4989
4990         /* TODO: do we need to reset the NIG statistics? */
4991         /* Wait until NIG register shows 1 packet of size 0x10 */
4992         count = 1000 * factor;
4993         while (count) {
4994
4995                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4996                 val = *bnx2x_sp(bp, wb_data[0]);
4997                 if (val == 0x10)
4998                         break;
4999
5000                 msleep(10);
5001                 count--;
5002         }
5003         if (val != 0x10) {
5004                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5005                 return -1;
5006         }
5007
5008         /* Wait until PRS register shows 1 packet */
5009         count = 1000 * factor;
5010         while (count) {
5011                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5012                 if (val == 1)
5013                         break;
5014
5015                 msleep(10);
5016                 count--;
5017         }
5018         if (val != 0x1) {
5019                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5020                 return -2;
5021         }
5022
5023         /* Reset and init BRB, PRS */
5024         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5025         msleep(50);
5026         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5027         msleep(50);
5028         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5029         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5030
5031         DP(NETIF_MSG_HW, "part2\n");
5032
5033         /* Disable inputs of parser neighbor blocks */
5034         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5035         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5036         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5037         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5038
5039         /* Write 0 to parser credits for CFC search request */
5040         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5041
5042         /* send 10 Ethernet packets */
5043         for (i = 0; i < 10; i++)
5044                 bnx2x_lb_pckt(bp);
5045
5046         /* Wait until NIG register shows 10 + 1 packets,
5047            i.e. a total size of 11*0x10 = 0xb0 octets */
5048         count = 1000 * factor;
5049         while (count) {
5050
5051                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5052                 val = *bnx2x_sp(bp, wb_data[0]);
5053                 if (val == 0xb0)
5054                         break;
5055
5056                 msleep(10);
5057                 count--;
5058         }
5059         if (val != 0xb0) {
5060                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5061                 return -3;
5062         }
5063
5064         /* Wait until PRS register shows 2 packets */
5065         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5066         if (val != 2)
5067                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5068
5069         /* Write 1 to parser credits for CFC search request */
5070         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5071
5072         /* Wait until PRS register shows 3 packets */
5073         msleep(10 * factor);
5074
5075         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5076         if (val != 3)
5077                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5078
5079         /* clear NIG EOP FIFO */
5080         for (i = 0; i < 11; i++)
5081                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5082         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5083         if (val != 1) {
5084                 BNX2X_ERR("clear of NIG failed\n");
5085                 return -4;
5086         }
5087
5088         /* Reset and init BRB, PRS, NIG */
5089         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5090         msleep(50);
5091         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5092         msleep(50);
5093         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5094         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5095 #ifndef BCM_ISCSI
5096         /* set NIC mode */
5097         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5098 #endif
5099
5100         /* Enable inputs of parser neighbor blocks */
5101         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5102         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5103         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5104         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5105
5106         DP(NETIF_MSG_HW, "done\n");
5107
5108         return 0; /* OK */
5109 }
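
/* Worked numbers for the test above: each NIG loopback packet is 0x10 octets,
 * so phase one waits for the BRB octet counter to read exactly 0x10, and
 * phase two for 10 + 1 packets, i.e. 11 * 0x10 = 0xb0 octets.  The PRS
 * packet counter correspondingly steps from 2 to 3 once a CFC search credit
 * is granted.
 */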
5110
5111 static void enable_blocks_attention(struct bnx2x *bp)
5112 {
5113         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5114         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5115         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5116         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5117         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5118         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5119         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5120         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5121         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5122 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5123 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5124         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5125         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5126         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5127 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5128 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5129         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5130         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5131         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5132         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5133 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5134 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5135         if (CHIP_REV_IS_FPGA(bp))
5136                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5137         else
5138                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5139         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5140         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5141         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5142 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5143 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5144         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5145         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5146 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5147         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5148 }
5149
5150
5151 static void bnx2x_reset_common(struct bnx2x *bp)
5152 {
5153         /* reset_common */
5154         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5155                0xd3ffff7f);
5156         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5157 }
5158
5159 static int bnx2x_init_common(struct bnx2x *bp)
5160 {
5161         u32 val, i;
5162
5163         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5164
5165         bnx2x_reset_common(bp);
5166         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5167         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5168
5169         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5170         if (CHIP_IS_E1H(bp))
5171                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5172
5173         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5174         msleep(30);
5175         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5176
5177         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5178         if (CHIP_IS_E1(bp)) {
5179                 /* enable HW interrupt from PXP on USDM overflow
5180                    bit 16 on INT_MASK_0 */
5181                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5182         }
5183
5184         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5185         bnx2x_init_pxp(bp);
5186
5187 #ifdef __BIG_ENDIAN
5188         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5189         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5190         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5191         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5192         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5193
5194 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5195         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5196         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5197         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5198         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5199 #endif
5200
5201         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5202 #ifdef BCM_ISCSI
5203         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5204         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5205         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5206 #endif
5207
5208         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5209                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5210
5211         /* let the HW do its magic ... */
5212         msleep(100);
5213         /* finish PXP init */
5214         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5215         if (val != 1) {
5216                 BNX2X_ERR("PXP2 CFG failed\n");
5217                 return -EBUSY;
5218         }
5219         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5220         if (val != 1) {
5221                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5222                 return -EBUSY;
5223         }
5224
5225         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5226         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5227
5228         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5229
5230         /* clean the DMAE memory */
5231         bp->dmae_ready = 1;
5232         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5233
5234         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5235         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5236         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5237         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5238
5239         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5240         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5241         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5242         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5243
5244         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5245         /* soft reset pulse */
5246         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5247         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5248
5249 #ifdef BCM_ISCSI
5250         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5251 #endif
5252
5253         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5254         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5255         if (!CHIP_REV_IS_SLOW(bp)) {
5256                 /* enable hw interrupt from doorbell Q */
5257                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5258         }
5259
5260         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5261         if (CHIP_REV_IS_SLOW(bp)) {
5262                 /* fix for emulation and FPGA: no pause */
5263                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5264                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5265                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5266                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5267         }
5268
5269         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5270         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5271         /* set NIC mode */
5272         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5273         if (CHIP_IS_E1H(bp))
5274                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5275
5276         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5277         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5278         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5279         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5280
5281         if (CHIP_IS_E1H(bp)) {
5282                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5283                                 STORM_INTMEM_SIZE_E1H/2);
5284                 bnx2x_init_fill(bp,
5285                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5286                                 0, STORM_INTMEM_SIZE_E1H/2);
5287                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5288                                 STORM_INTMEM_SIZE_E1H/2);
5289                 bnx2x_init_fill(bp,
5290                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5291                                 0, STORM_INTMEM_SIZE_E1H/2);
5292                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5293                                 STORM_INTMEM_SIZE_E1H/2);
5294                 bnx2x_init_fill(bp,
5295                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5296                                 0, STORM_INTMEM_SIZE_E1H/2);
5297                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5298                                 STORM_INTMEM_SIZE_E1H/2);
5299                 bnx2x_init_fill(bp,
5300                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5301                                 0, STORM_INTMEM_SIZE_E1H/2);
5302         } else { /* E1 */
5303                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5304                                 STORM_INTMEM_SIZE_E1);
5305                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5306                                 STORM_INTMEM_SIZE_E1);
5307                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5308                                 STORM_INTMEM_SIZE_E1);
5309                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5310                                 STORM_INTMEM_SIZE_E1);
5311         }
5312
5313         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5314         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5315         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5316         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5317
5318         /* sync semi rtc */
5319         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5320                0x80000000);
5321         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5322                0x80000000);
5323
5324         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5325         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5326         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5327
5328         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5329         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5330                 REG_WR(bp, i, 0xc0cac01a);
5331                 /* TODO: replace with something meaningful */
5332         }
5333         if (CHIP_IS_E1H(bp))
5334                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5335         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5336
5337         if (sizeof(union cdu_context) != 1024)
5338                 /* we currently assume that a context is 1024 bytes */
5339                 printk(KERN_ALERT PFX "please adjust the size of"
5340                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5341
5342         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5343         val = (4 << 24) + (0 << 12) + 1024;
5344         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5345         if (CHIP_IS_E1(bp)) {
5346                 /* !!! fix pxp client credit until excel update */
5347                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5348                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5349         }
5350
5351         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5352         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5353
5354         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5355         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5356
5357         /* PXPCS COMMON comes here */
5358         /* Reset PCIE errors for debug */
5359         REG_WR(bp, 0x2814, 0xffffffff);
5360         REG_WR(bp, 0x3820, 0xffffffff);
5361
5362         /* EMAC0 COMMON comes here */
5363         /* EMAC1 COMMON comes here */
5364         /* DBU COMMON comes here */
5365         /* DBG COMMON comes here */
5366
5367         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5368         if (CHIP_IS_E1H(bp)) {
5369                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5370                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5371         }
5372
5373         if (CHIP_REV_IS_SLOW(bp))
5374                 msleep(200);
5375
5376         /* finish CFC init */
5377         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5378         if (val != 1) {
5379                 BNX2X_ERR("CFC LL_INIT failed\n");
5380                 return -EBUSY;
5381         }
5382         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5383         if (val != 1) {
5384                 BNX2X_ERR("CFC AC_INIT failed\n");
5385                 return -EBUSY;
5386         }
5387         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5388         if (val != 1) {
5389                 BNX2X_ERR("CFC CAM_INIT failed\n");
5390                 return -EBUSY;
5391         }
5392         REG_WR(bp, CFC_REG_DEBUG0, 0);
5393
5394         /* read NIG statistic to see
5395            if this is our first load since power-up */
5396         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5397         val = *bnx2x_sp(bp, wb_data[0]);
5398
5399         /* do internal memory self test */
5400         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5401                 BNX2X_ERR("internal mem self test failed\n");
5402                 return -EBUSY;
5403         }
5404
5405         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5406         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5407         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5408                 /* Fan failure is indicated by SPIO 5 */
5409                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5410                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5411
5412                 /* set to active low mode */
5413                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5414                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5415                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5416                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5417
5418                 /* enable interrupt to signal the IGU */
5419                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5420                 val |= (1 << MISC_REGISTERS_SPIO_5);
5421                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5422                 break;
5423
5424         default:
5425                 break;
5426         }
5427
5428         /* clear PXP2 attentions */
5429         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5430
5431         enable_blocks_attention(bp);
5432
5433         if (!BP_NOMCP(bp)) {
5434                 bnx2x_acquire_phy_lock(bp);
5435                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5436                 bnx2x_release_phy_lock(bp);
5437         } else
5438                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5439
5440         return 0;
5441 }
5442
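/* Per-port HW init: bring up the port-scoped blocks (SEM, PBF, HC,
 * AEU, NIG, ...) for the port owned by this function.  Runs for both
 * the LOAD_COMMON and LOAD_PORT stages (see bnx2x_init_hw() below).
 */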
5443 static int bnx2x_init_port(struct bnx2x *bp)
5444 {
5445         int port = BP_PORT(bp);
5446         u32 val;
#ifdef BCM_ISCSI
        /* declarations needed by the BCM_ISCSI blocks below; the start
         * value of i is inferred from their Port0/Port1 ILT comments */
        u32 wb_write[2];
        int func = BP_FUNC(bp);
        int i = port ? 384 : 0;
#endif
5447
5448         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5449
5450         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5451
5452         /* Port PXP comes here */
5453         /* Port PXP2 comes here */
5454 #ifdef BCM_ISCSI
5455         /* Port0  1
5456          * Port1  385 */
5457         i++;
5458         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5459         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5460         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5461         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5462
5463         /* Port0  2
5464          * Port1  386 */
5465         i++;
5466         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5467         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5468         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5469         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5470
5471         /* Port0  3
5472          * Port1  387 */
5473         i++;
5474         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5475         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5476         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5477         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5478 #endif
5479         /* Port CMs come here */
5480
5481         /* Port QM comes here */
5482 #ifdef BCM_ISCSI
5483         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5484         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5485
5486         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5487                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5488 #endif
5489         /* Port DQ comes here */
5490         /* Port BRB1 comes here */
5491         /* Port PRS comes here */
5492         /* Port TSDM comes here */
5493         /* Port CSDM comes here */
5494         /* Port USDM comes here */
5495         /* Port XSDM comes here */
5496         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5497                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5498         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5499                              port ? USEM_PORT1_END : USEM_PORT0_END);
5500         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5501                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5502         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5503                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5504         /* Port UPB comes here */
5505         /* Port XPB comes here */
5506
5507         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5508                              port ? PBF_PORT1_END : PBF_PORT0_END);
5509
5510         /* configure PBF to work without PAUSE (MTU 9000) */
5511         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5512
5513         /* update threshold */
5514         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5515         /* update init credit */
5516         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5517
5518         /* probe changes */
5519         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5520         msleep(5);
5521         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5522
5523 #ifdef BCM_ISCSI
5524         /* tell the searcher where the T2 table is */
5525         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5526
5527         wb_write[0] = U64_LO(bp->t2_mapping);
5528         wb_write[1] = U64_HI(bp->t2_mapping);
5529         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5530         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5531         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5532         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5533
5534         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5535         /* Port SRCH comes here */
5536 #endif
5537         /* Port CDU comes here */
5538         /* Port CFC comes here */
5539
5540         if (CHIP_IS_E1(bp)) {
5541                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5542                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5543         }
5544         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5545                              port ? HC_PORT1_END : HC_PORT0_END);
5546
5547         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5548                                     MISC_AEU_PORT0_START,
5549                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5550         /* init aeu_mask_attn_func_0/1:
5551          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5552          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5553          *             bits 4-7 are used for "per vn group attention" */
5554         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5555                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5556
5557         /* Port PXPCS comes here */
5558         /* Port EMAC0 comes here */
5559         /* Port EMAC1 comes here */
5560         /* Port DBU comes here */
5561         /* Port DBG comes here */
5562         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5563                              port ? NIG_PORT1_END : NIG_PORT0_END);
5564
5565         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5566
5567         if (CHIP_IS_E1H(bp)) {
5568                 u32 wsum;
5569                 struct cmng_struct_per_port m_cmng_port;
5570                 int vn;
5571
5572                 /* 0x2 disable e1hov, 0x1 enable */
5573                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5574                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5575
5576                 /* Init RATE SHAPING and FAIRNESS contexts.
5577                    Initialize as if there is a 10G link. */
5578                 wsum = bnx2x_calc_vn_wsum(bp);
5579                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5580                 if (IS_E1HMF(bp))
5581                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5582                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5583                                         wsum, 10000, &m_cmng_port);
5584         }
5585
5586         /* Port MCP comes here */
5587         /* Port DMAE comes here */
5588
5589         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5590         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5591         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5592                 /* add SPIO 5 to group 0 */
5593                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5594                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5595                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5596                 break;
5597
5598         default:
5599                 break;
5600         }
5601
5602         bnx2x__link_reset(bp);
5603
5604         return 0;
5605 }
5606
5607 #define ILT_PER_FUNC            (768/2)
5608 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
5609 /* the phys address is shifted right 12 bits and a 1=valid bit
5610    is added as the 53rd bit;
5611    then, since this is a wide register(TM),
5612    we split it into two 32-bit writes
5613  */
5614 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5615 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
5616 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
5617 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
5618
5619 #define CNIC_ILT_LINES          0
5620
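/* Write one ILT entry: the page address is split into the two 32-bit
 * halves expected by the on-chip address table (see ONCHIP_ADDR1/2
 * above); E1 and E1H expose that table at different register offsets.
 */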
5621 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5622 {
5623         int reg;
5624
5625         if (CHIP_IS_E1H(bp))
5626                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5627         else /* E1 */
5628                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5629
5630         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5631 }
5632
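/* Per-function HW init: point the function's ILT line(s) at the CDU
 * context memory and, on E1H, init the per-function CM blocks, enable
 * the LLH for this function and program the HC function number.
 */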
5633 static int bnx2x_init_func(struct bnx2x *bp)
5634 {
5635         int port = BP_PORT(bp);
5636         int func = BP_FUNC(bp);
5637         int i;
5638
5639         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5640
5641         i = FUNC_ILT_BASE(func);
5642
5643         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5644         if (CHIP_IS_E1H(bp)) {
5645                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5646                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5647         } else /* E1 */
5648                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5649                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5650
5652         if (CHIP_IS_E1H(bp)) {
5653                 for (i = 0; i < 9; i++)
5654                         bnx2x_init_block(bp,
5655                                          cm_start[func][i], cm_end[func][i]);
5656
5657                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5658                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5659         }
5660
5661         /* HC init per function */
5662         if (CHIP_IS_E1H(bp)) {
5663                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5664
5665                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5666                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5667         }
5668         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5669
5670         if (CHIP_IS_E1H(bp))
5671                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5672
5673         /* Reset PCIE errors for debug */
5674         REG_WR(bp, 0x2114, 0xffffffff);
5675         REG_WR(bp, 0x2120, 0xffffffff);
5676
5677         return 0;
5678 }
5679
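/* Top-level HW init dispatcher.  The MCP load_code tells us how much
 * of the chip we own: COMMON falls through to PORT, which falls
 * through to FUNCTION, so the first driver instance initializes
 * everything.
 */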
5680 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5681 {
5682         int i, rc = 0;
5683
5684         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5685            BP_FUNC(bp), load_code);
5686
5687         bp->dmae_ready = 0;
5688         mutex_init(&bp->dmae_mutex);
5689         bnx2x_gunzip_init(bp);
5690
5691         switch (load_code) {
5692         case FW_MSG_CODE_DRV_LOAD_COMMON:
5693                 rc = bnx2x_init_common(bp);
5694                 if (rc)
5695                         goto init_hw_err;
5696                 /* no break */
5697
5698         case FW_MSG_CODE_DRV_LOAD_PORT:
5699                 bp->dmae_ready = 1;
5700                 rc = bnx2x_init_port(bp);
5701                 if (rc)
5702                         goto init_hw_err;
5703                 /* no break */
5704
5705         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5706                 bp->dmae_ready = 1;
5707                 rc = bnx2x_init_func(bp);
5708                 if (rc)
5709                         goto init_hw_err;
5710                 break;
5711
5712         default:
5713                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5714                 break;
5715         }
5716
5717         if (!BP_NOMCP(bp)) {
5718                 int func = BP_FUNC(bp);
5719
5720                 bp->fw_drv_pulse_wr_seq =
5721                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5722                                  DRV_PULSE_SEQ_MASK);
5723                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5724                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5725                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5726         } else
5727                 bp->func_stx = 0;
5728
5729         /* this needs to be done before gunzip end */
5730         bnx2x_zero_def_sb(bp);
5731         for_each_queue(bp, i)
5732                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5733
5734 init_hw_err:
5735         bnx2x_gunzip_end(bp);
5736
5737         return rc;
5738 }
5739
5740 /* send the MCP a request, block until there is a reply */
5741 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5742 {
5743         int func = BP_FUNC(bp);
5744         u32 seq = ++bp->fw_seq;
5745         u32 rc = 0;
5746         u32 cnt = 1;
5747         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5748
5749         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5750         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5751
5752         do {
5753                 /* let the FW do its magic ... */
5754                 msleep(delay);
5755
5756                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5757
5758                 /* Give the FW up to 2 seconds (200*10ms) */
5759         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5760
5761         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5762            cnt*delay, rc, seq);
5763
5764         /* is this a reply to our command? */
5765         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5766                 rc &= FW_MSG_CODE_MASK;
5767
5768         } else {
5769                 /* FW BUG! */
5770                 BNX2X_ERR("FW failed to respond!\n");
5771                 bnx2x_fw_dump(bp);
5772                 rc = 0;
5773         }
5774
5775         return rc;
5776 }
5777
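/* Release everything bnx2x_alloc_mem() allocated: the DMA-consistent
 * status blocks and descriptor rings plus the vmalloc'ed shadow rings.
 * Safe to call on a partially allocated device - the macros check for
 * NULL before freeing.
 */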
5778 static void bnx2x_free_mem(struct bnx2x *bp)
5779 {
5780
5781 #define BNX2X_PCI_FREE(x, y, size) \
5782         do { \
5783                 if (x) { \
5784                         pci_free_consistent(bp->pdev, size, x, y); \
5785                         x = NULL; \
5786                         y = 0; \
5787                 } \
5788         } while (0)
5789
5790 #define BNX2X_FREE(x) \
5791         do { \
5792                 if (x) { \
5793                         vfree(x); \
5794                         x = NULL; \
5795                 } \
5796         } while (0)
5797
5798         int i;
5799
5800         /* fastpath */
5801         for_each_queue(bp, i) {
5802
5803                 /* Status blocks */
5804                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5805                                bnx2x_fp(bp, i, status_blk_mapping),
5806                                sizeof(struct host_status_block) +
5807                                sizeof(struct eth_tx_db_data));
5808
5809                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5810                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5811                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5812                                bnx2x_fp(bp, i, tx_desc_mapping),
5813                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5814
5815                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5816                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5817                                bnx2x_fp(bp, i, rx_desc_mapping),
5818                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5819
5820                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5821                                bnx2x_fp(bp, i, rx_comp_mapping),
5822                                sizeof(struct eth_fast_path_rx_cqe) *
5823                                NUM_RCQ_BD);
5824
5825                 /* SGE ring */
5826                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5827                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5828                                bnx2x_fp(bp, i, rx_sge_mapping),
5829                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5830         }
5831         /* end of fastpath */
5832
5833         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5834                        sizeof(struct host_def_status_block));
5835
5836         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5837                        sizeof(struct bnx2x_slowpath));
5838
5839 #ifdef BCM_ISCSI
5840         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5841         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5842         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5843         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5844 #endif
5845         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5846
5847 #undef BNX2X_PCI_FREE
5848 #undef BNX2X_FREE
5849 }
5850
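/* Allocate all fastpath and slowpath memory: per-queue status blocks,
 * Tx/Rx/completion/SGE rings and the slow path ring.  On any failure,
 * everything allocated so far is freed and -ENOMEM is returned.
 */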
5851 static int bnx2x_alloc_mem(struct bnx2x *bp)
5852 {
5853
5854 #define BNX2X_PCI_ALLOC(x, y, size) \
5855         do { \
5856                 x = pci_alloc_consistent(bp->pdev, size, y); \
5857                 if (x == NULL) \
5858                         goto alloc_mem_err; \
5859                 memset(x, 0, size); \
5860         } while (0)
5861
5862 #define BNX2X_ALLOC(x, size) \
5863         do { \
5864                 x = vmalloc(size); \
5865                 if (x == NULL) \
5866                         goto alloc_mem_err; \
5867                 memset(x, 0, size); \
5868         } while (0)
5869
5870         int i;
5871
5872         /* fastpath */
5873         for_each_queue(bp, i) {
5874                 bnx2x_fp(bp, i, bp) = bp;
5875
5876                 /* Status blocks */
5877                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5878                                 &bnx2x_fp(bp, i, status_blk_mapping),
5879                                 sizeof(struct host_status_block) +
5880                                 sizeof(struct eth_tx_db_data));
5881
5882                 bnx2x_fp(bp, i, hw_tx_prods) =
5883                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5884
5885                 bnx2x_fp(bp, i, tx_prods_mapping) =
5886                                 bnx2x_fp(bp, i, status_blk_mapping) +
5887                                 sizeof(struct host_status_block);
5888
5889                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5890                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5891                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5892                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5893                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5894                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5895
5896                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5897                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5898                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5899                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5900                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5901
5902                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5903                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5904                                 sizeof(struct eth_fast_path_rx_cqe) *
5905                                 NUM_RCQ_BD);
5906
5907                 /* SGE ring */
5908                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5909                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5910                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5911                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5912                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5913         }
5914         /* end of fastpath */
5915
5916         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5917                         sizeof(struct host_def_status_block));
5918
5919         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5920                         sizeof(struct bnx2x_slowpath));
5921
5922 #ifdef BCM_ISCSI
5923         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5924
5925         /* Initialize T1 */
5926         for (i = 0; i < 64*1024; i += 64) {
5927                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5928                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5929         }
5930
5931         /* allocate searcher T2 table;
5932            we allocate 1/4 of alloc num for T2
5933            (which is not entered into the ILT) */
5934         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5935
5936         /* Initialize T2 */
5937         for (i = 0; i < 16*1024; i += 64)
5938                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5939
5940         /* now fixup the last line in the block to point to the next block */
5941         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5942
5943         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5944         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5945
5946         /* QM queues (128*MAX_CONN) */
5947         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5948 #endif
5949
5950         /* Slow path ring */
5951         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5952
5953         return 0;
5954
5955 alloc_mem_err:
5956         bnx2x_free_mem(bp);
5957         return -ENOMEM;
5958
5959 #undef BNX2X_PCI_ALLOC
5960 #undef BNX2X_ALLOC
5961 }
5962
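/* Walk each Tx ring from consumer to producer and free any skbs the
 * hardware never completed.
 */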
5963 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5964 {
5965         int i;
5966
5967         for_each_queue(bp, i) {
5968                 struct bnx2x_fastpath *fp = &bp->fp[i];
5969
5970                 u16 bd_cons = fp->tx_bd_cons;
5971                 u16 sw_prod = fp->tx_pkt_prod;
5972                 u16 sw_cons = fp->tx_pkt_cons;
5973
5974                 while (sw_cons != sw_prod) {
5975                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5976                         sw_cons++;
5977                 }
5978         }
5979 }
5980
5981 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5982 {
5983         int i, j;
5984
5985         for_each_queue(bp, j) {
5986                 struct bnx2x_fastpath *fp = &bp->fp[j];
5987
5988                 for (i = 0; i < NUM_RX_BD; i++) {
5989                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5990                         struct sk_buff *skb = rx_buf->skb;
5991
5992                         if (skb == NULL)
5993                                 continue;
5994
5995                         pci_unmap_single(bp->pdev,
5996                                          pci_unmap_addr(rx_buf, mapping),
5997                                          bp->rx_buf_size,
5998                                          PCI_DMA_FROMDEVICE);
5999
6000                         rx_buf->skb = NULL;
6001                         dev_kfree_skb(skb);
6002                 }
6003                 if (!fp->disable_tpa)
6004                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6005                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6006                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6007         }
6008 }
6009
6010 static void bnx2x_free_skbs(struct bnx2x *bp)
6011 {
6012         bnx2x_free_tx_skbs(bp);
6013         bnx2x_free_rx_skbs(bp);
6014 }
6015
6016 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6017 {
6018         int i, offset = 1;
6019
6020         free_irq(bp->msix_table[0].vector, bp->dev);
6021         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6022            bp->msix_table[0].vector);
6023
6024         for_each_queue(bp, i) {
6025                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6026                    "state %x\n", i, bp->msix_table[i + offset].vector,
6027                    bnx2x_fp(bp, i, state));
6028
6029                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
6030                         BNX2X_ERR("IRQ of fp #%d being freed while "
6031                                   "state != closed\n", i);
6032
6033                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6034         }
6035 }
6036
6037 static void bnx2x_free_irq(struct bnx2x *bp)
6038 {
6039         if (bp->flags & USING_MSIX_FLAG) {
6040                 bnx2x_free_msix_irqs(bp);
6041                 pci_disable_msix(bp->pdev);
6042                 bp->flags &= ~USING_MSIX_FLAG;
6043
6044         } else
6045                 free_irq(bp->pdev->irq, bp->dev);
6046 }
6047
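/* MSI-X vector layout: entry 0 is the slowpath (default status block)
 * vector; entries 1..num_queues are the fastpath vectors, whose IGU
 * vector numbers are offset by this function's BP_L_ID().
 */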
6048 static int bnx2x_enable_msix(struct bnx2x *bp)
6049 {
6050         int i, rc, offset;
6051
6052         bp->msix_table[0].entry = 0;
6053         offset = 1;
6054         DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6055
6056         for_each_queue(bp, i) {
6057                 int igu_vec = offset + i + BP_L_ID(bp);
6058
6059                 bp->msix_table[i + offset].entry = igu_vec;
6060                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6061                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6062         }
6063
6064         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6065                              bp->num_queues + offset);
6066         if (rc) {
6067                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6068                 return -1;
6069         }
6070         bp->flags |= USING_MSIX_FLAG;
6071
6072         return 0;
6073 }
6074
6075 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6076 {
6077         int i, rc, offset = 1;
6078
6079         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6080                          bp->dev->name, bp->dev);
6081         if (rc) {
6082                 BNX2X_ERR("request sp irq failed\n");
6083                 return -EBUSY;
6084         }
6085
6086         for_each_queue(bp, i) {
6087                 rc = request_irq(bp->msix_table[i + offset].vector,
6088                                  bnx2x_msix_fp_int, 0,
6089                                  bp->dev->name, &bp->fp[i]);
6090                 if (rc) {
6091                         BNX2X_ERR("request fp #%d irq failed  rc -%d\n",
6092                                   i + offset, -rc);
6093                         bnx2x_free_msix_irqs(bp);
6094                         return -EBUSY;
6095                 }
6096
6097                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6098         }
6099
6100         return 0;
6101 }
6102
6103 static int bnx2x_req_irq(struct bnx2x *bp)
6104 {
6105         int rc;
6106
6107         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6108                          bp->dev->name, bp->dev);
6109         if (!rc)
6110                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6111
6112         return rc;
6113 }
6114
6115 static void bnx2x_napi_enable(struct bnx2x *bp)
6116 {
6117         int i;
6118
6119         for_each_queue(bp, i)
6120                 napi_enable(&bnx2x_fp(bp, i, napi));
6121 }
6122
6123 static void bnx2x_napi_disable(struct bnx2x *bp)
6124 {
6125         int i;
6126
6127         for_each_queue(bp, i)
6128                 napi_disable(&bnx2x_fp(bp, i, napi));
6129 }
6130
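/* Re-enable the datapath once the interrupt semaphore drops to zero:
 * wake the Tx queue (if the state is OPEN), turn NAPI back on and
 * unmask interrupts.
 */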
6131 static void bnx2x_netif_start(struct bnx2x *bp)
6132 {
6133         if (atomic_dec_and_test(&bp->intr_sem)) {
6134                 if (netif_running(bp->dev)) {
6135                         if (bp->state == BNX2X_STATE_OPEN)
6136                                 netif_wake_queue(bp->dev);
6137                         bnx2x_napi_enable(bp);
6138                         bnx2x_int_enable(bp);
6139                 }
6140         }
6141 }
6142
6143 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6144 {
6145         bnx2x_int_disable_sync(bp, disable_hw);
6146         if (netif_running(bp->dev)) {
6147                 bnx2x_napi_disable(bp);
6148                 netif_tx_disable(bp->dev);
6149                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6150         }
6151 }
6152
6153 /*
6154  * Init service functions
6155  */
6156
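/* Program the E1 CAM via a SET_MAC ramrod: entry 0 is the primary
 * (unicast) MAC, entry 1 the broadcast address.  With set == 0 the
 * same entries are invalidated instead.
 */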
6157 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6158 {
6159         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6160         int port = BP_PORT(bp);
6161
6162         /* CAM allocation
6163          * unicasts 0-31:port0 32-63:port1
6164          * multicast 64-127:port0 128-191:port1
6165          */
6166         config->hdr.length_6b = 2;
6167         config->hdr.offset = port ? 32 : 0;
6168         config->hdr.client_id = BP_CL_ID(bp);
6169         config->hdr.reserved1 = 0;
6170
6171         /* primary MAC */
6172         config->config_table[0].cam_entry.msb_mac_addr =
6173                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6174         config->config_table[0].cam_entry.middle_mac_addr =
6175                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6176         config->config_table[0].cam_entry.lsb_mac_addr =
6177                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6178         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6179         if (set)
6180                 config->config_table[0].target_table_entry.flags = 0;
6181         else
6182                 CAM_INVALIDATE(config->config_table[0]);
6183         config->config_table[0].target_table_entry.client_id = 0;
6184         config->config_table[0].target_table_entry.vlan_id = 0;
6185
6186         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6187            (set ? "setting" : "clearing"),
6188            config->config_table[0].cam_entry.msb_mac_addr,
6189            config->config_table[0].cam_entry.middle_mac_addr,
6190            config->config_table[0].cam_entry.lsb_mac_addr);
6191
6192         /* broadcast */
6193         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6194         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6195         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6196         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6197         if (set)
6198                 config->config_table[1].target_table_entry.flags =
6199                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6200         else
6201                 CAM_INVALIDATE(config->config_table[1]);
6202         config->config_table[1].target_table_entry.client_id = 0;
6203         config->config_table[1].target_table_entry.vlan_id = 0;
6204
6205         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6206                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6207                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6208 }
6209
6210 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6211 {
6212         struct mac_configuration_cmd_e1h *config =
6213                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6214
6215         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6216                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6217                 return;
6218         }
6219
6220         /* CAM allocation for E1H
6221          * unicasts: by func number
6222          * multicast: 20+FUNC*20, 20 each
6223          */
6224         config->hdr.length_6b = 1;
6225         config->hdr.offset = BP_FUNC(bp);
6226         config->hdr.client_id = BP_CL_ID(bp);
6227         config->hdr.reserved1 = 0;
6228
6229         /* primary MAC */
6230         config->config_table[0].msb_mac_addr =
6231                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6232         config->config_table[0].middle_mac_addr =
6233                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6234         config->config_table[0].lsb_mac_addr =
6235                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6236         config->config_table[0].client_id = BP_L_ID(bp);
6237         config->config_table[0].vlan_id = 0;
6238         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6239         if (set)
6240                 config->config_table[0].flags = BP_PORT(bp);
6241         else
6242                 config->config_table[0].flags =
6243                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6244
6245         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6246            (set ? "setting" : "clearing"),
6247            config->config_table[0].msb_mac_addr,
6248            config->config_table[0].middle_mac_addr,
6249            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6250
6251         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6252                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6253                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6254 }
6255
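/* Wait (or, with poll set, actively service the Rx ring) for up to
 * ~500 1 ms sleeps until a ramrod completion moves *state_p to the
 * expected state.  Returns -EBUSY on timeout.
 */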
6256 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6257                              int *state_p, int poll)
6258 {
6259         /* can take a while if any port is running */
6260         int cnt = 500;
6261
6262         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6263            poll ? "polling" : "waiting", state, idx);
6264
6265         might_sleep();
6266         while (cnt--) {
6267                 if (poll) {
6268                         bnx2x_rx_int(bp->fp, 10);
6269                         /* if index is different from 0
6270                          * the reply for some commands will
6271                          * be on a non-default queue
6272                          */
6273                         if (idx)
6274                                 bnx2x_rx_int(&bp->fp[idx], 10);
6275                 }
6276
6277                 mb(); /* state is changed by bnx2x_sp_event() */
6278                 if (*state_p == state)
6279                         return 0;
6280
6281                 msleep(1);
6282         }
6283
6284         /* timeout! */
6285         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6286                   poll ? "polling" : "waiting", state, idx);
6287 #ifdef BNX2X_STOP_ON_ERROR
6288         bnx2x_panic();
6289 #endif
6290
6291         return -EBUSY;
6292 }
6293
6294 static int bnx2x_setup_leading(struct bnx2x *bp)
6295 {
6296         int rc;
6297
6298         /* reset IGU state */
6299         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6300
6301         /* SETUP ramrod */
6302         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6303
6304         /* Wait for completion */
6305         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6306
6307         return rc;
6308 }
6309
6310 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6311 {
6312         /* reset IGU state */
6313         bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6314
6315         /* SETUP ramrod */
6316         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6317         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6318
6319         /* Wait for completion */
6320         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6321                                  &(bp->fp[index].state), 0);
6322 }
6323
6324 static int bnx2x_poll(struct napi_struct *napi, int budget);
6325 static void bnx2x_set_rx_mode(struct net_device *dev);
6326
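/* Bring the NIC up: negotiate a load type with the MCP, set up IRQs
 * (MSI-X if possible), init the HW and FW state, open the leading and
 * any additional connections and finally start the fast path.
 */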
6327 /* must be called with rtnl_lock */
6328 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6329 {
6330         u32 load_code;
6331         int i, rc;
6332 #ifdef BNX2X_STOP_ON_ERROR
6333         if (unlikely(bp->panic))
6334                 return -EPERM;
6335 #endif
6336
6337         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6338
6339         /* Send LOAD_REQUEST command to MCP.
6340            It returns the type of LOAD command:
6341            if this is the first port to be initialized,
6342            common blocks should be initialized, otherwise not
6343         */
6344         if (!BP_NOMCP(bp)) {
6345                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6346                 if (!load_code) {
6347                         BNX2X_ERR("MCP response failure, aborting\n");
6348                         return -EBUSY;
6349                 }
6350                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6351                         return -EBUSY; /* other port in diagnostic mode */
6352
6353         } else {
6354                 int port = BP_PORT(bp);
6355
6356                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6357                    load_count[0], load_count[1], load_count[2]);
6358                 load_count[0]++;
6359                 load_count[1 + port]++;
6360                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6361                    load_count[0], load_count[1], load_count[2]);
6362                 if (load_count[0] == 1)
6363                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6364                 else if (load_count[1 + port] == 1)
6365                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6366                 else
6367                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6368         }
6369
6370         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6371             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6372                 bp->port.pmf = 1;
6373         else
6374                 bp->port.pmf = 0;
6375         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6376
6377         /* if we can't use MSI-X we only need one fp,
6378          * so try to enable MSI-X with the requested number of fp's
6379          * and fall back to INT#A with one fp
6380          */
6381         if (use_inta) {
6382                 bp->num_queues = 1;
6383
6384         } else {
6385                 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6386                         /* user requested number */
6387                         bp->num_queues = use_multi;
6388
6389                 else if (use_multi)
6390                         bp->num_queues = min_t(u32, num_online_cpus(),
6391                                                BP_MAX_QUEUES(bp));
6392                 else
6393                         bp->num_queues = 1;
6394
6395                 if (bnx2x_enable_msix(bp)) {
6396                         /* failed to enable MSI-X */
6397                         bp->num_queues = 1;
6398                         if (use_multi)
6399                                 BNX2X_ERR("Multi requested but failed"
6400                                           " to enable MSI-X\n");
6401                 }
6402         }
6403         DP(NETIF_MSG_IFUP,
6404            "set number of queues to %d\n", bp->num_queues);
6405
6406         if (bnx2x_alloc_mem(bp))
6407                 return -ENOMEM;
6408
6409         for_each_queue(bp, i)
6410                 bnx2x_fp(bp, i, disable_tpa) =
6411                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6412
6413         if (bp->flags & USING_MSIX_FLAG) {
6414                 rc = bnx2x_req_msix_irqs(bp);
6415                 if (rc) {
6416                         pci_disable_msix(bp->pdev);
6417                         goto load_error;
6418                 }
6419         } else {
6420                 bnx2x_ack_int(bp);
6421                 rc = bnx2x_req_irq(bp);
6422                 if (rc) {
6423                         BNX2X_ERR("IRQ request failed, aborting\n");
6424                         goto load_error;
6425                 }
6426         }
6427
6428         for_each_queue(bp, i)
6429                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6430                                bnx2x_poll, 128);
6431
6432         /* Initialize HW */
6433         rc = bnx2x_init_hw(bp, load_code);
6434         if (rc) {
6435                 BNX2X_ERR("HW init failed, aborting\n");
6436                 goto load_int_disable;
6437         }
6438
6439         /* Setup NIC internals and enable interrupts */
6440         bnx2x_nic_init(bp, load_code);
6441
6442         /* Send LOAD_DONE command to MCP */
6443         if (!BP_NOMCP(bp)) {
6444                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6445                 if (!load_code) {
6446                         BNX2X_ERR("MCP response failure, aborting\n");
6447                         rc = -EBUSY;
6448                         goto load_rings_free;
6449                 }
6450         }
6451
6452         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6453
6454         rc = bnx2x_setup_leading(bp);
6455         if (rc) {
6456                 BNX2X_ERR("Setup leading failed!\n");
6457                 goto load_netif_stop;
6458         }
6459
6460         if (CHIP_IS_E1H(bp))
6461                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6462                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6463                         bp->state = BNX2X_STATE_DISABLED;
6464                 }
6465
6466         if (bp->state == BNX2X_STATE_OPEN)
6467                 for_each_nondefault_queue(bp, i) {
6468                         rc = bnx2x_setup_multi(bp, i);
6469                         if (rc)
6470                                 goto load_netif_stop;
6471                 }
6472
6473         if (CHIP_IS_E1(bp))
6474                 bnx2x_set_mac_addr_e1(bp, 1);
6475         else
6476                 bnx2x_set_mac_addr_e1h(bp, 1);
6477
6478         if (bp->port.pmf)
6479                 bnx2x_initial_phy_init(bp);
6480
6481         /* Start fast path */
6482         switch (load_mode) {
6483         case LOAD_NORMAL:
6484                 /* Tx queue should only be re-enabled */
6485                 netif_wake_queue(bp->dev);
6486                 bnx2x_set_rx_mode(bp->dev);
6487                 break;
6488
6489         case LOAD_OPEN:
6490                 netif_start_queue(bp->dev);
6491                 bnx2x_set_rx_mode(bp->dev);
6492                 if (bp->flags & USING_MSIX_FLAG)
6493                         printk(KERN_INFO PFX "%s: using MSI-X\n",
6494                                bp->dev->name);
6495                 break;
6496
6497         case LOAD_DIAG:
6498                 bnx2x_set_rx_mode(bp->dev);
6499                 bp->state = BNX2X_STATE_DIAG;
6500                 break;
6501
6502         default:
6503                 break;
6504         }
6505
6506         if (!bp->port.pmf)
6507                 bnx2x__link_status_update(bp);
6508
6509         /* start the timer */
6510         mod_timer(&bp->timer, jiffies + bp->current_interval);
6511
6513         return 0;
6514
6515 load_netif_stop:
6516         bnx2x_napi_disable(bp);
6517 load_rings_free:
6518         /* Free SKBs, SGEs, TPA pool and driver internals */
6519         bnx2x_free_skbs(bp);
6520         for_each_queue(bp, i)
6521                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6522 load_int_disable:
6523         bnx2x_int_disable_sync(bp, 1);
6524         /* Release IRQs */
6525         bnx2x_free_irq(bp);
6526 load_error:
6527         bnx2x_free_mem(bp);
6528         bp->port.pmf = 0;
6529
6530         /* TBD we really need to reset the chip
6531            if we want to recover from this */
6532         return rc;
6533 }
6534
6535 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6536 {
6537         int rc;
6538
6539         /* halt the connection */
6540         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6541         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6542
6543         /* Wait for completion */
6544         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6545                                &(bp->fp[index].state), 1);
6546         if (rc) /* timeout */
6547                 return rc;
6548
6549         /* delete cfc entry */
6550         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6551
6552         /* Wait for completion */
6553         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6554                                &(bp->fp[index].state), 1);
6555         return rc;
6556 }
6557
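/* Tear down the leading connection: a HALT ramrod first, then a
 * PORT_DELETE ramrod whose completion is polled on the default
 * status block.
 */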
6558 static int bnx2x_stop_leading(struct bnx2x *bp)
6559 {
6560         u16 dsb_sp_prod_idx;
6561         /* if the other port is handling traffic,
6562            this can take a lot of time */
6563         int cnt = 500;
6564         int rc;
6565
6566         might_sleep();
6567
6568         /* Send HALT ramrod */
6569         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6570         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6571
6572         /* Wait for completion */
6573         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6574                                &(bp->fp[0].state), 1);
6575         if (rc) /* timeout */
6576                 return rc;
6577
6578         dsb_sp_prod_idx = *bp->dsb_sp_prod;
6579
6580         /* Send PORT_DELETE ramrod */
6581         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6582
6583         /* Wait for the completion to arrive on the default status
6584            block; we are going to reset the chip anyway, so there
6585            is not much to do if this times out
6586          */
6587         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6588                 if (!cnt) {
6589                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6590                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6591                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
6592 #ifdef BNX2X_STOP_ON_ERROR
6593                         bnx2x_panic();
6594 #else
6595                         rc = -EBUSY;
6596 #endif
6597                         break;
6598                 }
6599                 cnt--;
6600                 msleep(1);
6601         }
6602         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6603         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6604
6605         return rc;
6606 }
6607
6608 static void bnx2x_reset_func(struct bnx2x *bp)
6609 {
6610         int port = BP_PORT(bp);
6611         int func = BP_FUNC(bp);
6612         int base, i;
6613
6614         /* Configure IGU */
6615         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6616         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6617
6618         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6619
6620         /* Clear ILT */
6621         base = FUNC_ILT_BASE(func);
6622         for (i = base; i < base + ILT_PER_FUNC; i++)
6623                 bnx2x_ilt_wr(bp, i, 0);
6624 }
6625
6626 static void bnx2x_reset_port(struct bnx2x *bp)
6627 {
6628         int port = BP_PORT(bp);
6629         u32 val;
6630
6631         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6632
6633         /* Do not rcv packets to BRB */
6634         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6635         /* Do not direct rcv packets that are not for MCP to the BRB */
6636         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6637                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6638
6639         /* Configure AEU */
6640         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6641
6642         msleep(100);
6643         /* Check for BRB port occupancy */
6644         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6645         if (val)
6646                 DP(NETIF_MSG_IFDOWN,
6647                    "BRB1 is not empty  %d blocks are occupied\n", val);
6648
6649         /* TODO: Close Doorbell port? */
6650 }
6651
6652 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6653 {
6654         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6655            BP_FUNC(bp), reset_code);
6656
6657         switch (reset_code) {
6658         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6659                 bnx2x_reset_port(bp);
6660                 bnx2x_reset_func(bp);
6661                 bnx2x_reset_common(bp);
6662                 break;
6663
6664         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6665                 bnx2x_reset_port(bp);
6666                 bnx2x_reset_func(bp);
6667                 break;
6668
6669         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6670                 bnx2x_reset_func(bp);
6671                 break;
6672
6673         default:
6674                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6675                 break;
6676         }
6677 }
6678
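/* Reverse of bnx2x_nic_load(): quiesce the datapath, clean the MAC
 * CAM, close all connections, agree on a reset type with the MCP
 * (honouring WOL) and reset the chip.
 */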
6679 /* must be called with rtnl_lock */
6680 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6681 {
6682         int port = BP_PORT(bp);
6683         u32 reset_code = 0;
6684         int i, cnt, rc;
6685
6686         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6687
6688         bp->rx_mode = BNX2X_RX_MODE_NONE;
6689         bnx2x_set_storm_rx_mode(bp);
6690
6691         bnx2x_netif_stop(bp, 1);
6692         if (!netif_running(bp->dev))
6693                 bnx2x_napi_disable(bp);
6694         del_timer_sync(&bp->timer);
6695         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6696                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6697         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6698
6699         /* Release IRQs */
6700         bnx2x_free_irq(bp);
6701
6702         /* Wait until tx fast path tasks complete */
6703         for_each_queue(bp, i) {
6704                 struct bnx2x_fastpath *fp = &bp->fp[i];
6705
6706                 cnt = 1000;
6707                 smp_rmb();
6708                 while (bnx2x_has_tx_work(fp)) {
6709
6710                         bnx2x_tx_int(fp, 1000);
6711                         if (!cnt) {
6712                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6713                                           i);
6714 #ifdef BNX2X_STOP_ON_ERROR
6715                                 bnx2x_panic();
6716                                 return -EBUSY;
6717 #else
6718                                 break;
6719 #endif
6720                         }
6721                         cnt--;
6722                         msleep(1);
6723                         smp_rmb();
6724                 }
6725         }
6726         /* Give HW time to discard old tx messages */
6727         msleep(1);
6728
6729         if (CHIP_IS_E1(bp)) {
6730                 struct mac_configuration_cmd *config =
6731                                                 bnx2x_sp(bp, mcast_config);
6732
6733                 bnx2x_set_mac_addr_e1(bp, 0);
6734
6735                 for (i = 0; i < config->hdr.length_6b; i++)
6736                         CAM_INVALIDATE(config->config_table[i]);
6737
6738                 config->hdr.length_6b = i;
6739                 if (CHIP_REV_IS_SLOW(bp))
6740                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6741                 else
6742                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6743                 config->hdr.client_id = BP_CL_ID(bp);
6744                 config->hdr.reserved1 = 0;
6745
6746                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6747                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6748                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6749
6750         } else { /* E1H */
6751                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6752
6753                 bnx2x_set_mac_addr_e1h(bp, 0);
6754
6755                 for (i = 0; i < MC_HASH_SIZE; i++)
6756                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6757         }
6758
6759         if (unload_mode == UNLOAD_NORMAL)
6760                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6761
6762         else if (bp->flags & NO_WOL_FLAG) {
6763                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6764                 if (CHIP_IS_E1H(bp))
6765                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6766
6767         } else if (bp->wol) {
6768                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6769                 u8 *mac_addr = bp->dev->dev_addr;
6770                 u32 val;
6771                 /* The mac address is written to entries 1-4 to
6772                    preserve entry 0, which is used by the PMF */
6773                 u8 entry = (BP_E1HVN(bp) + 1)*8;
6774
6775                 val = (mac_addr[0] << 8) | mac_addr[1];
6776                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6777
6778                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6779                       (mac_addr[4] << 8) | mac_addr[5];
6780                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6781
6782                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6783
6784         } else
6785                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6786
6787         /* Close multi and leading connections.
6788            Completions for ramrods are collected synchronously */
6789         for_each_nondefault_queue(bp, i)
6790                 if (bnx2x_stop_multi(bp, i))
6791                         goto unload_error;
6792
6793         rc = bnx2x_stop_leading(bp);
6794         if (rc) {
6795                 BNX2X_ERR("Stop leading failed!\n");
6796 #ifdef BNX2X_STOP_ON_ERROR
6797                 return -EBUSY;
6798 #else
6799                 goto unload_error;
6800 #endif
6801         }
6802
6803 unload_error:
6804         if (!BP_NOMCP(bp))
6805                 reset_code = bnx2x_fw_command(bp, reset_code);
6806         else {
6807                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6808                    load_count[0], load_count[1], load_count[2]);
6809                 load_count[0]--;
6810                 load_count[1 + port]--;
6811                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6812                    load_count[0], load_count[1], load_count[2]);
6813                 if (load_count[0] == 0)
6814                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6815                 else if (load_count[1 + port] == 0)
6816                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6817                 else
6818                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6819         }
6820
6821         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6822             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6823                 bnx2x__link_reset(bp);
6824
6825         /* Reset the chip */
6826         bnx2x_reset_chip(bp, reset_code);
6827
6828         /* Report UNLOAD_DONE to MCP */
6829         if (!BP_NOMCP(bp))
6830                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6831         bp->port.pmf = 0;
6832
6833         /* Free SKBs, SGEs, TPA pool and driver internals */
6834         bnx2x_free_skbs(bp);
6835         for_each_queue(bp, i)
6836                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6837         bnx2x_free_mem(bp);
6838
6839         bp->state = BNX2X_STATE_CLOSED;
6840
6841         netif_carrier_off(bp->dev);
6842
6843         return 0;
6844 }
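
/*
 * A minimal standalone sketch (not driver code) of how the WoL branch in
 * bnx2x_nic_unload() above splits the 6-byte station address across the
 * two EMAC_REG_EMAC_MAC_MATCH words: bytes 0-1 land in the low half of
 * the first word, bytes 2-5 fill the second word MSB-first.  The helper
 * name and the sample address are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static void pack_mac_match(const uint8_t mac[6], uint32_t *w0, uint32_t *w1)
{
        /* bytes 0-1 -> low 16 bits of the first MAC_MATCH word */
        *w0 = ((uint32_t)mac[0] << 8) | mac[1];
        /* bytes 2-5 -> the second MAC_MATCH word, MSB first */
        *w1 = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
              ((uint32_t)mac[4] << 8) | mac[5];
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };
        uint32_t w0, w1;

        pack_mac_match(mac, &w0, &w1);
        printf("MAC_MATCH words: 0x%04x 0x%08x\n",
               (unsigned)w0, (unsigned)w1);
        return 0;
}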
6845
6846 static void bnx2x_reset_task(struct work_struct *work)
6847 {
6848         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6849
6850 #ifdef BNX2X_STOP_ON_ERROR
6851         BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
6852                   " so reset not done to allow debug dump;\n"
6853          KERN_ERR " you will need to reboot when done\n");
6854         return;
6855 #endif
6856
6857         rtnl_lock();
6858
6859         if (!netif_running(bp->dev))
6860                 goto reset_task_exit;
6861
6862         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6863         bnx2x_nic_load(bp, LOAD_NORMAL);
6864
6865 reset_task_exit:
6866         rtnl_unlock();
6867 }
6868
6869 /* end of nic load/unload */
6870
6871 /* ethtool_ops */
6872
6873 /*
6874  * Init service functions
6875  */
6876
6877 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6878 {
6879         u32 val;
6880
6881         /* Check if there is any driver already loaded */
6882         val = REG_RD(bp, MISC_REG_UNPREPARED);
6883         if (val == 0x1) {
6884                 /* Check if it is the UNDI driver:
6885                  * UNDI initializes the CID offset for the normal doorbell to 0x7
6886                  */
6887                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6888                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6889                 if (val == 0x7) {
6890                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6891                         /* save our func */
6892                         int func = BP_FUNC(bp);
6893                         u32 swap_en;
6894                         u32 swap_val;
6895
6896                         /* clear the UNDI indication */
6897                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6898
6899                         BNX2X_DEV_INFO("UNDI is active! Resetting device\n");
6900
6901                         /* try to unload UNDI on port 0 */
6902                         bp->func = 0;
6903                         bp->fw_seq =
6904                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6905                                 DRV_MSG_SEQ_NUMBER_MASK);
6906                         reset_code = bnx2x_fw_command(bp, reset_code);
6907
6908                         /* if UNDI is loaded on the other port */
6909                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6910
6911                                 /* send "DONE" for previous unload */
6912                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6913
6914                                 /* unload UNDI on port 1 */
6915                                 bp->func = 1;
6916                                 bp->fw_seq =
6917                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6918                                 DRV_MSG_SEQ_NUMBER_MASK);
6919                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6920
6921                                 bnx2x_fw_command(bp, reset_code);
6922                         }
6923
6924                         /* now it's safe to release the lock */
6925                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6926
6927                         REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6928                                     HC_REG_CONFIG_0), 0x1000);
6929
6930                         /* close input traffic and wait for it to drain */
6931                         /* Do not rcv packets to BRB */
6932                         REG_WR(bp,
6933                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6934                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6935                         /* Do not direct rcv packets that are not for MCP to
6936                          * the BRB */
6937                         REG_WR(bp,
6938                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6939                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6940                         /* clear AEU */
6941                         REG_WR(bp,
6942                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6943                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6944                         msleep(10);
6945
6946                         /* save NIG port swap info */
6947                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6948                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6949                         /* reset device */
6950                         REG_WR(bp,
6951                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6952                                0xd3ffffff);
6953                         REG_WR(bp,
6954                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6955                                0x1403);
6956                         /* take the NIG out of reset and restore swap values */
6957                         REG_WR(bp,
6958                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6959                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
6960                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6961                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6962
6963                         /* send unload done to the MCP */
6964                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6965
6966                         /* restore our func and fw_seq */
6967                         bp->func = func;
6968                         bp->fw_seq =
6969                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6970                                 DRV_MSG_SEQ_NUMBER_MASK);
6971
6972                 } else
6973                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6974         }
6975 }
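
/*
 * A standalone sketch (not driver code) of the save/reset/restore idiom
 * bnx2x_undi_unload() uses for the NIG port-swap straps: read the strap
 * registers, reset the block, then write the saved values back.  The
 * fake register file and the names below are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

enum { PORT_SWAP, STRAP_OVERRIDE, NREGS };

static uint32_t regs[NREGS] = { 1, 1 };         /* pretend straps are set */

static void reset_block(void)
{
        regs[PORT_SWAP] = 0;                    /* a reset clobbers straps */
        regs[STRAP_OVERRIDE] = 0;
}

int main(void)
{
        uint32_t swap_val = regs[PORT_SWAP];    /* save NIG port swap info */
        uint32_t swap_en = regs[STRAP_OVERRIDE];

        reset_block();

        regs[PORT_SWAP] = swap_val;             /* restore after the reset */
        regs[STRAP_OVERRIDE] = swap_en;

        printf("swap %u  override %u\n", (unsigned)regs[PORT_SWAP],
               (unsigned)regs[STRAP_OVERRIDE]);
        return 0;
}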
6976
6977 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6978 {
6979         u32 val, val2, val3, val4, id;
6980         u16 pmc;
6981
6982         /* Get the chip revision id and number. */
6983         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6984         val = REG_RD(bp, MISC_REG_CHIP_NUM);
6985         id = ((val & 0xffff) << 16);
6986         val = REG_RD(bp, MISC_REG_CHIP_REV);
6987         id |= ((val & 0xf) << 12);
6988         val = REG_RD(bp, MISC_REG_CHIP_METAL);
6989         id |= ((val & 0xff) << 4);
6990         val = REG_RD(bp, MISC_REG_BOND_ID);
6991         id |= (val & 0xf);
6992         bp->common.chip_id = id;
6993         bp->link_params.chip_id = bp->common.chip_id;
6994         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6995
6996         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6997         bp->common.flash_size = (NVRAM_1MB_SIZE <<
6998                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
6999         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7000                        bp->common.flash_size, bp->common.flash_size);
7001
7002         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7003         bp->link_params.shmem_base = bp->common.shmem_base;
7004         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7005
7006         if (!bp->common.shmem_base ||
7007             (bp->common.shmem_base < 0xA0000) ||
7008             (bp->common.shmem_base >= 0xC0000)) {
7009                 BNX2X_DEV_INFO("MCP not active\n");
7010                 bp->flags |= NO_MCP_FLAG;
7011                 return;
7012         }
7013
7014         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7015         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7016                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7017                 BNX2X_ERR("BAD MCP validity signature\n");
7018
7019         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7020         bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7021
7022         BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
7023                        bp->common.hw_config, bp->common.board);
7024
7025         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7026                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7027                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7028
7029         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7030         bp->common.bc_ver = val;
7031         BNX2X_DEV_INFO("bc_ver %X\n", val);
7032         if (val < BNX2X_BC_VER) {
7033                 /* for now we only warn;
7034                  * later we might need to enforce this */
7035                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7036                           " please upgrade BC\n", BNX2X_BC_VER, val);
7037         }
7038
7039         if (BP_E1HVN(bp) == 0) {
7040                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7041                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7042         } else {
7043                 /* no WOL capability for E1HVN != 0 */
7044                 bp->flags |= NO_WOL_FLAG;
7045         }
7046         BNX2X_DEV_INFO("%sWoL capable\n",
7047                        (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7048
7049         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7050         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7051         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7052         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7053
7054         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7055                val, val2, val3, val4);
7056 }
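
/*
 * A minimal standalone sketch (not driver code) of the chip-id packing
 * done in bnx2x_get_common_hwinfo() above: chip num in bits 16-31, rev
 * in 12-15, metal in 4-11, bond_id in 0-3.  The field values used here
 * are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_chip_id(uint32_t num, uint32_t rev,
                             uint32_t metal, uint32_t bond)
{
        return ((num & 0xffff) << 16) | ((rev & 0xf) << 12) |
               ((metal & 0xff) << 4) | (bond & 0xf);
}

int main(void)
{
        uint32_t id = pack_chip_id(0x164e, 0, 0, 0);    /* sample fields */

        printf("chip ID is 0x%x\n", (unsigned)id);
        printf("num 0x%x  rev 0x%x  metal 0x%x  bond 0x%x\n",
               (unsigned)(id >> 16), (unsigned)((id >> 12) & 0xf),
               (unsigned)((id >> 4) & 0xff), (unsigned)(id & 0xf));
        return 0;
}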
7057
7058 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7059                                                     u32 switch_cfg)
7060 {
7061         int port = BP_PORT(bp);
7062         u32 ext_phy_type;
7063
7064         switch (switch_cfg) {
7065         case SWITCH_CFG_1G:
7066                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7067
7068                 ext_phy_type =
7069                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7070                 switch (ext_phy_type) {
7071                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7072                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7073                                        ext_phy_type);
7074
7075                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7076                                                SUPPORTED_10baseT_Full |
7077                                                SUPPORTED_100baseT_Half |
7078                                                SUPPORTED_100baseT_Full |
7079                                                SUPPORTED_1000baseT_Full |
7080                                                SUPPORTED_2500baseX_Full |
7081                                                SUPPORTED_TP |
7082                                                SUPPORTED_FIBRE |
7083                                                SUPPORTED_Autoneg |
7084                                                SUPPORTED_Pause |
7085                                                SUPPORTED_Asym_Pause);
7086                         break;
7087
7088                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7089                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7090                                        ext_phy_type);
7091
7092                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7093                                                SUPPORTED_10baseT_Full |
7094                                                SUPPORTED_100baseT_Half |
7095                                                SUPPORTED_100baseT_Full |
7096                                                SUPPORTED_1000baseT_Full |
7097                                                SUPPORTED_TP |
7098                                                SUPPORTED_FIBRE |
7099                                                SUPPORTED_Autoneg |
7100                                                SUPPORTED_Pause |
7101                                                SUPPORTED_Asym_Pause);
7102                         break;
7103
7104                 default:
7105                         BNX2X_ERR("NVRAM config error. "
7106                                   "BAD SerDes ext_phy_config 0x%x\n",
7107                                   bp->link_params.ext_phy_config);
7108                         return;
7109                 }
7110
7111                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7112                                            port*0x10);
7113                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7114                 break;
7115
7116         case SWITCH_CFG_10G:
7117                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7118
7119                 ext_phy_type =
7120                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7121                 switch (ext_phy_type) {
7122                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7123                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7124                                        ext_phy_type);
7125
7126                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7127                                                SUPPORTED_10baseT_Full |
7128                                                SUPPORTED_100baseT_Half |
7129                                                SUPPORTED_100baseT_Full |
7130                                                SUPPORTED_1000baseT_Full |
7131                                                SUPPORTED_2500baseX_Full |
7132                                                SUPPORTED_10000baseT_Full |
7133                                                SUPPORTED_TP |
7134                                                SUPPORTED_FIBRE |
7135                                                SUPPORTED_Autoneg |
7136                                                SUPPORTED_Pause |
7137                                                SUPPORTED_Asym_Pause);
7138                         break;
7139
7140                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7141                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7142                                        ext_phy_type);
7143
7144                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7145                                                SUPPORTED_FIBRE |
7146                                                SUPPORTED_Pause |
7147                                                SUPPORTED_Asym_Pause);
7148                         break;
7149
7150                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7151                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7152                                        ext_phy_type);
7153
7154                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7155                                                SUPPORTED_1000baseT_Full |
7156                                                SUPPORTED_FIBRE |
7157                                                SUPPORTED_Pause |
7158                                                SUPPORTED_Asym_Pause);
7159                         break;
7160
7161                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7162                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7163                                        ext_phy_type);
7164
7165                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7166                                                SUPPORTED_1000baseT_Full |
7167                                                SUPPORTED_FIBRE |
7168                                                SUPPORTED_Autoneg |
7169                                                SUPPORTED_Pause |
7170                                                SUPPORTED_Asym_Pause);
7171                         break;
7172
7173                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7174                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7175                                        ext_phy_type);
7176
7177                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7178                                                SUPPORTED_2500baseX_Full |
7179                                                SUPPORTED_1000baseT_Full |
7180                                                SUPPORTED_FIBRE |
7181                                                SUPPORTED_Autoneg |
7182                                                SUPPORTED_Pause |
7183                                                SUPPORTED_Asym_Pause);
7184                         break;
7185
7186                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7187                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7188                                        ext_phy_type);
7189
7190                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7191                                                SUPPORTED_TP |
7192                                                SUPPORTED_Autoneg |
7193                                                SUPPORTED_Pause |
7194                                                SUPPORTED_Asym_Pause);
7195                         break;
7196
7197                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7198                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7199                                   bp->link_params.ext_phy_config);
7200                         break;
7201
7202                 default:
7203                         BNX2X_ERR("NVRAM config error. "
7204                                   "BAD XGXS ext_phy_config 0x%x\n",
7205                                   bp->link_params.ext_phy_config);
7206                         return;
7207                 }
7208
7209                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7210                                            port*0x18);
7211                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7212
7213                 break;
7214
7215         default:
7216                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7217                           bp->port.link_config);
7218                 return;
7219         }
7220         bp->link_params.phy_addr = bp->port.phy_addr;
7221
7222         /* mask what we support according to speed_cap_mask */
7223         if (!(bp->link_params.speed_cap_mask &
7224                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7225                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7226
7227         if (!(bp->link_params.speed_cap_mask &
7228                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7229                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7230
7231         if (!(bp->link_params.speed_cap_mask &
7232                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7233                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7234
7235         if (!(bp->link_params.speed_cap_mask &
7236                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7237                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7238
7239         if (!(bp->link_params.speed_cap_mask &
7240                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7241                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7242                                         SUPPORTED_1000baseT_Full);
7243
7244         if (!(bp->link_params.speed_cap_mask &
7245                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7246                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7247
7248         if (!(bp->link_params.speed_cap_mask &
7249                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7250                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7251
7252         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7253 }
7254
7255 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7256 {
7257         bp->link_params.req_duplex = DUPLEX_FULL;
7258
7259         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7260         case PORT_FEATURE_LINK_SPEED_AUTO:
7261                 if (bp->port.supported & SUPPORTED_Autoneg) {
7262                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7263                         bp->port.advertising = bp->port.supported;
7264                 } else {
7265                         u32 ext_phy_type =
7266                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7267
7268                         if ((ext_phy_type ==
7269                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7270                             (ext_phy_type ==
7271                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7272                                 /* force 10G, no AN */
7273                                 bp->link_params.req_line_speed = SPEED_10000;
7274                                 bp->port.advertising =
7275                                                 (ADVERTISED_10000baseT_Full |
7276                                                  ADVERTISED_FIBRE);
7277                                 break;
7278                         }
7279                         BNX2X_ERR("NVRAM config error. "
7280                                   "Invalid link_config 0x%x"
7281                                   "  Autoneg not supported\n",
7282                                   bp->port.link_config);
7283                         return;
7284                 }
7285                 break;
7286
7287         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7288                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7289                         bp->link_params.req_line_speed = SPEED_10;
7290                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7291                                                 ADVERTISED_TP);
7292                 } else {
7293                         BNX2X_ERR("NVRAM config error. "
7294                                   "Invalid link_config 0x%x"
7295                                   "  speed_cap_mask 0x%x\n",
7296                                   bp->port.link_config,
7297                                   bp->link_params.speed_cap_mask);
7298                         return;
7299                 }
7300                 break;
7301
7302         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7303                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7304                         bp->link_params.req_line_speed = SPEED_10;
7305                         bp->link_params.req_duplex = DUPLEX_HALF;
7306                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7307                                                 ADVERTISED_TP);
7308                 } else {
7309                         BNX2X_ERR("NVRAM config error. "
7310                                   "Invalid link_config 0x%x"
7311                                   "  speed_cap_mask 0x%x\n",
7312                                   bp->port.link_config,
7313                                   bp->link_params.speed_cap_mask);
7314                         return;
7315                 }
7316                 break;
7317
7318         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7319                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7320                         bp->link_params.req_line_speed = SPEED_100;
7321                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7322                                                 ADVERTISED_TP);
7323                 } else {
7324                         BNX2X_ERR("NVRAM config error. "
7325                                   "Invalid link_config 0x%x"
7326                                   "  speed_cap_mask 0x%x\n",
7327                                   bp->port.link_config,
7328                                   bp->link_params.speed_cap_mask);
7329                         return;
7330                 }
7331                 break;
7332
7333         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7334                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7335                         bp->link_params.req_line_speed = SPEED_100;
7336                         bp->link_params.req_duplex = DUPLEX_HALF;
7337                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7338                                                 ADVERTISED_TP);
7339                 } else {
7340                         BNX2X_ERR("NVRAM config error. "
7341                                   "Invalid link_config 0x%x"
7342                                   "  speed_cap_mask 0x%x\n",
7343                                   bp->port.link_config,
7344                                   bp->link_params.speed_cap_mask);
7345                         return;
7346                 }
7347                 break;
7348
7349         case PORT_FEATURE_LINK_SPEED_1G:
7350                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7351                         bp->link_params.req_line_speed = SPEED_1000;
7352                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7353                                                 ADVERTISED_TP);
7354                 } else {
7355                         BNX2X_ERR("NVRAM config error. "
7356                                   "Invalid link_config 0x%x"
7357                                   "  speed_cap_mask 0x%x\n",
7358                                   bp->port.link_config,
7359                                   bp->link_params.speed_cap_mask);
7360                         return;
7361                 }
7362                 break;
7363
7364         case PORT_FEATURE_LINK_SPEED_2_5G:
7365                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7366                         bp->link_params.req_line_speed = SPEED_2500;
7367                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7368                                                 ADVERTISED_TP);
7369                 } else {
7370                         BNX2X_ERR("NVRAM config error. "
7371                                   "Invalid link_config 0x%x"
7372                                   "  speed_cap_mask 0x%x\n",
7373                                   bp->port.link_config,
7374                                   bp->link_params.speed_cap_mask);
7375                         return;
7376                 }
7377                 break;
7378
7379         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7380         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7381         case PORT_FEATURE_LINK_SPEED_10G_KR:
7382                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7383                         bp->link_params.req_line_speed = SPEED_10000;
7384                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7385                                                 ADVERTISED_FIBRE);
7386                 } else {
7387                         BNX2X_ERR("NVRAM config error. "
7388                                   "Invalid link_config 0x%x"
7389                                   "  speed_cap_mask 0x%x\n",
7390                                   bp->port.link_config,
7391                                   bp->link_params.speed_cap_mask);
7392                         return;
7393                 }
7394                 break;
7395
7396         default:
7397                 BNX2X_ERR("NVRAM config error. "
7398                           "BAD link speed link_config 0x%x\n",
7399                           bp->port.link_config);
7400                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7401                 bp->port.advertising = bp->port.supported;
7402                 break;
7403         }
7404
7405         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7406                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7407         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7408             !(bp->port.supported & SUPPORTED_Autoneg))
7409                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7410
7411         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7412                        "  advertising 0x%x\n",
7413                        bp->link_params.req_line_speed,
7414                        bp->link_params.req_duplex,
7415                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7416 }
7417
7418 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7419 {
7420         int port = BP_PORT(bp);
7421         u32 val, val2;
7422
7423         bp->link_params.bp = bp;
7424         bp->link_params.port = port;
7425
7426         bp->link_params.serdes_config =
7427                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7428         bp->link_params.lane_config =
7429                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7430         bp->link_params.ext_phy_config =
7431                 SHMEM_RD(bp,
7432                          dev_info.port_hw_config[port].external_phy_config);
7433         bp->link_params.speed_cap_mask =
7434                 SHMEM_RD(bp,
7435                          dev_info.port_hw_config[port].speed_capability_mask);
7436
7437         bp->port.link_config =
7438                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7439
7440         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7441              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7442                        "  link_config 0x%08x\n",
7443                        bp->link_params.serdes_config,
7444                        bp->link_params.lane_config,
7445                        bp->link_params.ext_phy_config,
7446                        bp->link_params.speed_cap_mask, bp->port.link_config);
7447
7448         bp->link_params.switch_cfg = (bp->port.link_config &
7449                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7450         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7451
7452         bnx2x_link_settings_requested(bp);
7453
7454         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7455         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7456         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7457         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7458         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7459         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7460         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7461         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7462         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7463         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7464 }
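
/*
 * bnx2x_get_port_hwinfo() above rebuilds the station address from two
 * shmem words: mac_upper carries bytes 0-1 in its low half, mac_lower
 * carries bytes 2-5.  A standalone sketch (not driver code) of that
 * unpacking; the word values are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static void unpack_mac(uint32_t upper, uint32_t lower, uint8_t mac[6])
{
        mac[0] = (uint8_t)(upper >> 8);
        mac[1] = (uint8_t)upper;
        mac[2] = (uint8_t)(lower >> 24);
        mac[3] = (uint8_t)(lower >> 16);
        mac[4] = (uint8_t)(lower >> 8);
        mac[5] = (uint8_t)lower;
}

int main(void)
{
        uint8_t mac[6];

        unpack_mac(0x0010, 0x18abcdef, mac);            /* sample words */
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}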
7465
7466 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7467 {
7468         int func = BP_FUNC(bp);
7469         u32 val, val2;
7470         int rc = 0;
7471
7472         bnx2x_get_common_hwinfo(bp);
7473
7474         bp->e1hov = 0;
7475         bp->e1hmf = 0;
7476         if (CHIP_IS_E1H(bp)) {
7477                 bp->mf_config =
7478                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7479
7480                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7481                        FUNC_MF_CFG_E1HOV_TAG_MASK);
7482                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7483
7484                         bp->e1hov = val;
7485                         bp->e1hmf = 1;
7486                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7487                                        "(0x%04x)\n",
7488                                        func, bp->e1hov, bp->e1hov);
7489                 } else {
7490                         BNX2X_DEV_INFO("Single function mode\n");
7491                         if (BP_E1HVN(bp)) {
7492                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7493                                           "  aborting\n", func);
7494                                 rc = -EPERM;
7495                         }
7496                 }
7497         }
7498
7499         if (!BP_NOMCP(bp)) {
7500                 bnx2x_get_port_hwinfo(bp);
7501
7502                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7503                               DRV_MSG_SEQ_NUMBER_MASK);
7504                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7505         }
7506
7507         if (IS_E1HMF(bp)) {
7508                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7509                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7510                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7511                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7512                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7513                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7514                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7515                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7516                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7517                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7518                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7519                                ETH_ALEN);
7520                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7521                                ETH_ALEN);
7522                 }
7523
7524                 return rc;
7525         }
7526
7527         if (BP_NOMCP(bp)) {
7528                 /* only supposed to happen on emulation/FPGA */
7529                 BNX2X_ERR("warning: random MAC workaround active\n");
7530                 random_ether_addr(bp->dev->dev_addr);
7531                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7532         }
7533
7534         return rc;
7535 }
7536
7537 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7538 {
7539         int func = BP_FUNC(bp);
7540         int rc;
7541
7542         /* Disable interrupt handling until HW is initialized */
7543         atomic_set(&bp->intr_sem, 1);
7544
7545         mutex_init(&bp->port.phy_mutex);
7546
7547         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7548         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7549
7550         rc = bnx2x_get_hwinfo(bp);
7551
7552         /* need to reset chip if undi was active */
7553         if (!BP_NOMCP(bp))
7554                 bnx2x_undi_unload(bp);
7555
7556         if (CHIP_REV_IS_FPGA(bp))
7557                 printk(KERN_ERR PFX "FPGA detected\n");
7558
7559         if (BP_NOMCP(bp) && (func == 0))
7560                 printk(KERN_ERR PFX
7561                        "MCP disabled, must load devices in order!\n");
7562
7563         /* Set TPA flags */
7564         if (disable_tpa) {
7565                 bp->flags &= ~TPA_ENABLE_FLAG;
7566                 bp->dev->features &= ~NETIF_F_LRO;
7567         } else {
7568                 bp->flags |= TPA_ENABLE_FLAG;
7569                 bp->dev->features |= NETIF_F_LRO;
7570         }
7571
7572
7573         bp->tx_ring_size = MAX_TX_AVAIL;
7574         bp->rx_ring_size = MAX_RX_AVAIL;
7575
7576         bp->rx_csum = 1;
7577         bp->rx_offset = 0;
7578
7579         bp->tx_ticks = 50;
7580         bp->rx_ticks = 25;
7581
7582         bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7583         bp->current_interval = (poll ? poll : bp->timer_interval);
7584
7585         init_timer(&bp->timer);
7586         bp->timer.expires = jiffies + bp->current_interval;
7587         bp->timer.data = (unsigned long) bp;
7588         bp->timer.function = bnx2x_timer;
7589
7590         return rc;
7591 }
7592
7593 /*
7594  * ethtool service functions
7595  */
7596
7597 /* All ethtool functions called with rtnl_lock */
7598
7599 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7600 {
7601         struct bnx2x *bp = netdev_priv(dev);
7602
7603         cmd->supported = bp->port.supported;
7604         cmd->advertising = bp->port.advertising;
7605
7606         if (netif_carrier_ok(dev)) {
7607                 cmd->speed = bp->link_vars.line_speed;
7608                 cmd->duplex = bp->link_vars.duplex;
7609         } else {
7610                 cmd->speed = bp->link_params.req_line_speed;
7611                 cmd->duplex = bp->link_params.req_duplex;
7612         }
7613         if (IS_E1HMF(bp)) {
7614                 u16 vn_max_rate;
7615
7616                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7617                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7618                 if (vn_max_rate < cmd->speed)
7619                         cmd->speed = vn_max_rate;
7620         }
7621
7622         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7623                 u32 ext_phy_type =
7624                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7625
7626                 switch (ext_phy_type) {
7627                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7628                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7629                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7630                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7631                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7632                         cmd->port = PORT_FIBRE;
7633                         break;
7634
7635                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7636                         cmd->port = PORT_TP;
7637                         break;
7638
7639                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7640                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7641                                   bp->link_params.ext_phy_config);
7642                         break;
7643
7644                 default:
7645                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7646                            bp->link_params.ext_phy_config);
7647                         break;
7648                 }
7649         } else
7650                 cmd->port = PORT_TP;
7651
7652         cmd->phy_address = bp->port.phy_addr;
7653         cmd->transceiver = XCVR_INTERNAL;
7654
7655         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7656                 cmd->autoneg = AUTONEG_ENABLE;
7657         else
7658                 cmd->autoneg = AUTONEG_DISABLE;
7659
7660         cmd->maxtxpkt = 0;
7661         cmd->maxrxpkt = 0;
7662
7663         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7664            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7665            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7666            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7667            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7668            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7669            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7670
7671         return 0;
7672 }
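
/*
 * In E1HMF mode bnx2x_get_settings() above clamps the reported speed to
 * the per-function maximum: the MAX_BW field of mf_config is a weight
 * in units of 100 Mbps.  A standalone sketch (not driver code) of the
 * clamp; the mask and shift values here are assumptions for
 * illustration (the real ones live in the HSI headers).
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_BW_MASK   0x00ff0000        /* illustrative layout */
#define MAX_BW_SHIFT  16

static uint16_t clamp_speed(uint32_t mf_config, uint16_t speed)
{
        uint16_t vn_max_rate =
                ((mf_config & MAX_BW_MASK) >> MAX_BW_SHIFT) * 100;

        return (vn_max_rate < speed) ? vn_max_rate : speed;
}

int main(void)
{
        /* MAX_BW = 25 -> a 2500 Mbps cap on a 10000 Mbps link */
        printf("speed %u\n", clamp_speed(25u << MAX_BW_SHIFT, 10000));
        return 0;
}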
7673
7674 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7675 {
7676         struct bnx2x *bp = netdev_priv(dev);
7677         u32 advertising;
7678
7679         if (IS_E1HMF(bp))
7680                 return 0;
7681
7682         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7683            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7684            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7685            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7686            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7687            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7688            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7689
7690         if (cmd->autoneg == AUTONEG_ENABLE) {
7691                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7692                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7693                         return -EINVAL;
7694                 }
7695
7696                 /* advertise the requested speed and duplex if supported */
7697                 cmd->advertising &= bp->port.supported;
7698
7699                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7700                 bp->link_params.req_duplex = DUPLEX_FULL;
7701                 bp->port.advertising |= (ADVERTISED_Autoneg |
7702                                          cmd->advertising);
7703
7704         } else { /* forced speed */
7705                 /* advertise the requested speed and duplex if supported */
7706                 switch (cmd->speed) {
7707                 case SPEED_10:
7708                         if (cmd->duplex == DUPLEX_FULL) {
7709                                 if (!(bp->port.supported &
7710                                       SUPPORTED_10baseT_Full)) {
7711                                         DP(NETIF_MSG_LINK,
7712                                            "10M full not supported\n");
7713                                         return -EINVAL;
7714                                 }
7715
7716                                 advertising = (ADVERTISED_10baseT_Full |
7717                                                ADVERTISED_TP);
7718                         } else {
7719                                 if (!(bp->port.supported &
7720                                       SUPPORTED_10baseT_Half)) {
7721                                         DP(NETIF_MSG_LINK,
7722                                            "10M half not supported\n");
7723                                         return -EINVAL;
7724                                 }
7725
7726                                 advertising = (ADVERTISED_10baseT_Half |
7727                                                ADVERTISED_TP);
7728                         }
7729                         break;
7730
7731                 case SPEED_100:
7732                         if (cmd->duplex == DUPLEX_FULL) {
7733                                 if (!(bp->port.supported &
7734                                                 SUPPORTED_100baseT_Full)) {
7735                                         DP(NETIF_MSG_LINK,
7736                                            "100M full not supported\n");
7737                                         return -EINVAL;
7738                                 }
7739
7740                                 advertising = (ADVERTISED_100baseT_Full |
7741                                                ADVERTISED_TP);
7742                         } else {
7743                                 if (!(bp->port.supported &
7744                                                 SUPPORTED_100baseT_Half)) {
7745                                         DP(NETIF_MSG_LINK,
7746                                            "100M half not supported\n");
7747                                         return -EINVAL;
7748                                 }
7749
7750                                 advertising = (ADVERTISED_100baseT_Half |
7751                                                ADVERTISED_TP);
7752                         }
7753                         break;
7754
7755                 case SPEED_1000:
7756                         if (cmd->duplex != DUPLEX_FULL) {
7757                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
7758                                 return -EINVAL;
7759                         }
7760
7761                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7762                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
7763                                 return -EINVAL;
7764                         }
7765
7766                         advertising = (ADVERTISED_1000baseT_Full |
7767                                        ADVERTISED_TP);
7768                         break;
7769
7770                 case SPEED_2500:
7771                         if (cmd->duplex != DUPLEX_FULL) {
7772                                 DP(NETIF_MSG_LINK,
7773                                    "2.5G half not supported\n");
7774                                 return -EINVAL;
7775                         }
7776
7777                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7778                                 DP(NETIF_MSG_LINK,
7779                                    "2.5G full not supported\n");
7780                                 return -EINVAL;
7781                         }
7782
7783                         advertising = (ADVERTISED_2500baseX_Full |
7784                                        ADVERTISED_TP);
7785                         break;
7786
7787                 case SPEED_10000:
7788                         if (cmd->duplex != DUPLEX_FULL) {
7789                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
7790                                 return -EINVAL;
7791                         }
7792
7793                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7794                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
7795                                 return -EINVAL;
7796                         }
7797
7798                         advertising = (ADVERTISED_10000baseT_Full |
7799                                        ADVERTISED_FIBRE);
7800                         break;
7801
7802                 default:
7803                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
7804                         return -EINVAL;
7805                 }
7806
7807                 bp->link_params.req_line_speed = cmd->speed;
7808                 bp->link_params.req_duplex = cmd->duplex;
7809                 bp->port.advertising = advertising;
7810         }
7811
7812         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7813            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7814            bp->link_params.req_line_speed, bp->link_params.req_duplex,
7815            bp->port.advertising);
7816
7817         if (netif_running(dev)) {
7818                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7819                 bnx2x_link_set(bp);
7820         }
7821
7822         return 0;
7823 }
7824
7825 #define PHY_FW_VER_LEN                  10
7826
7827 static void bnx2x_get_drvinfo(struct net_device *dev,
7828                               struct ethtool_drvinfo *info)
7829 {
7830         struct bnx2x *bp = netdev_priv(dev);
7831         u8 phy_fw_ver[PHY_FW_VER_LEN];
7832
7833         strcpy(info->driver, DRV_MODULE_NAME);
7834         strcpy(info->version, DRV_MODULE_VERSION);
7835
7836         phy_fw_ver[0] = '\0';
7837         if (bp->port.pmf) {
7838                 bnx2x_acquire_phy_lock(bp);
7839                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7840                                              (bp->state != BNX2X_STATE_CLOSED),
7841                                              phy_fw_ver, PHY_FW_VER_LEN);
7842                 bnx2x_release_phy_lock(bp);
7843         }
7844
7845         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7846                  (bp->common.bc_ver & 0xff0000) >> 16,
7847                  (bp->common.bc_ver & 0xff00) >> 8,
7848                  (bp->common.bc_ver & 0xff),
7849                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7850         strcpy(info->bus_info, pci_name(bp->pdev));
7851         info->n_stats = BNX2X_NUM_STATS;
7852         info->testinfo_len = BNX2X_NUM_TESTS;
7853         info->eedump_len = bp->common.flash_size;
7854         info->regdump_len = 0;
7855 }
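
/*
 * The fw_version string in bnx2x_get_drvinfo() above is assembled from
 * the three bytes of bc_ver.  A standalone sketch (not driver code)
 * using 0x040200 as a sample value, which formats as "BC:4.2.0".
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t bc_ver = 0x040200;     /* sample value */
        char fw_version[32];

        snprintf(fw_version, sizeof(fw_version), "BC:%d.%d.%d",
                 (int)((bc_ver & 0xff0000) >> 16),
                 (int)((bc_ver & 0xff00) >> 8),
                 (int)(bc_ver & 0xff));
        printf("%s\n", fw_version);
        return 0;
}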
7856
7857 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7858 {
7859         struct bnx2x *bp = netdev_priv(dev);
7860
7861         if (bp->flags & NO_WOL_FLAG) {
7862                 wol->supported = 0;
7863                 wol->wolopts = 0;
7864         } else {
7865                 wol->supported = WAKE_MAGIC;
7866                 if (bp->wol)
7867                         wol->wolopts = WAKE_MAGIC;
7868                 else
7869                         wol->wolopts = 0;
7870         }
7871         memset(&wol->sopass, 0, sizeof(wol->sopass));
7872 }
7873
7874 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7875 {
7876         struct bnx2x *bp = netdev_priv(dev);
7877
7878         if (wol->wolopts & ~WAKE_MAGIC)
7879                 return -EINVAL;
7880
7881         if (wol->wolopts & WAKE_MAGIC) {
7882                 if (bp->flags & NO_WOL_FLAG)
7883                         return -EINVAL;
7884
7885                 bp->wol = 1;
7886         } else
7887                 bp->wol = 0;
7888
7889         return 0;
7890 }
7891
7892 static u32 bnx2x_get_msglevel(struct net_device *dev)
7893 {
7894         struct bnx2x *bp = netdev_priv(dev);
7895
7896         return bp->msglevel;
7897 }
7898
7899 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7900 {
7901         struct bnx2x *bp = netdev_priv(dev);
7902
7903         if (capable(CAP_NET_ADMIN))
7904                 bp->msglevel = level;
7905 }
7906
7907 static int bnx2x_nway_reset(struct net_device *dev)
7908 {
7909         struct bnx2x *bp = netdev_priv(dev);
7910
7911         if (!bp->port.pmf)
7912                 return 0;
7913
7914         if (netif_running(dev)) {
7915                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7916                 bnx2x_link_set(bp);
7917         }
7918
7919         return 0;
7920 }
7921
7922 static int bnx2x_get_eeprom_len(struct net_device *dev)
7923 {
7924         struct bnx2x *bp = netdev_priv(dev);
7925
7926         return bp->common.flash_size;
7927 }
7928
7929 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7930 {
7931         int port = BP_PORT(bp);
7932         int count, i;
7933         u32 val = 0;
7934
7935         /* adjust timeout for emulation/FPGA */
7936         count = NVRAM_TIMEOUT_COUNT;
7937         if (CHIP_REV_IS_SLOW(bp))
7938                 count *= 100;
7939
7940         /* request access to nvram interface */
7941         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7942                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7943
7944         for (i = 0; i < count*10; i++) {
7945                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7946                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7947                         break;
7948
7949                 udelay(5);
7950         }
7951
7952         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7953                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7954                 return -EBUSY;
7955         }
7956
7957         return 0;
7958 }
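
/*
 * bnx2x_acquire_nvram_lock() above is the standard request-then-poll
 * pattern: set the arbitration request bit, then poll the grant bit in
 * a bounded delay loop and fail with -EBUSY on timeout.  A generic
 * standalone sketch (not driver code) of the bounded poll; the
 * predicate and iteration counts are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

static int polls_needed = 3;            /* pretend HW grants on poll 3 */

static bool grant_set(void)
{
        return --polls_needed <= 0;
}

static int wait_for_grant(int count)
{
        int i;

        for (i = 0; i < count; i++) {
                if (grant_set())
                        return 0;       /* granted */
                /* the driver udelay(5)s between polls here */
        }
        return -1;                      /* timed out, like -EBUSY */
}

int main(void)
{
        printf("grant %s\n", wait_for_grant(10 * 10) ? "timeout" : "ok");
        return 0;
}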
7959
7960 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7961 {
7962         int port = BP_PORT(bp);
7963         int count, i;
7964         u32 val = 0;
7965
7966         /* adjust timeout for emulation/FPGA */
7967         count = NVRAM_TIMEOUT_COUNT;
7968         if (CHIP_REV_IS_SLOW(bp))
7969                 count *= 100;
7970
7971         /* relinquish nvram interface */
7972         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7973                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7974
7975         for (i = 0; i < count*10; i++) {
7976                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7977                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7978                         break;
7979
7980                 udelay(5);
7981         }
7982
7983         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7984                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7985                 return -EBUSY;
7986         }
7987
7988         return 0;
7989 }
7990
7991 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7992 {
7993         u32 val;
7994
7995         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7996
7997         /* enable both bits, even on read */
7998         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7999                (val | MCPR_NVM_ACCESS_ENABLE_EN |
8000                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
8001 }
8002
8003 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8004 {
8005         u32 val;
8006
8007         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8008
8009         /* disable both bits, even after read */
8010         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8011                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8012                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8013 }
8014
8015 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8016                                   u32 cmd_flags)
8017 {
8018         int count, i, rc;
8019         u32 val;
8020
8021         /* build the command word */
8022         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8023
8024         /* need to clear DONE bit separately */
8025         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8026
8027         /* address of the NVRAM to read from */
8028         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8029                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8030
8031         /* issue a read command */
8032         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8033
8034         /* adjust timeout for emulation/FPGA */
8035         count = NVRAM_TIMEOUT_COUNT;
8036         if (CHIP_REV_IS_SLOW(bp))
8037                 count *= 100;
8038
8039         /* wait for completion */
8040         *ret_val = 0;
8041         rc = -EBUSY;
8042         for (i = 0; i < count; i++) {
8043                 udelay(5);
8044                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8045
8046                 if (val & MCPR_NVM_COMMAND_DONE) {
8047                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8048                         /* we read nvram data in cpu order
8049                          * but ethtool sees it as an array of bytes;
8050                          * converting to big-endian does the job */
8051                         val = cpu_to_be32(val);
8052                         *ret_val = val;
8053                         rc = 0;
8054                         break;
8055                 }
8056         }
8057
8058         return rc;
8059 }
8060
8061 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8062                             int buf_size)
8063 {
8064         int rc;
8065         u32 cmd_flags;
8066         u32 val;
8067
8068         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8069                 DP(BNX2X_MSG_NVM,
8070                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8071                    offset, buf_size);
8072                 return -EINVAL;
8073         }
8074
8075         if (offset + buf_size > bp->common.flash_size) {
8076                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8077                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8078                    offset, buf_size, bp->common.flash_size);
8079                 return -EINVAL;
8080         }
8081
8082         /* request access to nvram interface */
8083         rc = bnx2x_acquire_nvram_lock(bp);
8084         if (rc)
8085                 return rc;
8086
8087         /* enable access to nvram interface */
8088         bnx2x_enable_nvram_access(bp);
8089
8090         /* read the first word(s) */
8091         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8092         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8093                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8094                 memcpy(ret_buf, &val, 4);
8095
8096                 /* advance to the next dword */
8097                 offset += sizeof(u32);
8098                 ret_buf += sizeof(u32);
8099                 buf_size -= sizeof(u32);
8100                 cmd_flags = 0;
8101         }
8102
8103         if (rc == 0) {
8104                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8105                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8106                 memcpy(ret_buf, &val, 4);
8107         }
8108
8109         /* disable access to nvram interface */
8110         bnx2x_disable_nvram_access(bp);
8111         bnx2x_release_nvram_lock(bp);
8112
8113         return rc;
8114 }
8115
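/* A minimal usage sketch for the NVRAM read path (illustrative only;
 * error handling elided):
 *
 *      u32 magic;
 *
 *      if (bnx2x_nvram_read(bp, 0, (u8 *)&magic, sizeof(magic)) == 0)
 *              pr_info("nvram magic 0x%08x\n", be32_to_cpu(magic));
 *
 * From userspace the same path is exercised through the ethtool
 * EEPROM ioctls, e.g. "ethtool -e eth0 offset 0 length 64".
 */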
8116 static int bnx2x_get_eeprom(struct net_device *dev,
8117                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8118 {
8119         struct bnx2x *bp = netdev_priv(dev);
8120         int rc;
8121
8122         if (!netif_running(dev))
8123                 return -EAGAIN;
8124
8125         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8126            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8127            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8128            eeprom->len, eeprom->len);
8129
8130         /* parameters already validated in ethtool_get_eeprom */
8131
8132         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8133
8134         return rc;
8135 }
8136
8137 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8138                                    u32 cmd_flags)
8139 {
8140         int count, i, rc;
8141
8142         /* build the command word */
8143         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8144
8145         /* need to clear DONE bit separately */
8146         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8147
8148         /* write the data */
8149         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8150
8151         /* address of the NVRAM to write to */
8152         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8153                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8154
8155         /* issue the write command */
8156         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8157
8158         /* adjust timeout for emulation/FPGA */
8159         count = NVRAM_TIMEOUT_COUNT;
8160         if (CHIP_REV_IS_SLOW(bp))
8161                 count *= 100;
8162
8163         /* wait for completion */
8164         rc = -EBUSY;
8165         for (i = 0; i < count; i++) {
8166                 udelay(5);
8167                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8168                 if (val & MCPR_NVM_COMMAND_DONE) {
8169                         rc = 0;
8170                         break;
8171                 }
8172         }
8173
8174         return rc;
8175 }
8176
8177 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
8178
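/* Single-byte writes (as issued by ethtool) are done as a
 * read-modify-write of the containing dword.  BYTE_OFFSET() gives the
 * bit position of the byte lane: e.g. BYTE_OFFSET(0x102) = 16, so the
 * byte at flash offset 0x102 is patched through the mask (0xff << 16)
 * in the dword read back from the aligned offset 0x100.
 */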
8179 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8180                               int buf_size)
8181 {
8182         int rc;
8183         u32 cmd_flags;
8184         u32 align_offset;
8185         u32 val;
8186
8187         if (offset + buf_size > bp->common.flash_size) {
8188                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8189                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8190                    offset, buf_size, bp->common.flash_size);
8191                 return -EINVAL;
8192         }
8193
8194         /* request access to nvram interface */
8195         rc = bnx2x_acquire_nvram_lock(bp);
8196         if (rc)
8197                 return rc;
8198
8199         /* enable access to nvram interface */
8200         bnx2x_enable_nvram_access(bp);
8201
8202         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8203         align_offset = (offset & ~0x03);
8204         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8205
8206         if (rc == 0) {
8207                 val &= ~(0xff << BYTE_OFFSET(offset));
8208                 val |= (*data_buf << BYTE_OFFSET(offset));
8209
8210                 /* nvram data is returned as an array of bytes
8211                  * convert it back to cpu order */
8212                 val = be32_to_cpu(val);
8213
8214                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8215                                              cmd_flags);
8216         }
8217
8218         /* disable access to nvram interface */
8219         bnx2x_disable_nvram_access(bp);
8220         bnx2x_release_nvram_lock(bp);
8221
8222         return rc;
8223 }
8224
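/* Multi-dword writes are framed per flash page: LAST closes a burst
 * on the final dword of the buffer or of a page, and FIRST opens the
 * next one on a page boundary.  E.g. with a hypothetical 256-byte
 * NVRAM_PAGE_SIZE, one burst ends with the dword at offset 0xfc and
 * a new one starts at 0x100.
 */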
8225 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8226                              int buf_size)
8227 {
8228         int rc;
8229         u32 cmd_flags;
8230         u32 val;
8231         u32 written_so_far;
8232
8233         if (buf_size == 1)      /* ethtool */
8234                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8235
8236         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8237                 DP(BNX2X_MSG_NVM,
8238                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8239                    offset, buf_size);
8240                 return -EINVAL;
8241         }
8242
8243         if (offset + buf_size > bp->common.flash_size) {
8244                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8245                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8246                    offset, buf_size, bp->common.flash_size);
8247                 return -EINVAL;
8248         }
8249
8250         /* request access to nvram interface */
8251         rc = bnx2x_acquire_nvram_lock(bp);
8252         if (rc)
8253                 return rc;
8254
8255         /* enable access to nvram interface */
8256         bnx2x_enable_nvram_access(bp);
8257
8258         written_so_far = 0;
8259         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8260         while ((written_so_far < buf_size) && (rc == 0)) {
8261                 if (written_so_far == (buf_size - sizeof(u32)))
8262                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8263                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8264                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8265                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8266                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8267
8268                 memcpy(&val, data_buf, 4);
8269
8270                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8271
8272                 /* advance to the next dword */
8273                 offset += sizeof(u32);
8274                 data_buf += sizeof(u32);
8275                 written_so_far += sizeof(u32);
8276                 cmd_flags = 0;
8277         }
8278
8279         /* disable access to nvram interface */
8280         bnx2x_disable_nvram_access(bp);
8281         bnx2x_release_nvram_lock(bp);
8282
8283         return rc;
8284 }
8285
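/* Writes are routed by the caller-supplied magic: 0x00504859 is ASCII
 * "PHY" ('P' = 0x50, 'H' = 0x48, 'Y' = 0x59) and selects an external
 * PHY firmware download (PMF only); any other magic goes to NVRAM.
 */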
8286 static int bnx2x_set_eeprom(struct net_device *dev,
8287                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8288 {
8289         struct bnx2x *bp = netdev_priv(dev);
8290         int rc;
8291
8292         if (!netif_running(dev))
8293                 return -EAGAIN;
8294
8295         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8296            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8297            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8298            eeprom->len, eeprom->len);
8299
8300         /* parameters already validated in ethtool_set_eeprom */
8301
8302         /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8303         if (eeprom->magic == 0x00504859)
8304                 if (bp->port.pmf) {
8305
8306                         bnx2x_acquire_phy_lock(bp);
8307                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8308                                              bp->link_params.ext_phy_config,
8309                                              (bp->state != BNX2X_STATE_CLOSED),
8310                                              eebuf, eeprom->len);
8311                         if ((bp->state == BNX2X_STATE_OPEN) ||
8312                             (bp->state == BNX2X_STATE_DISABLED)) {
8313                                 rc |= bnx2x_link_reset(&bp->link_params,
8314                                                        &bp->link_vars);
8315                                 rc |= bnx2x_phy_init(&bp->link_params,
8316                                                      &bp->link_vars);
8317                         }
8318                         bnx2x_release_phy_lock(bp);
8319
8320                 } else /* Only the PMF can access the PHY */
8321                         return -EINVAL;
8322         else
8323                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8324
8325         return rc;
8326 }
8327
8328 static int bnx2x_get_coalesce(struct net_device *dev,
8329                               struct ethtool_coalesce *coal)
8330 {
8331         struct bnx2x *bp = netdev_priv(dev);
8332
8333         memset(coal, 0, sizeof(struct ethtool_coalesce));
8334
8335         coal->rx_coalesce_usecs = bp->rx_ticks;
8336         coal->tx_coalesce_usecs = bp->tx_ticks;
8337
8338         return 0;
8339 }
8340
8341 static int bnx2x_set_coalesce(struct net_device *dev,
8342                               struct ethtool_coalesce *coal)
8343 {
8344         struct bnx2x *bp = netdev_priv(dev);
8345
8346         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8347         if (bp->rx_ticks > 3000)
8348                 bp->rx_ticks = 3000;
8349
8350         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8351         if (bp->tx_ticks > 3000)
8352                 bp->tx_ticks = 3000;
8353
8354         if (netif_running(dev))
8355                 bnx2x_update_coalesce(bp);
8356
8357         return 0;
8358 }
8359
8360 static void bnx2x_get_ringparam(struct net_device *dev,
8361                                 struct ethtool_ringparam *ering)
8362 {
8363         struct bnx2x *bp = netdev_priv(dev);
8364
8365         ering->rx_max_pending = MAX_RX_AVAIL;
8366         ering->rx_mini_max_pending = 0;
8367         ering->rx_jumbo_max_pending = 0;
8368
8369         ering->rx_pending = bp->rx_ring_size;
8370         ering->rx_mini_pending = 0;
8371         ering->rx_jumbo_pending = 0;
8372
8373         ering->tx_max_pending = MAX_TX_AVAIL;
8374         ering->tx_pending = bp->tx_ring_size;
8375 }
8376
8377 static int bnx2x_set_ringparam(struct net_device *dev,
8378                                struct ethtool_ringparam *ering)
8379 {
8380         struct bnx2x *bp = netdev_priv(dev);
8381         int rc = 0;
8382
8383         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8384             (ering->tx_pending > MAX_TX_AVAIL) ||
8385             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8386                 return -EINVAL;
8387
8388         bp->rx_ring_size = ering->rx_pending;
8389         bp->tx_ring_size = ering->tx_pending;
8390
8391         if (netif_running(dev)) {
8392                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8393                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8394         }
8395
8396         return rc;
8397 }
8398
8399 static void bnx2x_get_pauseparam(struct net_device *dev,
8400                                  struct ethtool_pauseparam *epause)
8401 {
8402         struct bnx2x *bp = netdev_priv(dev);
8403
8404         epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8405                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8406
8407         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8408                             BNX2X_FLOW_CTRL_RX);
8409         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8410                             BNX2X_FLOW_CTRL_TX);
8411
8412         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8413            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8414            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8415 }
8416
8417 static int bnx2x_set_pauseparam(struct net_device *dev,
8418                                 struct ethtool_pauseparam *epause)
8419 {
8420         struct bnx2x *bp = netdev_priv(dev);
8421
8422         if (IS_E1HMF(bp))
8423                 return 0;
8424
8425         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8426            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8427            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8428
8429         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8430
8431         if (epause->rx_pause)
8432                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8433
8434         if (epause->tx_pause)
8435                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8436
8437         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8438                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8439
8440         if (epause->autoneg) {
8441                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8442                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
8443                         return -EINVAL;
8444                 }
8445
8446                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8447                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8448         }
8449
8450         DP(NETIF_MSG_LINK,
8451            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8452
8453         if (netif_running(dev)) {
8454                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8455                 bnx2x_link_set(bp);
8456         }
8457
8458         return 0;
8459 }
8460
8461 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8462 {
8463         struct bnx2x *bp = netdev_priv(dev);
8464         int changed = 0;
8465         int rc = 0;
8466
8467         /* TPA requires Rx CSUM offloading */
8468         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8469                 if (!(dev->features & NETIF_F_LRO)) {
8470                         dev->features |= NETIF_F_LRO;
8471                         bp->flags |= TPA_ENABLE_FLAG;
8472                         changed = 1;
8473                 }
8474
8475         } else if (dev->features & NETIF_F_LRO) {
8476                 dev->features &= ~NETIF_F_LRO;
8477                 bp->flags &= ~TPA_ENABLE_FLAG;
8478                 changed = 1;
8479         }
8480
8481         if (changed && netif_running(dev)) {
8482                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8483                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8484         }
8485
8486         return rc;
8487 }
8488
8489 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8490 {
8491         struct bnx2x *bp = netdev_priv(dev);
8492
8493         return bp->rx_csum;
8494 }
8495
8496 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8497 {
8498         struct bnx2x *bp = netdev_priv(dev);
8499         int rc = 0;
8500
8501         bp->rx_csum = data;
8502
8503         /* Disable TPA when Rx CSUM is disabled; otherwise all
8504            TPA'ed packets will be discarded due to a wrong TCP CSUM */
8505         if (!data) {
8506                 u32 flags = ethtool_op_get_flags(dev);
8507
8508                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8509         }
8510
8511         return rc;
8512 }
8513
8514 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8515 {
8516         if (data) {
8517                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8518                 dev->features |= NETIF_F_TSO6;
8519         } else {
8520                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8521                 dev->features &= ~NETIF_F_TSO6;
8522         }
8523
8524         return 0;
8525 }
8526
8527 static const struct {
8528         char string[ETH_GSTRING_LEN];
8529 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8530         { "register_test (offline)" },
8531         { "memory_test (offline)" },
8532         { "loopback_test (offline)" },
8533         { "nvram_test (online)" },
8534         { "interrupt_test (online)" },
8535         { "link_test (online)" },
8536         { "idle check (online)" },
8537         { "MC errors (online)" }
8538 };
8539
8540 static int bnx2x_self_test_count(struct net_device *dev)
8541 {
8542         return BNX2X_NUM_TESTS;
8543 }
8544
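/* reg_tbl[] lists port-0 register offsets; offset1 is the per-port
 * stride, so the register actually exercised is
 * offset0 + port * offset1.  Each mask covers the writable bits that
 * must read back exactly what was written.
 */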
8545 static int bnx2x_test_registers(struct bnx2x *bp)
8546 {
8547         int idx, i, rc = -ENODEV;
8548         u32 wr_val = 0;
8549         int port = BP_PORT(bp);
8550         static const struct {
8551                 u32  offset0;
8552                 u32  offset1;
8553                 u32  mask;
8554         } reg_tbl[] = {
8555 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8556                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8557                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
8558                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8559                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8560                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8561                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8562                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8563                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8564                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8565 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8566                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
8567                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8568                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8569                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8570                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8571                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8572                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8573                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8574                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8575 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8576                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8577                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8578                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8579                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8580                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8581                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8582                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8583                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8584                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8585 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8586                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8587                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8588                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8589                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8590                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8591                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8592                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8593
8594                 { 0xffffffff, 0, 0x00000000 }
8595         };
8596
8597         if (!netif_running(bp->dev))
8598                 return rc;
8599
8600         /* Run the test twice:
8601            first writing 0x00000000, then writing 0xffffffff */
8602         for (idx = 0; idx < 2; idx++) {
8603
8604                 switch (idx) {
8605                 case 0:
8606                         wr_val = 0;
8607                         break;
8608                 case 1:
8609                         wr_val = 0xffffffff;
8610                         break;
8611                 }
8612
8613                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8614                         u32 offset, mask, save_val, val;
8615
8616                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8617                         mask = reg_tbl[i].mask;
8618
8619                         save_val = REG_RD(bp, offset);
8620
8621                         REG_WR(bp, offset, wr_val);
8622                         val = REG_RD(bp, offset);
8623
8624                         /* Restore the original register's value */
8625                         REG_WR(bp, offset, save_val);
8626
8627                         /* verify the read-back value matches the expected one */
8628                         if ((val & mask) != (wr_val & mask))
8629                                 goto test_reg_exit;
8630                 }
8631         }
8632
8633         rc = 0;
8634
8635 test_reg_exit:
8636         return rc;
8637 }
8638
8639 static int bnx2x_test_memory(struct bnx2x *bp)
8640 {
8641         int i, j, rc = -ENODEV;
8642         u32 val;
8643         static const struct {
8644                 u32 offset;
8645                 int size;
8646         } mem_tbl[] = {
8647                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8648                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8649                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8650                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8651                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8652                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8653                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8654
8655                 { 0xffffffff, 0 }
8656         };
8657         static const struct {
8658                 char *name;
8659                 u32 offset;
8660                 u32 e1_mask;
8661                 u32 e1h_mask;
8662         } prty_tbl[] = {
8663                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
8664                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
8665                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
8666                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
8667                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
8668                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
8669
8670                 { NULL, 0xffffffff, 0, 0 }
8671         };
8672
8673         if (!netif_running(bp->dev))
8674                 return rc;
8675
8676         /* Go through all the memories */
8677         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8678                 for (j = 0; j < mem_tbl[i].size; j++)
8679                         REG_RD(bp, mem_tbl[i].offset + j*4);
8680
8681         /* Check the parity status */
8682         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8683                 val = REG_RD(bp, prty_tbl[i].offset);
8684                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8685                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8686                         DP(NETIF_MSG_HW,
8687                            "%s is 0x%x\n", prty_tbl[i].name, val);
8688                         goto test_mem_exit;
8689                 }
8690         }
8691
8692         rc = 0;
8693
8694 test_mem_exit:
8695         return rc;
8696 }
8697
8698 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8699 {
8700         int cnt = 1000;
8701
8702         if (link_up)
8703                 while (bnx2x_link_test(bp) && cnt--)
8704                         msleep(10);
8705 }
8706
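/* Loopback test flow: place the link in MAC (BMAC) or PHY (XGXS)
 * loopback, hand-craft one 1514-byte frame with an incrementing
 * payload, queue it on ring 0 and ring the doorbell, then verify
 * that exactly one packet completed on TX, arrived on RX and came
 * back byte-identical.
 */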
8707 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8708 {
8709         unsigned int pkt_size, num_pkts, i;
8710         struct sk_buff *skb;
8711         unsigned char *packet;
8712         struct bnx2x_fastpath *fp = &bp->fp[0];
8713         u16 tx_start_idx, tx_idx;
8714         u16 rx_start_idx, rx_idx;
8715         u16 pkt_prod;
8716         struct sw_tx_bd *tx_buf;
8717         struct eth_tx_bd *tx_bd;
8718         dma_addr_t mapping;
8719         union eth_rx_cqe *cqe;
8720         u8 cqe_fp_flags;
8721         struct sw_rx_bd *rx_buf;
8722         u16 len;
8723         int rc = -ENODEV;
8724
8725         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8726                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8727                 bnx2x_acquire_phy_lock(bp);
8728                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8729                 bnx2x_release_phy_lock(bp);
8730
8731         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8732                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8733                 bnx2x_acquire_phy_lock(bp);
8734                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8735                 bnx2x_release_phy_lock(bp);
8736                 /* wait until link state is restored */
8737                 bnx2x_wait_for_link(bp, link_up);
8738
8739         } else
8740                 return -EINVAL;
8741
8742         pkt_size = 1514;
8743         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8744         if (!skb) {
8745                 rc = -ENOMEM;
8746                 goto test_loopback_exit;
8747         }
8748         packet = skb_put(skb, pkt_size);
8749         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8750         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8751         for (i = ETH_HLEN; i < pkt_size; i++)
8752                 packet[i] = (unsigned char) (i & 0xff);
8753
8754         num_pkts = 0;
8755         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8756         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8757
8758         pkt_prod = fp->tx_pkt_prod++;
8759         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8760         tx_buf->first_bd = fp->tx_bd_prod;
8761         tx_buf->skb = skb;
8762
8763         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8764         mapping = pci_map_single(bp->pdev, skb->data,
8765                                  skb_headlen(skb), PCI_DMA_TODEVICE);
8766         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8767         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8768         tx_bd->nbd = cpu_to_le16(1);
8769         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8770         tx_bd->vlan = cpu_to_le16(pkt_prod);
8771         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8772                                        ETH_TX_BD_FLAGS_END_BD);
8773         tx_bd->general_data = ((UNICAST_ADDRESS <<
8774                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8775
8776         wmb();
8777
8778         fp->hw_tx_prods->bds_prod =
8779                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8780         mb(); /* FW restriction: must not reorder writing nbd and packets */
8781         fp->hw_tx_prods->packets_prod =
8782                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8783         DOORBELL(bp, FP_IDX(fp), 0);
8784
8785         mmiowb();
8786
8787         num_pkts++;
8788         fp->tx_bd_prod++;
8789         bp->dev->trans_start = jiffies;
8790
8791         udelay(100);
8792
8793         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8794         if (tx_idx != tx_start_idx + num_pkts)
8795                 goto test_loopback_exit;
8796
8797         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8798         if (rx_idx != rx_start_idx + num_pkts)
8799                 goto test_loopback_exit;
8800
8801         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8802         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8803         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8804                 goto test_loopback_rx_exit;
8805
8806         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8807         if (len != pkt_size)
8808                 goto test_loopback_rx_exit;
8809
8810         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8811         skb = rx_buf->skb;
8812         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8813         for (i = ETH_HLEN; i < pkt_size; i++)
8814                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8815                         goto test_loopback_rx_exit;
8816
8817         rc = 0;
8818
8819 test_loopback_rx_exit:
8820
8821         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8822         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8823         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8824         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8825
8826         /* Update producers */
8827         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8828                              fp->rx_sge_prod);
8829
8830 test_loopback_exit:
8831         bp->link_params.loopback_mode = LOOPBACK_NONE;
8832
8833         return rc;
8834 }
8835
8836 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8837 {
8838         int rc = 0;
8839
8840         if (!netif_running(bp->dev))
8841                 return BNX2X_LOOPBACK_FAILED;
8842
8843         bnx2x_netif_stop(bp, 1);
8844
8845         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8846                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8847                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8848         }
8849
8850         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8851                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8852                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8853         }
8854
8855         bnx2x_netif_start(bp);
8856
8857         return rc;
8858 }
8859
8860 #define CRC32_RESIDUAL                  0xdebb20e3
8861
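/* Each nvram_tbl[] region evidently carries its own little-endian
 * CRC-32, so ether_crc_le() over the whole region (data plus stored
 * CRC) must yield the well-known CRC-32 residue 0xdebb20e3 no matter
 * what the region contains.
 */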
8862 static int bnx2x_test_nvram(struct bnx2x *bp)
8863 {
8864         static const struct {
8865                 int offset;
8866                 int size;
8867         } nvram_tbl[] = {
8868                 {     0,  0x14 }, /* bootstrap */
8869                 {  0x14,  0xec }, /* dir */
8870                 { 0x100, 0x350 }, /* manuf_info */
8871                 { 0x450,  0xf0 }, /* feature_info */
8872                 { 0x640,  0x64 }, /* upgrade_key_info */
8873                 { 0x6a4,  0x64 },
8874                 { 0x708,  0x70 }, /* manuf_key_info */
8875                 { 0x778,  0x70 },
8876                 {     0,     0 }
8877         };
8878         u32 buf[0x350 / 4];
8879         u8 *data = (u8 *)buf;
8880         int i, rc;
8881         u32 magic, csum;
8882
8883         rc = bnx2x_nvram_read(bp, 0, data, 4);
8884         if (rc) {
8885                 DP(NETIF_MSG_PROBE, "cannot read the magic value (rc %d)\n", rc);
8886                 goto test_nvram_exit;
8887         }
8888
8889         magic = be32_to_cpu(buf[0]);
8890         if (magic != 0x669955aa) {
8891                 DP(NETIF_MSG_PROBE, "wrong magic value (0x%08x)\n", magic);
8892                 rc = -ENODEV;
8893                 goto test_nvram_exit;
8894         }
8895
8896         for (i = 0; nvram_tbl[i].size; i++) {
8897
8898                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8899                                       nvram_tbl[i].size);
8900                 if (rc) {
8901                         DP(NETIF_MSG_PROBE,
8902                            "cannot read nvram_tbl[%d] data (rc %d)\n", i, rc);
8903                         goto test_nvram_exit;
8904                 }
8905
8906                 csum = ether_crc_le(nvram_tbl[i].size, data);
8907                 if (csum != CRC32_RESIDUAL) {
8908                         DP(NETIF_MSG_PROBE,
8909                            "wrong nvram_tbl[%d] csum (0x%08x)\n", i, csum);
8910                         rc = -ENODEV;
8911                         goto test_nvram_exit;
8912                 }
8913         }
8914
8915 test_nvram_exit:
8916         return rc;
8917 }
8918
8919 static int bnx2x_test_intr(struct bnx2x *bp)
8920 {
8921         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8922         int i, rc;
8923
8924         if (!netif_running(bp->dev))
8925                 return -ENODEV;
8926
8927         config->hdr.length_6b = 0;
8928         if (CHIP_IS_E1(bp))
8929                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
8930         else
8931                 config->hdr.offset = BP_FUNC(bp);
8932         config->hdr.client_id = BP_CL_ID(bp);
8933         config->hdr.reserved1 = 0;
8934
8935         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8936                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8937                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8938         if (rc == 0) {
8939                 bp->set_mac_pending++;
8940                 for (i = 0; i < 10; i++) {
8941                         if (!bp->set_mac_pending)
8942                                 break;
8943                         msleep_interruptible(10);
8944                 }
8945                 if (i == 10)
8946                         rc = -ENODEV;
8947         }
8948
8949         return rc;
8950 }
8951
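/* buf[] indices match bnx2x_tests_str_arr[] above: 0-2 are the
 * offline register/memory/loopback tests, 3-5 the online
 * nvram/interrupt/link tests, and 7 the MC assert count; buf[6]
 * ("idle check") is not filled in by this function.
 */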
8952 static void bnx2x_self_test(struct net_device *dev,
8953                             struct ethtool_test *etest, u64 *buf)
8954 {
8955         struct bnx2x *bp = netdev_priv(dev);
8956
8957         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8958
8959         if (!netif_running(dev))
8960                 return;
8961
8962         /* offline tests are not supported in MF mode */
8963         if (IS_E1HMF(bp))
8964                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8965
8966         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8967                 u8 link_up;
8968
8969                 link_up = bp->link_vars.link_up;
8970                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8971                 bnx2x_nic_load(bp, LOAD_DIAG);
8972                 /* wait until link state is restored */
8973                 bnx2x_wait_for_link(bp, link_up);
8974
8975                 if (bnx2x_test_registers(bp) != 0) {
8976                         buf[0] = 1;
8977                         etest->flags |= ETH_TEST_FL_FAILED;
8978                 }
8979                 if (bnx2x_test_memory(bp) != 0) {
8980                         buf[1] = 1;
8981                         etest->flags |= ETH_TEST_FL_FAILED;
8982                 }
8983                 buf[2] = bnx2x_test_loopback(bp, link_up);
8984                 if (buf[2] != 0)
8985                         etest->flags |= ETH_TEST_FL_FAILED;
8986
8987                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8988                 bnx2x_nic_load(bp, LOAD_NORMAL);
8989                 /* wait until link state is restored */
8990                 bnx2x_wait_for_link(bp, link_up);
8991         }
8992         if (bnx2x_test_nvram(bp) != 0) {
8993                 buf[3] = 1;
8994                 etest->flags |= ETH_TEST_FL_FAILED;
8995         }
8996         if (bnx2x_test_intr(bp) != 0) {
8997                 buf[4] = 1;
8998                 etest->flags |= ETH_TEST_FL_FAILED;
8999         }
9000         if (bp->port.pmf)
9001                 if (bnx2x_link_test(bp) != 0) {
9002                         buf[5] = 1;
9003                         etest->flags |= ETH_TEST_FL_FAILED;
9004                 }
9005         buf[7] = bnx2x_mc_assert(bp);
9006         if (buf[7] != 0)
9007                 etest->flags |= ETH_TEST_FL_FAILED;
9008
9009 #ifdef BNX2X_EXTRA_DEBUG
9010         bnx2x_panic_dump(bp);
9011 #endif
9012 }
9013
9014 static const struct {
9015         long offset;
9016         int size;
9017         u32 flags;
9018 #define STATS_FLAGS_PORT                1
9019 #define STATS_FLAGS_FUNC                2
9020         u8 string[ETH_GSTRING_LEN];
9021 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9022 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9023                                 8, STATS_FLAGS_FUNC, "rx_bytes" },
9024         { STATS_OFFSET32(error_bytes_received_hi),
9025                                 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9026         { STATS_OFFSET32(total_bytes_transmitted_hi),
9027                                 8, STATS_FLAGS_FUNC, "tx_bytes" },
9028         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9029                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9030         { STATS_OFFSET32(total_unicast_packets_received_hi),
9031                                 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
9032         { STATS_OFFSET32(total_multicast_packets_received_hi),
9033                                 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
9034         { STATS_OFFSET32(total_broadcast_packets_received_hi),
9035                                 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9036         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9037                                 8, STATS_FLAGS_FUNC, "tx_packets" },
9038         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9039                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9040 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9041                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9042         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9043                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9044         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9045                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9046         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9047                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9048         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9049                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9050         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9051                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9052         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9053                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9054         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9055                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9056         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9057                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9058         { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9059                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9060 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9061                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9062         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9063                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9064         { STATS_OFFSET32(jabber_packets_received),
9065                                 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9066         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9067                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9068         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9069                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9070         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9071                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9072         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9073                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9074         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9075                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9076         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9077                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9078         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9079                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9080 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9081                                 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9082         { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9083                                 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9084         { STATS_OFFSET32(tx_stat_outxonsent_hi),
9085                                 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9086         { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9087                                 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9088         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9089                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9090         { STATS_OFFSET32(mac_filter_discard),
9091                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9092         { STATS_OFFSET32(no_buff_discard),
9093                                 4, STATS_FLAGS_FUNC, "rx_discards" },
9094         { STATS_OFFSET32(xxoverflow_discard),
9095                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9096         { STATS_OFFSET32(brb_drop_hi),
9097                                 8, STATS_FLAGS_PORT, "brb_discard" },
9098         { STATS_OFFSET32(brb_truncate_hi),
9099                                 8, STATS_FLAGS_PORT, "brb_truncate" },
9100 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9101                                 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9102         { STATS_OFFSET32(rx_skb_alloc_failed),
9103                                 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9104 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9105                                 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9106 };
9107
9108 #define IS_NOT_E1HMF_STAT(bp, i) \
9109                 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9110
9111 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9112 {
9113         struct bnx2x *bp = netdev_priv(dev);
9114         int i, j;
9115
9116         switch (stringset) {
9117         case ETH_SS_STATS:
9118                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9119                         if (IS_NOT_E1HMF_STAT(bp, i))
9120                                 continue;
9121                         strcpy(buf + j*ETH_GSTRING_LEN,
9122                                bnx2x_stats_arr[i].string);
9123                         j++;
9124                 }
9125                 break;
9126
9127         case ETH_SS_TEST:
9128                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9129                 break;
9130         }
9131 }
9132
9133 static int bnx2x_get_stats_count(struct net_device *dev)
9134 {
9135         struct bnx2x *bp = netdev_priv(dev);
9136         int i, num_stats = 0;
9137
9138         for (i = 0; i < BNX2X_NUM_STATS; i++) {
9139                 if (IS_NOT_E1HMF_STAT(bp, i))
9140                         continue;
9141                 num_stats++;
9142         }
9143         return num_stats;
9144 }
9145
9146 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9147                                     struct ethtool_stats *stats, u64 *buf)
9148 {
9149         struct bnx2x *bp = netdev_priv(dev);
9150         u32 *hw_stats = (u32 *)&bp->eth_stats;
9151         int i, j;
9152
9153         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9154                 if (IS_NOT_E1HMF_STAT(bp, i))
9155                         continue;
9156
9157                 if (bnx2x_stats_arr[i].size == 0) {
9158                         /* skip this counter */
9159                         buf[j] = 0;
9160                         j++;
9161                         continue;
9162                 }
9163                 if (bnx2x_stats_arr[i].size == 4) {
9164                         /* 4-byte counter */
9165                         buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9166                         j++;
9167                         continue;
9168                 }
9169                 /* 8-byte counter */
9170                 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9171                                   *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9172                 j++;
9173         }
9174 }
9175
9176 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9177 {
9178         struct bnx2x *bp = netdev_priv(dev);
9179         int port = BP_PORT(bp);
9180         int i;
9181
9182         if (!netif_running(dev))
9183                 return 0;
9184
9185         if (!bp->port.pmf)
9186                 return 0;
9187
9188         if (data == 0)
9189                 data = 2;
9190
9191         for (i = 0; i < (data * 2); i++) {
9192                 if ((i % 2) == 0)
9193                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9194                                       bp->link_params.hw_led_mode,
9195                                       bp->link_params.chip_id);
9196                 else
9197                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9198                                       bp->link_params.hw_led_mode,
9199                                       bp->link_params.chip_id);
9200
9201                 msleep_interruptible(500);
9202                 if (signal_pending(current))
9203                         break;
9204         }
9205
9206         if (bp->link_vars.link_up)
9207                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9208                               bp->link_vars.line_speed,
9209                               bp->link_params.hw_led_mode,
9210                               bp->link_params.chip_id);
9211
9212         return 0;
9213 }
9214
9215 static struct ethtool_ops bnx2x_ethtool_ops = {
9216         .get_settings           = bnx2x_get_settings,
9217         .set_settings           = bnx2x_set_settings,
9218         .get_drvinfo            = bnx2x_get_drvinfo,
9219         .get_wol                = bnx2x_get_wol,
9220         .set_wol                = bnx2x_set_wol,
9221         .get_msglevel           = bnx2x_get_msglevel,
9222         .set_msglevel           = bnx2x_set_msglevel,
9223         .nway_reset             = bnx2x_nway_reset,
9224         .get_link               = ethtool_op_get_link,
9225         .get_eeprom_len         = bnx2x_get_eeprom_len,
9226         .get_eeprom             = bnx2x_get_eeprom,
9227         .set_eeprom             = bnx2x_set_eeprom,
9228         .get_coalesce           = bnx2x_get_coalesce,
9229         .set_coalesce           = bnx2x_set_coalesce,
9230         .get_ringparam          = bnx2x_get_ringparam,
9231         .set_ringparam          = bnx2x_set_ringparam,
9232         .get_pauseparam         = bnx2x_get_pauseparam,
9233         .set_pauseparam         = bnx2x_set_pauseparam,
9234         .get_rx_csum            = bnx2x_get_rx_csum,
9235         .set_rx_csum            = bnx2x_set_rx_csum,
9236         .get_tx_csum            = ethtool_op_get_tx_csum,
9237         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9238         .set_flags              = bnx2x_set_flags,
9239         .get_flags              = ethtool_op_get_flags,
9240         .get_sg                 = ethtool_op_get_sg,
9241         .set_sg                 = ethtool_op_set_sg,
9242         .get_tso                = ethtool_op_get_tso,
9243         .set_tso                = bnx2x_set_tso,
9244         .self_test_count        = bnx2x_self_test_count,
9245         .self_test              = bnx2x_self_test,
9246         .get_strings            = bnx2x_get_strings,
9247         .phys_id                = bnx2x_phys_id,
9248         .get_stats_count        = bnx2x_get_stats_count,
9249         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9250 };
9251
9252 /* end of ethtool_ops */
9253
9254 /****************************************************************************
9255 * General service functions
9256 ****************************************************************************/
9257
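/* The low two bits of the PCI PM control/status register encode the
 * power state (0 = D0, 3 = D3hot), which is what the bare
 * "pmcsr |= 3" below selects.
 */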
9258 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9259 {
9260         u16 pmcsr;
9261
9262         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9263
9264         switch (state) {
9265         case PCI_D0:
9266                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9267                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9268                                        PCI_PM_CTRL_PME_STATUS));
9269
9270                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9271                         /* delay required during transition out of D3hot */
9272                         msleep(20);
9273                 break;
9274
9275         case PCI_D3hot:
9276                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9277                 pmcsr |= 3;
9278
9279                 if (bp->wol)
9280                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9281
9282                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9283                                       pmcsr);
9284
9285         /* No more memory access after this point until
9286          * the device is brought back to D0.
9287          */
9288                 break;
9289
9290         default:
9291                 return -EINVAL;
9292         }
9293         return 0;
9294 }
9295
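/* The last entry of each RCQ page is a "next page" pointer, not a
 * real completion, so a hardware consumer index that lands on it is
 * bumped past it before the comparison with our software index.
 */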
9296 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9297 {
9298         u16 rx_cons_sb;
9299
9300         /* Tell compiler that status block fields can change */
9301         barrier();
9302         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9303         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9304                 rx_cons_sb++;
9305         return (fp->rx_comp_cons != rx_cons_sb);
9306 }
9307
9308 /*
9309  * net_device service functions
9310  */
9311
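/* NAPI poll: drain TX completions and up to `budget' RX completions,
 * then, only if the budget was not exhausted and the status block
 * shows no further work, complete NAPI and re-enable the IGU
 * interrupt through the final bnx2x_ack_sb() call.
 */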
9312 static int bnx2x_poll(struct napi_struct *napi, int budget)
9313 {
9314         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9315                                                  napi);
9316         struct bnx2x *bp = fp->bp;
9317         int work_done = 0;
9318
9319 #ifdef BNX2X_STOP_ON_ERROR
9320         if (unlikely(bp->panic))
9321                 goto poll_panic;
9322 #endif
9323
9324         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9325         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9326         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9327
9328         bnx2x_update_fpsb_idx(fp);
9329
9330         if (bnx2x_has_tx_work(fp))
9331                 bnx2x_tx_int(fp, budget);
9332
9333         if (bnx2x_has_rx_work(fp))
9334                 work_done = bnx2x_rx_int(fp, budget);
9335         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9336
9337         /* must not complete if we consumed full budget */
9338         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9339
9340 #ifdef BNX2X_STOP_ON_ERROR
9341 poll_panic:
9342 #endif
9343                 netif_rx_complete(napi);
9344
9345                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9346                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9347                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9348                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9349         }
9350         return work_done;
9351 }
9352
9353
9354 /* We split the first BD into a headers BD and a data BD
9355  * to ease the pain of our fellow microcode engineers;
9356  * we use one DMA mapping for both BDs.
9357  * So far this has only been observed to happen
9358  * in Other Operating Systems(TM).
9359  */
9360 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9361                                    struct bnx2x_fastpath *fp,
9362                                    struct eth_tx_bd **tx_bd, u16 hlen,
9363                                    u16 bd_prod, int nbd)
9364 {
9365         struct eth_tx_bd *h_tx_bd = *tx_bd;
9366         struct eth_tx_bd *d_tx_bd;
9367         dma_addr_t mapping;
9368         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9369
9370         /* first fix first BD */
9371         h_tx_bd->nbd = cpu_to_le16(nbd);
9372         h_tx_bd->nbytes = cpu_to_le16(hlen);
9373
9374         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9375            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9376            h_tx_bd->addr_lo, h_tx_bd->nbd);
9377
9378         /* now get a new data BD
9379          * (after the pbd) and fill it */
9380         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9381         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9382
9383         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9384                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9385
9386         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9387         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9388         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9389         d_tx_bd->vlan = 0;
9390         /* this marks the BD as one that has no individual mapping;
9391          * the FW ignores this flag in a BD not marked start
9392          */
9393         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9394         DP(NETIF_MSG_TX_QUEUED,
9395            "TSO split data size is %d (%x:%x)\n",
9396            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9397
9398         /* update tx_bd for marking the last BD flag */
9399         *tx_bd = d_tx_bd;
9400
9401         return bd_prod;
9402 }
9403
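/* Adjust a hardware-computed checksum whose coverage is off by `fix'
 * bytes around the transport header: for fix > 0 subtract the partial
 * sum of the bytes just before the header, for fix < 0 add the
 * partial sum of the first -fix bytes at the header; swab16() returns
 * the result in the byte order the parsing BD appears to expect.
 */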
9404 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9405 {
9406         if (fix > 0)
9407                 csum = (u16) ~csum_fold(csum_sub(csum,
9408                                 csum_partial(t_header - fix, fix, 0)));
9409
9410         else if (fix < 0)
9411                 csum = (u16) ~csum_fold(csum_add(csum,
9412                                 csum_partial(t_header, -fix, 0)));
9413
9414         return swab16(csum);
9415 }
9416
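/* Classify a frame for the TX path as a combination of XMIT_* flags;
 * e.g. an IPv4 TCP frame with TSO yields
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4.
 */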
9417 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9418 {
9419         u32 rc;
9420
9421         if (skb->ip_summed != CHECKSUM_PARTIAL)
9422                 rc = XMIT_PLAIN;
9423
9424         else {
9425                 if (skb->protocol == htons(ETH_P_IPV6)) {
9426                         rc = XMIT_CSUM_V6;
9427                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9428                                 rc |= XMIT_CSUM_TCP;
9429
9430                 } else {
9431                         rc = XMIT_CSUM_V4;
9432                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9433                                 rc |= XMIT_CSUM_TCP;
9434                 }
9435         }
9436
9437         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9438                 rc |= XMIT_GSO_V4;
9439
9440         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9441                 rc |= XMIT_GSO_V6;
9442
9443         return rc;
9444 }
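/* Example classifications (illustrative): a frame without
 * CHECKSUM_PARTIAL yields XMIT_PLAIN; an IPv4 TCP frame with
 * CHECKSUM_PARTIAL yields XMIT_CSUM_V4 | XMIT_CSUM_TCP; the same frame
 * with GSO enabled additionally sets XMIT_GSO_V4.
 */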
9445
9446 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9447 /* check if packet requires linearization (packet is too fragmented) */
9448 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9449                              u32 xmit_type)
9450 {
9451         int to_copy = 0;
9452         int hlen = 0;
9453         int first_bd_sz = 0;
9454
9455         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9456         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9457
9458                 if (xmit_type & XMIT_GSO) {
9459                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9460                         /* Check if LSO packet needs to be copied:
9461                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9462                         int wnd_size = MAX_FETCH_BD - 3;
9463                         /* Number of windows to check */
9464                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9465                         int wnd_idx = 0;
9466                         int frag_idx = 0;
9467                         u32 wnd_sum = 0;
9468
9469                         /* Headers length */
9470                         hlen = (int)(skb_transport_header(skb) - skb->data) +
9471                                 tcp_hdrlen(skb);
9472
9473                         /* Amount of data (w/o headers) in the linear part of the SKB */
9474                         first_bd_sz = skb_headlen(skb) - hlen;
9475
9476                         wnd_sum  = first_bd_sz;
9477
9478                         /* Calculate the first sum - it's special */
9479                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9480                                 wnd_sum +=
9481                                         skb_shinfo(skb)->frags[frag_idx].size;
9482
9483                         /* If there was data in the linear part of the skb - check it */
9484                         if (first_bd_sz > 0) {
9485                                 if (unlikely(wnd_sum < lso_mss)) {
9486                                         to_copy = 1;
9487                                         goto exit_lbl;
9488                                 }
9489
9490                                 wnd_sum -= first_bd_sz;
9491                         }
9492
9493                         /* Others are easier: run through the frag list and
9494                            check all windows */
9495                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9496                                 wnd_sum +=
9497                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9498
9499                                 if (unlikely(wnd_sum < lso_mss)) {
9500                                         to_copy = 1;
9501                                         break;
9502                                 }
9503                                 wnd_sum -=
9504                                         skb_shinfo(skb)->frags[wnd_idx].size;
9505                         }
9506
9507                 } else {
9508                         /* in the non-LSO case, a packet this fragmented
9509                            must always be linearized */
9510                         to_copy = 1;
9511                 }
9512         }
9513
9514 exit_lbl:
9515         if (unlikely(to_copy))
9516                 DP(NETIF_MSG_TX_QUEUED,
9517                    "Linearization IS REQUIRED for %s packet. "
9518                    "num_frags %d  hlen %d  first_bd_sz %d\n",
9519                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9520                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9521
9522         return to_copy;
9523 }
9524 #endif
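/* Window walk example (symbolic, for illustration): a packet with
 * wnd_size + 2 frags gives num_wnds = 2, so the loop above checks the
 * frag windows [0..wnd_size-1], [1..wnd_size] and [2..wnd_size+1] in
 * addition to the initial window that includes the linear data; if any
 * window sums to less than lso_mss, one MSS could span more BDs than
 * the FW can fetch, so the skb is linearized instead.
 */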
9525
9526 /* called with netif_tx_lock
9527  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9528  * netif_wake_queue()
9529  */
9530 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9531 {
9532         struct bnx2x *bp = netdev_priv(dev);
9533         struct bnx2x_fastpath *fp;
9534         struct sw_tx_bd *tx_buf;
9535         struct eth_tx_bd *tx_bd;
9536         struct eth_tx_parse_bd *pbd = NULL;
9537         u16 pkt_prod, bd_prod;
9538         int nbd, fp_index;
9539         dma_addr_t mapping;
9540         u32 xmit_type = bnx2x_xmit_type(bp, skb);
9541         int vlan_off = (bp->e1hov ? 4 : 0);
9542         int i;
9543         u8 hlen = 0;
9544
9545 #ifdef BNX2X_STOP_ON_ERROR
9546         if (unlikely(bp->panic))
9547                 return NETDEV_TX_BUSY;
9548 #endif
9549
9550         fp_index = (smp_processor_id() % bp->num_queues);
9551         fp = &bp->fp[fp_index];
9552
9553         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9554                 bp->eth_stats.driver_xoff++;
9555                 netif_stop_queue(dev);
9556                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9557                 return NETDEV_TX_BUSY;
9558         }
9559
9560         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9561            "  gso type %x  xmit_type %x\n",
9562            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9563            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9564
9565 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9566         /* First, check if we need to linearize the skb
9567            (due to FW restrictions) */
9568         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9569                 /* Statistics of linearization */
9570                 bp->lin_cnt++;
9571                 if (skb_linearize(skb) != 0) {
9572                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9573                            "silently dropping this SKB\n");
9574                         dev_kfree_skb_any(skb);
9575                         return NETDEV_TX_OK;
9576                 }
9577         }
9578 #endif
9579
9580         /*
9581         Please read carefully. First we use one BD which we mark as start,
9582         then, for TSO or checksum offload, we add a parsing info BD,
9583         and only then the rest of the TSO BDs.
9584         (Don't forget to mark the last one as last,
9585         and to unmap only AFTER you write to the BD ...)
9586         And above all, all pbd sizes are in words - NOT in DWORDS!
9587         */
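        /* Resulting BD chain for a typical TSO packet (illustrative):
         *
         *   start BD (headers) -> parse BD -> split data BD -> frag BDs,
         *   with ETH_TX_BD_FLAGS_END_BD set only on the very last one.
         */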
9588
9589         pkt_prod = fp->tx_pkt_prod++;
9590         bd_prod = TX_BD(fp->tx_bd_prod);
9591
9592         /* get a tx_buf and first BD */
9593         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9594         tx_bd = &fp->tx_desc_ring[bd_prod];
9595
9596         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9597         tx_bd->general_data = (UNICAST_ADDRESS <<
9598                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9599         /* header nbd */
9600         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9601
9602         /* remember the first BD of the packet */
9603         tx_buf->first_bd = fp->tx_bd_prod;
9604         tx_buf->skb = skb;
9605
9606         DP(NETIF_MSG_TX_QUEUED,
9607            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9608            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9609
9610 #ifdef BCM_VLAN
9611         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9612             (bp->flags & HW_VLAN_TX_FLAG)) {
9613                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9614                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9615                 vlan_off += 4;
9616         } else
9617 #endif
9618                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9619
9620         if (xmit_type) {
9621                 /* turn on parsing and get a BD */
9622                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9623                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9624
9625                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9626         }
9627
9628         if (xmit_type & XMIT_CSUM) {
9629                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9630
9631                 /* for now NS flag is not used in Linux */
9632                 pbd->global_data = (hlen |
9633                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
9634                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9635
9636                 pbd->ip_hlen = (skb_transport_header(skb) -
9637                                 skb_network_header(skb)) / 2;
9638
9639                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9640
9641                 pbd->total_hlen = cpu_to_le16(hlen);
9642                 hlen = hlen*2 - vlan_off;
9643
9644                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9645
9646                 if (xmit_type & XMIT_CSUM_V4)
9647                         tx_bd->bd_flags.as_bitfield |=
9648                                                 ETH_TX_BD_FLAGS_IP_CSUM;
9649                 else
9650                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9651
9652                 if (xmit_type & XMIT_CSUM_TCP) {
9653                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9654
9655                 } else {
9656                         s8 fix = SKB_CS_OFF(skb); /* signed! */
9657
9658                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9659                         pbd->cs_offset = fix / 2;
9660
9661                         DP(NETIF_MSG_TX_QUEUED,
9662                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
9663                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9664                            SKB_CS(skb));
9665
9666                         /* HW bug: fixup the CSUM */
9667                         pbd->tcp_pseudo_csum =
9668                                 bnx2x_csum_fix(skb_transport_header(skb),
9669                                                SKB_CS(skb), fix);
9670
9671                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9672                            pbd->tcp_pseudo_csum);
9673                 }
9674         }
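        /* Example of the word/byte bookkeeping above (assumed sizes, no
         * VLAN offset): a 14-byte MAC header, 20-byte IP header and
         * 20-byte TCP header give hlen = 14/2 = 7 words,
         * ip_hlen = 20/2 = 10 words, total_hlen = 7 + 10 + 10 = 27
         * words, and then back to bytes: hlen = 27*2 - 0 = 54 for the
         * TSO split below.
         */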
9675
9676         mapping = pci_map_single(bp->pdev, skb->data,
9677                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9678
9679         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9680         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9681         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9682         tx_bd->nbd = cpu_to_le16(nbd);
9683         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9684
9685         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9686            "  nbytes %d  flags %x  vlan %x\n",
9687            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9688            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9689            le16_to_cpu(tx_bd->vlan));
9690
9691         if (xmit_type & XMIT_GSO) {
9692
9693                 DP(NETIF_MSG_TX_QUEUED,
9694                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
9695                    skb->len, hlen, skb_headlen(skb),
9696                    skb_shinfo(skb)->gso_size);
9697
9698                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9699
9700                 if (unlikely(skb_headlen(skb) > hlen))
9701                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9702                                                  bd_prod, ++nbd);
9703
9704                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9705                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9706                 pbd->tcp_flags = pbd_tcp_flags(skb);
9707
9708                 if (xmit_type & XMIT_GSO_V4) {
9709                         pbd->ip_id = swab16(ip_hdr(skb)->id);
9710                         pbd->tcp_pseudo_csum =
9711                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9712                                                           ip_hdr(skb)->daddr,
9713                                                           0, IPPROTO_TCP, 0));
9714
9715                 } else
9716                         pbd->tcp_pseudo_csum =
9717                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9718                                                         &ipv6_hdr(skb)->daddr,
9719                                                         0, IPPROTO_TCP, 0));
9720
9721                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9722         }
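        /* The pseudo checksums above are deliberately computed with a
         * zero length: ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN presumably
         * tells the FW to fold the per-segment length in itself when it
         * rewrites each TSO segment.
         */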
9723
9724         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9725                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9726
9727                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9728                 tx_bd = &fp->tx_desc_ring[bd_prod];
9729
9730                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9731                                        frag->size, PCI_DMA_TODEVICE);
9732
9733                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9734                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9735                 tx_bd->nbytes = cpu_to_le16(frag->size);
9736                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9737                 tx_bd->bd_flags.as_bitfield = 0;
9738
9739                 DP(NETIF_MSG_TX_QUEUED,
9740                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9741                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9742                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9743         }
9744
9745         /* now at last mark the BD as the last BD */
9746         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9747
9748         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9749            tx_bd, tx_bd->bd_flags.as_bitfield);
9750
9751         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9752
9753         /* now send a tx doorbell, counting the next-page BD
9754          * if the packet's chain contains or ends on it
9755          */
9756         if (TX_BD_POFF(bd_prod) < nbd)
9757                 nbd++;
9758
9759         if (pbd)
9760                 DP(NETIF_MSG_TX_QUEUED,
9761                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9762                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9763                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9764                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9765                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9766
9767         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9768
9769         /*
9770          * Make sure that the BD data is updated before updating the producer
9771          * since the FW might read the BD right after the producer is updated.
9772          * This is only applicable for weak-ordered memory model archs such
9773          * as IA-64. The following barrier is also mandatory since the FW
9774          * assumes packets always have BDs.
9775          */
9776         wmb();
9777
9778         fp->hw_tx_prods->bds_prod =
9779                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9780         mb(); /* FW restriction: must not reorder writing nbd and packets */
9781         fp->hw_tx_prods->packets_prod =
9782                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9783         DOORBELL(bp, FP_IDX(fp), 0);
9784
9785         mmiowb();
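        /* Ordering recap (illustrative): wmb() publishes the BD writes
         * before the producer updates, mb() keeps bds_prod visible
         * before packets_prod (the FW restriction noted above), and
         * mmiowb() keeps the doorbell MMIO write ordered on platforms
         * where MMIO can be reordered against a later lock release.
         */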
9786
9787         fp->tx_bd_prod += nbd;
9788         dev->trans_start = jiffies;
9789
9790         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9791                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9792                    if we put Tx into XOFF state. */
9793                 smp_mb();
9794                 netif_stop_queue(dev);
9795                 bp->eth_stats.driver_xoff++;
9796                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9797                         netif_wake_queue(dev);
9798         }
9799         fp->tx_pkt++;
9800
9801         return NETDEV_TX_OK;
9802 }
9803
9804 /* called with rtnl_lock */
9805 static int bnx2x_open(struct net_device *dev)
9806 {
9807         struct bnx2x *bp = netdev_priv(dev);
9808
9809         bnx2x_set_power_state(bp, PCI_D0);
9810
9811         return bnx2x_nic_load(bp, LOAD_OPEN);
9812 }
9813
9814 /* called with rtnl_lock */
9815 static int bnx2x_close(struct net_device *dev)
9816 {
9817         struct bnx2x *bp = netdev_priv(dev);
9818
9819         /* Unload the driver, release IRQs */
9820         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9821         if (atomic_read(&bp->pdev->enable_cnt) == 1)
9822                 if (!CHIP_REV_IS_SLOW(bp))
9823                         bnx2x_set_power_state(bp, PCI_D3hot);
9824
9825         return 0;
9826 }
9827
9828 /* called with netif_tx_lock from set_multicast */
9829 static void bnx2x_set_rx_mode(struct net_device *dev)
9830 {
9831         struct bnx2x *bp = netdev_priv(dev);
9832         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9833         int port = BP_PORT(bp);
9834
9835         if (bp->state != BNX2X_STATE_OPEN) {
9836                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9837                 return;
9838         }
9839
9840         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9841
9842         if (dev->flags & IFF_PROMISC)
9843                 rx_mode = BNX2X_RX_MODE_PROMISC;
9844
9845         else if ((dev->flags & IFF_ALLMULTI) ||
9846                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9847                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9848
9849         else { /* some multicasts */
9850                 if (CHIP_IS_E1(bp)) {
9851                         int i, old, offset;
9852                         struct dev_mc_list *mclist;
9853                         struct mac_configuration_cmd *config =
9854                                                 bnx2x_sp(bp, mcast_config);
9855
9856                         for (i = 0, mclist = dev->mc_list;
9857                              mclist && (i < dev->mc_count);
9858                              i++, mclist = mclist->next) {
9859
9860                                 config->config_table[i].
9861                                         cam_entry.msb_mac_addr =
9862                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
9863                                 config->config_table[i].
9864                                         cam_entry.middle_mac_addr =
9865                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
9866                                 config->config_table[i].
9867                                         cam_entry.lsb_mac_addr =
9868                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
9869                                 config->config_table[i].cam_entry.flags =
9870                                                         cpu_to_le16(port);
9871                                 config->config_table[i].
9872                                         target_table_entry.flags = 0;
9873                                 config->config_table[i].
9874                                         target_table_entry.client_id = 0;
9875                                 config->config_table[i].
9876                                         target_table_entry.vlan_id = 0;
9877
9878                                 DP(NETIF_MSG_IFUP,
9879                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9880                                    config->config_table[i].
9881                                                 cam_entry.msb_mac_addr,
9882                                    config->config_table[i].
9883                                                 cam_entry.middle_mac_addr,
9884                                    config->config_table[i].
9885                                                 cam_entry.lsb_mac_addr);
9886                         }
9887                         old = config->hdr.length_6b;
9888                         if (old > i) {
9889                                 for (; i < old; i++) {
9890                                         if (CAM_IS_INVALID(config->
9891                                                            config_table[i])) {
9892                                                 /* already invalidated */
9893                                                 break;
9894                                         }
9895                                         /* invalidate */
9896                                         CAM_INVALIDATE(config->
9897                                                        config_table[i]);
9898                                 }
9899                         }
9900
9901                         if (CHIP_REV_IS_SLOW(bp))
9902                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9903                         else
9904                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
9905
9906                         config->hdr.length_6b = i;
9907                         config->hdr.offset = offset;
9908                         config->hdr.client_id = BP_CL_ID(bp);
9909                         config->hdr.reserved1 = 0;
9910
9911                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9912                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9913                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9914                                       0);
9915                 } else { /* E1H */
9916                         /* Accept one or more multicasts */
9917                         struct dev_mc_list *mclist;
9918                         u32 mc_filter[MC_HASH_SIZE];
9919                         u32 crc, bit, regidx;
9920                         int i;
9921
9922                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9923
9924                         for (i = 0, mclist = dev->mc_list;
9925                              mclist && (i < dev->mc_count);
9926                              i++, mclist = mclist->next) {
9927
9928                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9929                                    mclist->dmi_addr);
9930
9931                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9932                                 bit = (crc >> 24) & 0xff;
9933                                 regidx = bit >> 5;
9934                                 bit &= 0x1f;
9935                                 mc_filter[regidx] |= (1 << bit);
9936                         }
9937
9938                         for (i = 0; i < MC_HASH_SIZE; i++)
9939                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9940                                        mc_filter[i]);
9941                 }
9942         }
9943
9944         bp->rx_mode = rx_mode;
9945         bnx2x_set_storm_rx_mode(bp);
9946 }
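/* E1H hash placement example (illustrative numbers): if crc32c of the
 * address is 0x9Axxxxxx, then bit = 0x9a = 154, regidx = 154 >> 5 = 4,
 * and the filter sets bit 154 & 0x1f = 26 in MC_HASH register 4 of the
 * eight (256 bits / 32) hash registers.
 */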
9947
9948 /* called with rtnl_lock */
9949 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9950 {
9951         struct sockaddr *addr = p;
9952         struct bnx2x *bp = netdev_priv(dev);
9953
9954         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9955                 return -EINVAL;
9956
9957         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9958         if (netif_running(dev)) {
9959                 if (CHIP_IS_E1(bp))
9960                         bnx2x_set_mac_addr_e1(bp, 1);
9961                 else
9962                         bnx2x_set_mac_addr_e1h(bp, 1);
9963         }
9964
9965         return 0;
9966 }
9967
9968 /* called with rtnl_lock */
9969 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9970 {
9971         struct mii_ioctl_data *data = if_mii(ifr);
9972         struct bnx2x *bp = netdev_priv(dev);
9973         int port = BP_PORT(bp);
9974         int err;
9975
9976         switch (cmd) {
9977         case SIOCGMIIPHY:
9978                 data->phy_id = bp->port.phy_addr;
9979
9980                 /* fallthrough */
9981
9982         case SIOCGMIIREG: {
9983                 u16 mii_regval;
9984
9985                 if (!netif_running(dev))
9986                         return -EAGAIN;
9987
9988                 mutex_lock(&bp->port.phy_mutex);
9989                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9990                                       DEFAULT_PHY_DEV_ADDR,
9991                                       (data->reg_num & 0x1f), &mii_regval);
9992                 data->val_out = mii_regval;
9993                 mutex_unlock(&bp->port.phy_mutex);
9994                 return err;
9995         }
9996
9997         case SIOCSMIIREG:
9998                 if (!capable(CAP_NET_ADMIN))
9999                         return -EPERM;
10000
10001                 if (!netif_running(dev))
10002                         return -EAGAIN;
10003
10004                 mutex_lock(&bp->port.phy_mutex);
10005                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10006                                        DEFAULT_PHY_DEV_ADDR,
10007                                        (data->reg_num & 0x1f), data->val_in);
10008                 mutex_unlock(&bp->port.phy_mutex);
10009                 return err;
10010
10011         default:
10012                 /* do nothing */
10013                 break;
10014         }
10015
10016         return -EOPNOTSUPP;
10017 }
10018
10019 /* called with rtnl_lock */
10020 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10021 {
10022         struct bnx2x *bp = netdev_priv(dev);
10023         int rc = 0;
10024
10025         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10026             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10027                 return -EINVAL;
10028
10029         /* This does not race with packet allocation
10030          * because the actual alloc size is
10031          * only updated as part of load
10032          */
10033         dev->mtu = new_mtu;
10034
10035         if (netif_running(dev)) {
10036                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10037                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10038         }
10039
10040         return rc;
10041 }
10042
10043 static void bnx2x_tx_timeout(struct net_device *dev)
10044 {
10045         struct bnx2x *bp = netdev_priv(dev);
10046
10047 #ifdef BNX2X_STOP_ON_ERROR
10048         if (!bp->panic)
10049                 bnx2x_panic();
10050 #endif
10051         /* This allows the netif to be shut down gracefully before resetting */
10052         schedule_work(&bp->reset_task);
10053 }
10054
10055 #ifdef BCM_VLAN
10056 /* called with rtnl_lock */
10057 static void bnx2x_vlan_rx_register(struct net_device *dev,
10058                                    struct vlan_group *vlgrp)
10059 {
10060         struct bnx2x *bp = netdev_priv(dev);
10061
10062         bp->vlgrp = vlgrp;
10063
10064         /* Set flags according to the required capabilities */
10065         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10066
10067         if (dev->features & NETIF_F_HW_VLAN_TX)
10068                 bp->flags |= HW_VLAN_TX_FLAG;
10069
10070         if (dev->features & NETIF_F_HW_VLAN_RX)
10071                 bp->flags |= HW_VLAN_RX_FLAG;
10072
10073         if (netif_running(dev))
10074                 bnx2x_set_client_config(bp);
10075 }
10076
10077 #endif
10078
10079 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10080 static void poll_bnx2x(struct net_device *dev)
10081 {
10082         struct bnx2x *bp = netdev_priv(dev);
10083
10084         disable_irq(bp->pdev->irq);
10085         bnx2x_interrupt(bp->pdev->irq, dev);
10086         enable_irq(bp->pdev->irq);
10087 }
10088 #endif
10089
10090 static const struct net_device_ops bnx2x_netdev_ops = {
10091         .ndo_open               = bnx2x_open,
10092         .ndo_stop               = bnx2x_close,
10093         .ndo_start_xmit         = bnx2x_start_xmit,
10094         .ndo_set_multicast_list = bnx2x_set_rx_mode,
10095         .ndo_set_mac_address    = bnx2x_change_mac_addr,
10096         .ndo_validate_addr      = eth_validate_addr,
10097         .ndo_do_ioctl           = bnx2x_ioctl,
10098         .ndo_change_mtu         = bnx2x_change_mtu,
10099         .ndo_tx_timeout         = bnx2x_tx_timeout,
10100 #ifdef BCM_VLAN
10101         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
10102 #endif
10103 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10104         .ndo_poll_controller    = poll_bnx2x,
10105 #endif
10106 };
10107
10108
10109 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10110                                     struct net_device *dev)
10111 {
10112         struct bnx2x *bp;
10113         int rc;
10114
10115         SET_NETDEV_DEV(dev, &pdev->dev);
10116         bp = netdev_priv(dev);
10117
10118         bp->dev = dev;
10119         bp->pdev = pdev;
10120         bp->flags = 0;
10121         bp->func = PCI_FUNC(pdev->devfn);
10122
10123         rc = pci_enable_device(pdev);
10124         if (rc) {
10125                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10126                 goto err_out;
10127         }
10128
10129         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10130                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10131                        " aborting\n");
10132                 rc = -ENODEV;
10133                 goto err_out_disable;
10134         }
10135
10136         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10137                 printk(KERN_ERR PFX "Cannot find second PCI device"
10138                        " base address, aborting\n");
10139                 rc = -ENODEV;
10140                 goto err_out_disable;
10141         }
10142
10143         if (atomic_read(&pdev->enable_cnt) == 1) {
10144                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10145                 if (rc) {
10146                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10147                                " aborting\n");
10148                         goto err_out_disable;
10149                 }
10150
10151                 pci_set_master(pdev);
10152                 pci_save_state(pdev);
10153         }
10154
10155         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10156         if (bp->pm_cap == 0) {
10157                 printk(KERN_ERR PFX "Cannot find power management"
10158                        " capability, aborting\n");
10159                 rc = -EIO;
10160                 goto err_out_release;
10161         }
10162
10163         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10164         if (bp->pcie_cap == 0) {
10165                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10166                        " aborting\n");
10167                 rc = -EIO;
10168                 goto err_out_release;
10169         }
10170
10171         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10172                 bp->flags |= USING_DAC_FLAG;
10173                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10174                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10175                                " failed, aborting\n");
10176                         rc = -EIO;
10177                         goto err_out_release;
10178                 }
10179
10180         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10181                 printk(KERN_ERR PFX "System does not support DMA,"
10182                        " aborting\n");
10183                 rc = -EIO;
10184                 goto err_out_release;
10185         }
10186
10187         dev->mem_start = pci_resource_start(pdev, 0);
10188         dev->base_addr = dev->mem_start;
10189         dev->mem_end = pci_resource_end(pdev, 0);
10190
10191         dev->irq = pdev->irq;
10192
10193         bp->regview = pci_ioremap_bar(pdev, 0);
10194         if (!bp->regview) {
10195                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10196                 rc = -ENOMEM;
10197                 goto err_out_release;
10198         }
10199
10200         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10201                                         min_t(u64, BNX2X_DB_SIZE,
10202                                               pci_resource_len(pdev, 2)));
10203         if (!bp->doorbells) {
10204                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10205                 rc = -ENOMEM;
10206                 goto err_out_unmap;
10207         }
10208
10209         bnx2x_set_power_state(bp, PCI_D0);
10210
10211         /* clean indirect addresses */
10212         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10213                                PCICFG_VENDOR_ID_OFFSET);
10214         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10215         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10216         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10217         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10218
10219         dev->watchdog_timeo = TX_TIMEOUT;
10220
10221         dev->netdev_ops = &bnx2x_netdev_ops;
10222         dev->ethtool_ops = &bnx2x_ethtool_ops;
10223         dev->features |= NETIF_F_SG;
10224         dev->features |= NETIF_F_HW_CSUM;
10225         if (bp->flags & USING_DAC_FLAG)
10226                 dev->features |= NETIF_F_HIGHDMA;
10227 #ifdef BCM_VLAN
10228         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10229         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10230 #endif
10231         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10232         dev->features |= NETIF_F_TSO6;
10233
10234         return 0;
10235
10236 err_out_unmap:
10237         if (bp->regview) {
10238                 iounmap(bp->regview);
10239                 bp->regview = NULL;
10240         }
10241         if (bp->doorbells) {
10242                 iounmap(bp->doorbells);
10243                 bp->doorbells = NULL;
10244         }
10245
10246 err_out_release:
10247         if (atomic_read(&pdev->enable_cnt) == 1)
10248                 pci_release_regions(pdev);
10249
10250 err_out_disable:
10251         pci_disable_device(pdev);
10252         pci_set_drvdata(pdev, NULL);
10253
10254 err_out:
10255         return rc;
10256 }
10257
10258 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10259 {
10260         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10261
10262         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10263         return val;
10264 }
10265
10266 /* return value: 1 = 2.5GHz (Gen1), 2 = 5GHz (Gen2) */
10267 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10268 {
10269         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10270
10271         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10272         return val;
10273 }
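/* Example decode (illustrative): a x8 Gen1 link reads back width 8 and
 * speed 1, which bnx2x_init_one() below reports as "PCI-E x8 2.5GHz";
 * speed 2 is reported as "5GHz (Gen2)".
 */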
10274
10275 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10276                                     const struct pci_device_id *ent)
10277 {
10278         static int version_printed;
10279         struct net_device *dev = NULL;
10280         struct bnx2x *bp;
10281         int rc;
10282
10283         if (version_printed++ == 0)
10284                 printk(KERN_INFO "%s", version);
10285
10286         /* dev and its private area are zeroed by alloc_etherdev */
10287         dev = alloc_etherdev(sizeof(*bp));
10288         if (!dev) {
10289                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10290                 return -ENOMEM;
10291         }
10292
10293         bp = netdev_priv(dev);
10294         bp->msglevel = debug;
10295
10296         rc = bnx2x_init_dev(pdev, dev);
10297         if (rc < 0) {
10298                 free_netdev(dev);
10299                 return rc;
10300         }
10301
10302         pci_set_drvdata(pdev, dev);
10303
10304         rc = bnx2x_init_bp(bp);
10305         if (rc)
10306                 goto init_one_exit;
10307
10308         rc = register_netdev(dev);
10309         if (rc) {
10310                 dev_err(&pdev->dev, "Cannot register net device\n");
10311                 goto init_one_exit;
10312         }
10313
10314         netif_carrier_off(dev);
10315
10316         bp->common.name = board_info[ent->driver_data].name;
10317         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10318                " IRQ %d, ", dev->name, bp->common.name,
10319                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10320                bnx2x_get_pcie_width(bp),
10321                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10322                dev->base_addr, bp->pdev->irq);
10323         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10324         return 0;
10325
10326 init_one_exit:
10327         if (bp->regview)
10328                 iounmap(bp->regview);
10329
10330         if (bp->doorbells)
10331                 iounmap(bp->doorbells);
10332
10333         free_netdev(dev);
10334
10335         if (atomic_read(&pdev->enable_cnt) == 1)
10336                 pci_release_regions(pdev);
10337
10338         pci_disable_device(pdev);
10339         pci_set_drvdata(pdev, NULL);
10340
10341         return rc;
10342 }
10343
10344 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10345 {
10346         struct net_device *dev = pci_get_drvdata(pdev);
10347         struct bnx2x *bp;
10348
10349         if (!dev) {
10350                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10351                 return;
10352         }
10353         bp = netdev_priv(dev);
10354
10355         unregister_netdev(dev);
10356
10357         if (bp->regview)
10358                 iounmap(bp->regview);
10359
10360         if (bp->doorbells)
10361                 iounmap(bp->doorbells);
10362
10363         free_netdev(dev);
10364
10365         if (atomic_read(&pdev->enable_cnt) == 1)
10366                 pci_release_regions(pdev);
10367
10368         pci_disable_device(pdev);
10369         pci_set_drvdata(pdev, NULL);
10370 }
10371
10372 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10373 {
10374         struct net_device *dev = pci_get_drvdata(pdev);
10375         struct bnx2x *bp;
10376
10377         if (!dev) {
10378                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10379                 return -ENODEV;
10380         }
10381         bp = netdev_priv(dev);
10382
10383         rtnl_lock();
10384
10385         pci_save_state(pdev);
10386
10387         if (!netif_running(dev)) {
10388                 rtnl_unlock();
10389                 return 0;
10390         }
10391
10392         netif_device_detach(dev);
10393
10394         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10395
10396         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10397
10398         rtnl_unlock();
10399
10400         return 0;
10401 }
10402
10403 static int bnx2x_resume(struct pci_dev *pdev)
10404 {
10405         struct net_device *dev = pci_get_drvdata(pdev);
10406         struct bnx2x *bp;
10407         int rc;
10408
10409         if (!dev) {
10410                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10411                 return -ENODEV;
10412         }
10413         bp = netdev_priv(dev);
10414
10415         rtnl_lock();
10416
10417         pci_restore_state(pdev);
10418
10419         if (!netif_running(dev)) {
10420                 rtnl_unlock();
10421                 return 0;
10422         }
10423
10424         bnx2x_set_power_state(bp, PCI_D0);
10425         netif_device_attach(dev);
10426
10427         rc = bnx2x_nic_load(bp, LOAD_OPEN);
10428
10429         rtnl_unlock();
10430
10431         return rc;
10432 }
10433
10434 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10435 {
10436         int i;
10437
10438         bp->state = BNX2X_STATE_ERROR;
10439
10440         bp->rx_mode = BNX2X_RX_MODE_NONE;
10441
10442         bnx2x_netif_stop(bp, 0);
10443
10444         del_timer_sync(&bp->timer);
10445         bp->stats_state = STATS_STATE_DISABLED;
10446         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10447
10448         /* Release IRQs */
10449         bnx2x_free_irq(bp);
10450
10451         if (CHIP_IS_E1(bp)) {
10452                 struct mac_configuration_cmd *config =
10453                                                 bnx2x_sp(bp, mcast_config);
10454
10455                 for (i = 0; i < config->hdr.length_6b; i++)
10456                         CAM_INVALIDATE(config->config_table[i]);
10457         }
10458
10459         /* Free SKBs, SGEs, TPA pool and driver internals */
10460         bnx2x_free_skbs(bp);
10461         for_each_queue(bp, i)
10462                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10463         bnx2x_free_mem(bp);
10464
10465         bp->state = BNX2X_STATE_CLOSED;
10466
10467         netif_carrier_off(bp->dev);
10468
10469         return 0;
10470 }
10471
10472 static void bnx2x_eeh_recover(struct bnx2x *bp)
10473 {
10474         u32 val;
10475
10476         mutex_init(&bp->port.phy_mutex);
10477
10478         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10479         bp->link_params.shmem_base = bp->common.shmem_base;
10480         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10481
10482         if (!bp->common.shmem_base ||
10483             (bp->common.shmem_base < 0xA0000) ||
10484             (bp->common.shmem_base >= 0xC0000)) {
10485                 BNX2X_DEV_INFO("MCP not active\n");
10486                 bp->flags |= NO_MCP_FLAG;
10487                 return;
10488         }
10489
10490         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10491         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10492                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10493                 BNX2X_ERR("BAD MCP validity signature\n");
10494
10495         if (!BP_NOMCP(bp)) {
10496                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10497                               & DRV_MSG_SEQ_NUMBER_MASK);
10498                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10499         }
10500 }
10501
10502 /**
10503  * bnx2x_io_error_detected - called when PCI error is detected
10504  * @pdev: Pointer to PCI device
10505  * @state: The current pci connection state
10506  *
10507  * This function is called after a PCI bus error affecting
10508  * this device has been detected.
10509  */
10510 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10511                                                 pci_channel_state_t state)
10512 {
10513         struct net_device *dev = pci_get_drvdata(pdev);
10514         struct bnx2x *bp = netdev_priv(dev);
10515
10516         rtnl_lock();
10517
10518         netif_device_detach(dev);
10519
10520         if (netif_running(dev))
10521                 bnx2x_eeh_nic_unload(bp);
10522
10523         pci_disable_device(pdev);
10524
10525         rtnl_unlock();
10526
10527         /* Request a slot reset */
10528         return PCI_ERS_RESULT_NEED_RESET;
10529 }
10530
10531 /**
10532  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10533  * @pdev: Pointer to PCI device
10534  *
10535  * Restart the card from scratch, as if from a cold-boot.
10536  */
10537 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10538 {
10539         struct net_device *dev = pci_get_drvdata(pdev);
10540         struct bnx2x *bp = netdev_priv(dev);
10541
10542         rtnl_lock();
10543
10544         if (pci_enable_device(pdev)) {
10545                 dev_err(&pdev->dev,
10546                         "Cannot re-enable PCI device after reset\n");
10547                 rtnl_unlock();
10548                 return PCI_ERS_RESULT_DISCONNECT;
10549         }
10550
10551         pci_set_master(pdev);
10552         pci_restore_state(pdev);
10553
10554         if (netif_running(dev))
10555                 bnx2x_set_power_state(bp, PCI_D0);
10556
10557         rtnl_unlock();
10558
10559         return PCI_ERS_RESULT_RECOVERED;
10560 }
10561
10562 /**
10563  * bnx2x_io_resume - called when traffic can start flowing again
10564  * @pdev: Pointer to PCI device
10565  *
10566  * This callback is called when the error recovery driver tells us that
10567  * it's OK to resume normal operation.
10568  */
10569 static void bnx2x_io_resume(struct pci_dev *pdev)
10570 {
10571         struct net_device *dev = pci_get_drvdata(pdev);
10572         struct bnx2x *bp = netdev_priv(dev);
10573
10574         rtnl_lock();
10575
10576         bnx2x_eeh_recover(bp);
10577
10578         if (netif_running(dev))
10579                 bnx2x_nic_load(bp, LOAD_NORMAL);
10580
10581         netif_device_attach(dev);
10582
10583         rtnl_unlock();
10584 }
10585
10586 static struct pci_error_handlers bnx2x_err_handler = {
10587         .error_detected = bnx2x_io_error_detected,
10588         .slot_reset = bnx2x_io_slot_reset,
10589         .resume = bnx2x_io_resume,
10590 };
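/* Standard PCI AER recovery sequence (for reference): the PCI core
 * calls .error_detected first, then resets the slot and calls
 * .slot_reset, and finally .resume once traffic may flow again; the
 * handlers above plug bnx2x into that flow.
 */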
10591
10592 static struct pci_driver bnx2x_pci_driver = {
10593         .name        = DRV_MODULE_NAME,
10594         .id_table    = bnx2x_pci_tbl,
10595         .probe       = bnx2x_init_one,
10596         .remove      = __devexit_p(bnx2x_remove_one),
10597         .suspend     = bnx2x_suspend,
10598         .resume      = bnx2x_resume,
10599         .err_handler = &bnx2x_err_handler,
10600 };
10601
10602 static int __init bnx2x_init(void)
10603 {
10604         bnx2x_wq = create_singlethread_workqueue("bnx2x");
10605         if (bnx2x_wq == NULL) {
10606                 printk(KERN_ERR PFX "Cannot create workqueue\n");
10607                 return -ENOMEM;
10608         }
10609
10610         return pci_register_driver(&bnx2x_pci_driver);
10611 }
10612
10613 static void __exit bnx2x_cleanup(void)
10614 {
10615         pci_unregister_driver(&bnx2x_pci_driver);
10616
10617         destroy_workqueue(bnx2x_wq);
10618 }
10619
10620 module_init(bnx2x_init);
10621 module_exit(bnx2x_cleanup);
10622