/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
        #include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.23"
#define DRV_MODULE_RELDATE      "2008/11/03"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

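/* Write a block of host memory to a GRC (device) address via the DMAE
 * engine: build the command, post it on this function's DMAE channel and
 * poll the write-back completion word for DMAE_COMP_VAL, bounded by a
 * retry counter.  Falls back to indirect writes while DMAE is not ready.
 */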
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

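/* Read a block from a GRC (device) address into the slowpath wb_data
 * buffer via the DMAE engine; the mirror image of bnx2x_write_dmae(),
 * including the indirect-access fallback and completion polling.
 */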
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

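/* Scan the assert lists of the four storm processors (X/T/C/U) and print
 * any recorded asserts; returns the number of asserts found.
 */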
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

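/* Dump the MCP firmware trace: read the current mark from the scratchpad
 * and print the cyclic trace buffer around it, eight words at a time.
 */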
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

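/* Dump driver state on a fatal error: per-queue producer/consumer indices,
 * a window of Tx/Rx/SGE/CQE ring entries around the consumers, the default
 * status block indices, the firmware trace and the storm assert lists.
 */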
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

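/* Enable interrupts in the HC: single-ISR INT#A mode or MSI-X, with
 * attention bits enabled; on E1H also program the leading/trailing edge
 * registers.
 */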
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

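/* Acknowledge a status block by writing an IGU ack command (storm, index,
 * update flag and interrupt mode) to the HC command register.
 */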
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}

/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

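/* Number of Tx BDs available to start_xmit(); the NUM_TX_RINGS
 * "next-page" entries are counted as used so they act as a safety
 * threshold.
 */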
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries;
           it is used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

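/* Reclaim completed Tx packets up to the consumer index reported in the
 * status block, then wake the netdev queue if it was stopped and enough
 * BDs became available.
 */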
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}

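/* Handle a slowpath (ramrod) completion delivered on the Rx CQ: advance
 * the matching fastpath or global state machine according to the ramrod
 * command and the current state.
 */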
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

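/* Advance the SGE producer after a TPA aggregation completes: mark the
 * SGEs consumed by the CQE in the mask and push the producer over every
 * fully-consumed mask element.
 */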
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

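/* Start a TPA aggregation: hand the spare skb from the TPA pool to the
 * producer BD and park the just-received skb in the pool, where the
 * aggregation will be built.
 */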
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

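/* Attach the SGE pages listed in the CQE to the aggregated skb as page
 * fragments, replenishing each ring slot as it is consumed.
 */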
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                                max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

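/* Complete a TPA aggregation: fix the IP checksum of the aggregated skb,
 * attach its fragments and pass it up the stack, then place a freshly
 * allocated skb back in the pool (or drop the packet if allocation fails).
 */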
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) &&
                            (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                             PARSING_FLAGS_VLAN))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }

                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since the FW
         * assumes BDs must have buffers.
         */
        wmb();

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}

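/* Main Rx completion loop (the NAPI poll body): walk the CQ up to the
 * producer reported in the status block, dispatching slowpath events,
 * TPA start/stop CQEs and regular packets, up to "budget" packets.
 */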
1381 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1382 {
1383         struct bnx2x *bp = fp->bp;
1384         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1385         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1386         int rx_pkt = 0;
1387
1388 #ifdef BNX2X_STOP_ON_ERROR
1389         if (unlikely(bp->panic))
1390                 return 0;
1391 #endif
1392
1393         /* CQ "next element" is of the size of the regular element,
1394            that's why it's ok here */
1395         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1396         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1397                 hw_comp_cons++;
1398
1399         bd_cons = fp->rx_bd_cons;
1400         bd_prod = fp->rx_bd_prod;
1401         bd_prod_fw = bd_prod;
1402         sw_comp_cons = fp->rx_comp_cons;
1403         sw_comp_prod = fp->rx_comp_prod;
1404
1405         /* Memory barrier necessary as speculative reads of the rx
1406          * buffer can be ahead of the index in the status block
1407          */
1408         rmb();
1409
1410         DP(NETIF_MSG_RX_STATUS,
1411            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1412            FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1413
1414         while (sw_comp_cons != hw_comp_cons) {
1415                 struct sw_rx_bd *rx_buf = NULL;
1416                 struct sk_buff *skb;
1417                 union eth_rx_cqe *cqe;
1418                 u8 cqe_fp_flags;
1419                 u16 len, pad;
1420
1421                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1422                 bd_prod = RX_BD(bd_prod);
1423                 bd_cons = RX_BD(bd_cons);
1424
1425                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1426                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1427
1428                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1429                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1430                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1431                    cqe->fast_path_cqe.rss_hash_result,
1432                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1433                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1434
1435                 /* is this a slowpath msg? */
1436                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1437                         bnx2x_sp_event(fp, cqe);
1438                         goto next_cqe;
1439
1440                 /* this is an rx packet */
1441                 } else {
1442                         rx_buf = &fp->rx_buf_ring[bd_cons];
1443                         skb = rx_buf->skb;
1444                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1445                         pad = cqe->fast_path_cqe.placement_offset;
1446
1447                         /* If CQE is marked both TPA_START and TPA_END
1448                            it is a non-TPA CQE */
1449                         if ((!fp->disable_tpa) &&
1450                             (TPA_TYPE(cqe_fp_flags) !=
1451                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1452                                 u16 queue = cqe->fast_path_cqe.queue_index;
1453
1454                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1455                                         DP(NETIF_MSG_RX_STATUS,
1456                                            "calling tpa_start on queue %d\n",
1457                                            queue);
1458
1459                                         bnx2x_tpa_start(fp, queue, skb,
1460                                                         bd_cons, bd_prod);
1461                                         goto next_rx;
1462                                 }
1463
1464                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1465                                         DP(NETIF_MSG_RX_STATUS,
1466                                            "calling tpa_stop on queue %d\n",
1467                                            queue);
1468
1469                                         if (!BNX2X_RX_SUM_FIX(cqe))
1470                                                 BNX2X_ERR("STOP on non-TCP "
1471                                                           "data\n");
1472
1473                                         /* This is the size of the linear
1474                                            data on this skb */
1475                                         len = le16_to_cpu(cqe->fast_path_cqe.
1476                                                                 len_on_bd);
1477                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1478                                                     len, cqe, comp_ring_cons);
1479 #ifdef BNX2X_STOP_ON_ERROR
1480                                         if (bp->panic)
1481                                                 return -EINVAL;
1482 #endif
1483
1484                                         bnx2x_update_sge_prod(fp,
1485                                                         &cqe->fast_path_cqe);
1486                                         goto next_cqe;
1487                                 }
1488                         }
1489
1490                         pci_dma_sync_single_for_device(bp->pdev,
1491                                         pci_unmap_addr(rx_buf, mapping),
1492                                                        pad + RX_COPY_THRESH,
1493                                                        PCI_DMA_FROMDEVICE);
1494                         prefetch(skb);
1495                         prefetch(((char *)(skb)) + 128);
1496
1497                         /* is this an error packet? */
1498                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1499                                 DP(NETIF_MSG_RX_ERR,
1500                                    "ERROR  flags %x  rx packet %u\n",
1501                                    cqe_fp_flags, sw_comp_cons);
1502                                 bp->eth_stats.rx_err_discard_pkt++;
1503                                 goto reuse_rx;
1504                         }
1505
1506                         /* Since we don't have a jumbo ring,
1507                          * copy small packets if mtu > 1500
1508                          */
1509                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1510                             (len <= RX_COPY_THRESH)) {
1511                                 struct sk_buff *new_skb;
1512
1513                                 new_skb = netdev_alloc_skb(bp->dev,
1514                                                            len + pad);
1515                                 if (new_skb == NULL) {
1516                                         DP(NETIF_MSG_RX_ERR,
1517                                            "ERROR  packet dropped "
1518                                            "because of alloc failure\n");
1519                                         bp->eth_stats.rx_skb_alloc_failed++;
1520                                         goto reuse_rx;
1521                                 }
1522
1523                                 /* aligned copy */
1524                                 skb_copy_from_linear_data_offset(skb, pad,
1525                                                     new_skb->data + pad, len);
1526                                 skb_reserve(new_skb, pad);
1527                                 skb_put(new_skb, len);
1528
1529                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1530
1531                                 skb = new_skb;
1532
1533                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1534                                 pci_unmap_single(bp->pdev,
1535                                         pci_unmap_addr(rx_buf, mapping),
1536                                                  bp->rx_buf_size,
1537                                                  PCI_DMA_FROMDEVICE);
1538                                 skb_reserve(skb, pad);
1539                                 skb_put(skb, len);
1540
1541                         } else {
1542                                 DP(NETIF_MSG_RX_ERR,
1543                                    "ERROR  packet dropped because "
1544                                    "of alloc failure\n");
1545                                 bp->eth_stats.rx_skb_alloc_failed++;
1546 reuse_rx:
1547                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1548                                 goto next_rx;
1549                         }
1550
1551                         skb->protocol = eth_type_trans(skb, bp->dev);
1552
1553                         skb->ip_summed = CHECKSUM_NONE;
1554                         if (bp->rx_csum) {
1555                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1556                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1557                                 else
1558                                         bp->eth_stats.hw_csum_err++;
1559                         }
1560                 }
1561
1562 #ifdef BCM_VLAN
1563                 if ((bp->vlgrp != NULL) &&
1564                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1565                      PARSING_FLAGS_VLAN))
1566                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1567                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1568                 else
1569 #endif
1570                         netif_receive_skb(skb);
1571
1572
1573 next_rx:
1574                 rx_buf->skb = NULL;
1575
1576                 bd_cons = NEXT_RX_IDX(bd_cons);
1577                 bd_prod = NEXT_RX_IDX(bd_prod);
1578                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1579                 rx_pkt++;
1580 next_cqe:
1581                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1582                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1583
1584                 if (rx_pkt == budget)
1585                         break;
1586         } /* while */
1587
1588         fp->rx_bd_cons = bd_cons;
1589         fp->rx_bd_prod = bd_prod_fw;
1590         fp->rx_comp_cons = sw_comp_cons;
1591         fp->rx_comp_prod = sw_comp_prod;
1592
1593         /* Update producers */
1594         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1595                              fp->rx_sge_prod);
1596
1597         fp->rx_pkt += rx_pkt;
1598         fp->rx_calls++;
1599
1600         return rx_pkt;
1601 }
1602
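/* MSI-X fastpath handler: acks the status block with IGU_INT_DISABLE so
 * no further interrupts arrive for this queue, then schedules NAPI to do
 * the actual RX/TX work.
 */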
1603 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1604 {
1605         struct bnx2x_fastpath *fp = fp_cookie;
1606         struct bnx2x *bp = fp->bp;
1607         int index = FP_IDX(fp);
1608
1609         /* Return here if interrupt is disabled */
1610         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1611                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1612                 return IRQ_HANDLED;
1613         }
1614
1615         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1616            index, FP_SB_ID(fp));
1617         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1618
1619 #ifdef BNX2X_STOP_ON_ERROR
1620         if (unlikely(bp->panic))
1621                 return IRQ_HANDLED;
1622 #endif
1623
1624         prefetch(fp->rx_cons_sb);
1625         prefetch(fp->tx_cons_sb);
1626         prefetch(&fp->status_blk->c_status_block.status_block_index);
1627         prefetch(&fp->status_blk->u_status_block.status_block_index);
1628
1629         netif_rx_schedule(&bnx2x_fp(bp, index, napi));
1630
1631         return IRQ_HANDLED;
1632 }
1633
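/* Single ISR (INT#A) handler: bnx2x_ack_int() returns a status bitmask
 * where bit 0 stands for the default (slowpath) status block and bit
 * (sb_id + 1) for a fastpath status block.  Fastpath bits schedule NAPI,
 * bit 0 kicks the slowpath work item.
 */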
1634 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1635 {
1636         struct net_device *dev = dev_instance;
1637         struct bnx2x *bp = netdev_priv(dev);
1638         u16 status = bnx2x_ack_int(bp);
1639         u16 mask;
1640
1641         /* Return here if interrupt is shared and it's not for us */
1642         if (unlikely(status == 0)) {
1643                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1644                 return IRQ_NONE;
1645         }
1646         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1647
1648         /* Return here if interrupt is disabled */
1649         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1650                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1651                 return IRQ_HANDLED;
1652         }
1653
1654 #ifdef BNX2X_STOP_ON_ERROR
1655         if (unlikely(bp->panic))
1656                 return IRQ_HANDLED;
1657 #endif
1658
1659         mask = 0x2 << bp->fp[0].sb_id;
1660         if (status & mask) {
1661                 struct bnx2x_fastpath *fp = &bp->fp[0];
1662
1663                 prefetch(fp->rx_cons_sb);
1664                 prefetch(fp->tx_cons_sb);
1665                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1666                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1667
1668                 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
1669
1670                 status &= ~mask;
1671         }
1672
1673
1674         if (unlikely(status & 0x1)) {
1675                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1676
1677                 status &= ~0x1;
1678                 if (!status)
1679                         return IRQ_HANDLED;
1680         }
1681
1682         if (status)
1683                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1684                    status);
1685
1686         return IRQ_HANDLED;
1687 }
1688
1689 /* end of fast path */
1690
1691 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1692
1693 /* Link */
1694
1695 /*
1696  * General service functions
1697  */
1698
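/* The HW lock is implemented with per-function MISC driver-control
 * registers: writing the resource bit to the +4 (set) address tries to
 * grab it, and reading the register back tells whether the grab took
 * effect.  The loop below retries every 5ms for up to 5 seconds.
 */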
1699 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1700 {
1701         u32 lock_status;
1702         u32 resource_bit = (1 << resource);
1703         int func = BP_FUNC(bp);
1704         u32 hw_lock_control_reg;
1705         int cnt;
1706
1707         /* Validating that the resource is within range */
1708         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1709                 DP(NETIF_MSG_HW,
1710                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1711                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1712                 return -EINVAL;
1713         }
1714
1715         if (func <= 5) {
1716                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1717         } else {
1718                 hw_lock_control_reg =
1719                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1720         }
1721
1722         /* Validating that the resource is not already taken */
1723         lock_status = REG_RD(bp, hw_lock_control_reg);
1724         if (lock_status & resource_bit) {
1725                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1726                    lock_status, resource_bit);
1727                 return -EEXIST;
1728         }
1729
1730         /* Try for 5 seconds every 5ms */
1731         for (cnt = 0; cnt < 1000; cnt++) {
1732                 /* Try to acquire the lock */
1733                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1734                 lock_status = REG_RD(bp, hw_lock_control_reg);
1735                 if (lock_status & resource_bit)
1736                         return 0;
1737
1738                 msleep(5);
1739         }
1740         DP(NETIF_MSG_HW, "Timeout\n");
1741         return -EAGAIN;
1742 }
1743
1744 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1745 {
1746         u32 lock_status;
1747         u32 resource_bit = (1 << resource);
1748         int func = BP_FUNC(bp);
1749         u32 hw_lock_control_reg;
1750
1751         /* Validating that the resource is within range */
1752         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1753                 DP(NETIF_MSG_HW,
1754                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1755                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1756                 return -EINVAL;
1757         }
1758
1759         if (func <= 5) {
1760                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1761         } else {
1762                 hw_lock_control_reg =
1763                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1764         }
1765
1766         /* Validating that the resource is currently taken */
1767         lock_status = REG_RD(bp, hw_lock_control_reg);
1768         if (!(lock_status & resource_bit)) {
1769                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1770                    lock_status, resource_bit);
1771                 return -EFAULT;
1772         }
1773
1774         REG_WR(bp, hw_lock_control_reg, resource_bit);
1775         return 0;
1776 }
1777
1778 /* HW Lock for shared dual port PHYs */
1779 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1780 {
1781         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1782
1783         mutex_lock(&bp->port.phy_mutex);
1784
1785         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1786             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1787                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1788 }
1789
1790 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1791 {
1792         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1793
1794         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1795             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1796                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1797
1798         mutex_unlock(&bp->port.phy_mutex);
1799 }
1800
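/* Each GPIO pin has SET, CLR and FLOAT control bits in MISC_REG_GPIO.
 * Driving the pin means clearing FLOAT and setting either SET or CLR;
 * floating it (input/hi-Z) means setting FLOAT again.  The whole
 * read-modify-write is done under the GPIO HW lock.
 */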
1801 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1802 {
1803         /* The GPIO should be swapped if swap register is set and active */
1804         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1805                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1806         int gpio_shift = gpio_num +
1807                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1808         u32 gpio_mask = (1 << gpio_shift);
1809         u32 gpio_reg;
1810
1811         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1812                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1813                 return -EINVAL;
1814         }
1815
1816         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1817         /* read GPIO and mask except the float bits */
1818         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1819
1820         switch (mode) {
1821         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1822                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1823                    gpio_num, gpio_shift);
1824                 /* clear FLOAT and set CLR */
1825                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1826                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1827                 break;
1828
1829         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1830                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1831                    gpio_num, gpio_shift);
1832                 /* clear FLOAT and set SET */
1833                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1834                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1835                 break;
1836
1837         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1838                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1839                    gpio_num, gpio_shift);
1840                 /* set FLOAT */
1841                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1842                 break;
1843
1844         default:
1845                 break;
1846         }
1847
1848         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1849         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1850
1851         return 0;
1852 }
1853
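/* SPIO pins (4 to 7 only) work like the GPIOs above, with SET/CLR/FLOAT
 * bits in MISC_REG_SPIO, but with no port-swap handling.
 */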
1854 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1855 {
1856         u32 spio_mask = (1 << spio_num);
1857         u32 spio_reg;
1858
1859         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1860             (spio_num > MISC_REGISTERS_SPIO_7)) {
1861                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1862                 return -EINVAL;
1863         }
1864
1865         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1866         /* read SPIO and mask except the float bits */
1867         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1868
1869         switch (mode) {
1870         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1871                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1872                 /* clear FLOAT and set CLR */
1873                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1874                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1875                 break;
1876
1877         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1878                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1879                 /* clear FLOAT and set SET */
1880                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1881                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1882                 break;
1883
1884         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1885                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1886                 /* set FLOAT */
1887                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1888                 break;
1889
1890         default:
1891                 break;
1892         }
1893
1894         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1895         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1896
1897         return 0;
1898 }
1899
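/* Translate the negotiated IEEE pause settings into the ethtool
 * ADVERTISED_Pause/ADVERTISED_Asym_Pause advertising bits.
 */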
1900 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1901 {
1902         switch (bp->link_vars.ieee_fc) {
1903         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1904                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1905                                           ADVERTISED_Pause);
1906                 break;
1907         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1908                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1909                                          ADVERTISED_Pause);
1910                 break;
1911         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1912                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1913                 break;
1914         default:
1915                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1916                                           ADVERTISED_Pause);
1917                 break;
1918         }
1919 }
1920
1921 static void bnx2x_link_report(struct bnx2x *bp)
1922 {
1923         if (bp->link_vars.link_up) {
1924                 if (bp->state == BNX2X_STATE_OPEN)
1925                         netif_carrier_on(bp->dev);
1926                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1927
1928                 printk("%d Mbps ", bp->link_vars.line_speed);
1929
1930                 if (bp->link_vars.duplex == DUPLEX_FULL)
1931                         printk("full duplex");
1932                 else
1933                         printk("half duplex");
1934
1935                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1936                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1937                                 printk(", receive ");
1938                                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1939                                         printk("& transmit ");
1940                         } else {
1941                                 printk(", transmit ");
1942                         }
1943                         printk("flow control ON");
1944                 }
1945                 printk("\n");
1946
1947         } else { /* link_down */
1948                 netif_carrier_off(bp->dev);
1949                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1950         }
1951 }
1952
1953 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1954 {
1955         if (!BP_NOMCP(bp)) {
1956                 u8 rc;
1957
1958                 /* Initialize link parameters structure variables */
1959                 /* It is recommended to turn off RX FC for jumbo frames
1960                    for better performance */
1961                 if (IS_E1HMF(bp))
1962                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1963                 else if (bp->dev->mtu > 5000)
1964                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1965                 else
1966                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1967
1968                 bnx2x_acquire_phy_lock(bp);
1969                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1970                 bnx2x_release_phy_lock(bp);
1971
1972                 if (bp->link_vars.link_up)
1973                         bnx2x_link_report(bp);
1974
1975                 bnx2x_calc_fc_adv(bp);
1976
1977                 return rc;
1978         }
1979         BNX2X_ERR("Bootcode is missing - not initializing link\n");
1980         return -EINVAL;
1981 }
1982
1983 static void bnx2x_link_set(struct bnx2x *bp)
1984 {
1985         if (!BP_NOMCP(bp)) {
1986                 bnx2x_acquire_phy_lock(bp);
1987                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1988                 bnx2x_release_phy_lock(bp);
1989
1990                 bnx2x_calc_fc_adv(bp);
1991         } else
1992                 BNX2X_ERR("Bootcode is missing - not setting link\n");
1993 }
1994
1995 static void bnx2x__link_reset(struct bnx2x *bp)
1996 {
1997         if (!BP_NOMCP(bp)) {
1998                 bnx2x_acquire_phy_lock(bp);
1999                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2000                 bnx2x_release_phy_lock(bp);
2001         } else
2002                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
2003 }
2004
2005 static u8 bnx2x_link_test(struct bnx2x *bp)
2006 {
2007         u8 rc;
2008
2009         bnx2x_acquire_phy_lock(bp);
2010         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2011         bnx2x_release_phy_lock(bp);
2012
2013         return rc;
2014 }
2015
2016 /* Calculates the sum of vn_min_rates.
2017    It's needed for further normalization of the min_rates.
2018
2019    Returns:
2020      sum of vn_min_rates
2021        or
2022      0 - if all the min_rates are 0.
2023      In the latter case the fairness algorithm should be deactivated.
2024      If not all min_rates are zero, then those that are zero will
2025      be set to 1.
2026  */
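/* For example: if the four vns are configured with raw min BW values of
 * 10, 0, 20 and 0 (scaled by 100 below) and none is hidden, the two
 * zeroes are bumped to DEF_MIN_RATE and
 * wsum = 1000 + DEF_MIN_RATE + 2000 + DEF_MIN_RATE.
 */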
2027 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2028 {
2029         int i, port = BP_PORT(bp);
2030         u32 wsum = 0;
2031         int all_zero = 1;
2032
2033         for (i = 0; i < E1HVN_MAX; i++) {
2034                 u32 vn_cfg =
2035                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2036                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2037                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2038                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2039                         /* If min rate is zero - set it to 1 */
2040                         if (!vn_min_rate)
2041                                 vn_min_rate = DEF_MIN_RATE;
2042                         else
2043                                 all_zero = 0;
2044
2045                         wsum += vn_min_rate;
2046                 }
2047         }
2048
2049         /* ... only if all min rates are zeros - disable FAIRNESS */
2050         if (all_zero)
2051                 return 0;
2052
2053         return wsum;
2054 }
2055
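/* Programs the per-port congestion management (rate shaping + fairness)
 * parameters into XSTORM internal memory.  Note the units: port_rate is
 * in Mbps, so r_param = port_rate/8 is the line rate in bytes per usec,
 * and the SDM timer values are given in 4-usec ticks.
 */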
2056 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2057                                    int en_fness,
2058                                    u16 port_rate,
2059                                    struct cmng_struct_per_port *m_cmng_port)
2060 {
2061         u32 r_param = port_rate / 8;
2062         int port = BP_PORT(bp);
2063         int i;
2064
2065         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2066
2067         /* Enable minmax only if we are in e1hmf mode */
2068         if (IS_E1HMF(bp)) {
2069                 u32 fair_periodic_timeout_usec;
2070                 u32 t_fair;
2071
2072                 /* Enable rate shaping and fairness */
2073                 m_cmng_port->flags.cmng_vn_enable = 1;
2074                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2075                 m_cmng_port->flags.rate_shaping_enable = 1;
2076
2077                 if (!en_fness)
2078                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2079                            "  fairness will be disabled\n");
2080
2081                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2082                 m_cmng_port->rs_vars.rs_periodic_timeout =
2083                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2084
2085                 /* this is the threshold below which no timer arming will occur.
2086                    The 1.25 coefficient makes the threshold a little bigger
2087                    than the real time, to compensate for timer inaccuracy */
2088                 m_cmng_port->rs_vars.rs_threshold =
2089                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2090
2091                 /* resolution of fairness timer */
2092                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2093                 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2094                 t_fair = T_FAIR_COEF / port_rate;
2095
2096                 /* this is the threshold below which we won't arm
2097                    the timer anymore */
2098                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2099
2100                 /* we multiply by 1e3/8 to get bytes/msec.
2101                    We don't want the credits to exceed
2102                    T_FAIR*FAIR_MEM (the algorithm resolution) */
2103                 m_cmng_port->fair_vars.upper_bound =
2104                                                 r_param * t_fair * FAIR_MEM;
2105                 /* since each tick is 4 usec */
2106                 m_cmng_port->fair_vars.fairness_timeout =
2107                                                 fair_periodic_timeout_usec / 4;
2108
2109         } else {
2110                 /* Disable rate shaping and fairness */
2111                 m_cmng_port->flags.cmng_vn_enable = 0;
2112                 m_cmng_port->flags.fairness_enable = 0;
2113                 m_cmng_port->flags.rate_shaping_enable = 0;
2114
2115                 DP(NETIF_MSG_IFUP,
2116                    "Single function mode  minmax will be disabled\n");
2117         }
2118
2119         /* Store it to internal memory */
2120         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2121                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2122                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2123                        ((u32 *)(m_cmng_port))[i]);
2124 }
2125
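/* Per-vn companion of the function above: derives this vn's min/max
 * rates from its mf_cfg entry and writes the resulting rate shaping and
 * fairness contexts into XSTORM internal memory.
 */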
2126 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2127                                    u32 wsum, u16 port_rate,
2128                                  struct cmng_struct_per_port *m_cmng_port)
2129 {
2130         struct rate_shaping_vars_per_vn m_rs_vn;
2131         struct fairness_vars_per_vn m_fair_vn;
2132         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2133         u16 vn_min_rate, vn_max_rate;
2134         int i;
2135
2136         /* If function is hidden - set min and max to zeroes */
2137         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2138                 vn_min_rate = 0;
2139                 vn_max_rate = 0;
2140
2141         } else {
2142                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2143                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2144                 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2145                    if current min rate is zero - set it to 1.
2146                    This is a requirement of the algorithm. */
2147                 if ((vn_min_rate == 0) && wsum)
2148                         vn_min_rate = DEF_MIN_RATE;
2149                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2150                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2151         }
2152
2153         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2154            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2155
2156         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2157         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2158
2159         /* global vn counter - maximal Mbps for this vn */
2160         m_rs_vn.vn_counter.rate = vn_max_rate;
2161
2162         /* quota - number of bytes transmitted in this period */
2163         m_rs_vn.vn_counter.quota =
2164                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2165
2166 #ifdef BNX2X_PER_PROT_QOS
2167         /* per protocol counter */
2168         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2169                 /* maximal Mbps for this protocol */
2170                 m_rs_vn.protocol_counters[protocol].rate =
2171                                                 protocol_max_rate[protocol];
2172                 /* the quota in each timer period -
2173                    number of bytes transmitted in this period */
2174                 m_rs_vn.protocol_counters[protocol].quota =
2175                         (u32)(rs_periodic_timeout_usec *
2176                           ((double)m_rs_vn.
2177                                    protocol_counters[protocol].rate/8));
2178         }
2179 #endif
2180
2181         if (wsum) {
2182                 /* credit for each period of the fairness algorithm:
2183                    number of bytes in T_FAIR (the vns share the port rate).
2184                    wsum should not be larger than 10000, thus
2185                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2186                 m_fair_vn.vn_credit_delta =
2187                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2188                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2189                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2190                    m_fair_vn.vn_credit_delta);
2191         }
2192
2193 #ifdef BNX2X_PER_PROT_QOS
2194         do {
2195                 u32 protocolWeightSum = 0;
2196
2197                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2198                         protocolWeightSum +=
2199                                         drvInit.protocol_min_rate[protocol];
2200                 /* per protocol counter -
2201                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2202                 if (protocolWeightSum > 0) {
2203                         for (protocol = 0;
2204                              protocol < NUM_OF_PROTOCOLS; protocol++)
2205                                 /* credit for each period of the
2206                                    fairness algorithm - number of bytes in
2207                                    T_FAIR (the protocols share the vn rate) */
2208                                 m_fair_vn.protocol_credit_delta[protocol] =
2209                                         (u32)((vn_min_rate / 8) * t_fair *
2210                                         protocol_min_rate / protocolWeightSum);
2211                 }
2212         } while (0);
2213 #endif
2214
2215         /* Store it to internal memory */
2216         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2217                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2218                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2219                        ((u32 *)(&m_rs_vn))[i]);
2220
2221         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2222                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2223                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2224                        ((u32 *)(&m_fair_vn))[i]);
2225 }
2226
2227 /* This function is called upon link interrupt */
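/* Besides updating the link state it re-syncs the statistics, notifies
 * the other functions on the same port through the general attention
 * registers and, on E1H with link up, re-initializes the rate shaping
 * and fairness contexts for the new line speed.
 */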
2228 static void bnx2x_link_attn(struct bnx2x *bp)
2229 {
2230         int vn;
2231
2232         /* Make sure that we are synced with the current statistics */
2233         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2234
2235         bnx2x_acquire_phy_lock(bp);
2236         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2237         bnx2x_release_phy_lock(bp);
2238
2239         if (bp->link_vars.link_up) {
2240
2241                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2242                         struct host_port_stats *pstats;
2243
2244                         pstats = bnx2x_sp(bp, port_stats);
2245                         /* reset old bmac stats */
2246                         memset(&(pstats->mac_stx[0]), 0,
2247                                sizeof(struct mac_stx));
2248                 }
2249                 if ((bp->state == BNX2X_STATE_OPEN) ||
2250                     (bp->state == BNX2X_STATE_DISABLED))
2251                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2252         }
2253
2254         /* indicate link status */
2255         bnx2x_link_report(bp);
2256
2257         if (IS_E1HMF(bp)) {
2258                 int func;
2259
2260                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2261                         if (vn == BP_E1HVN(bp))
2262                                 continue;
2263
2264                         func = ((vn << 1) | BP_PORT(bp));
2265
2266                         /* Set the attention towards other drivers
2267                            on the same port */
2268                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2269                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2270                 }
2271         }
2272
2273         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2274                 struct cmng_struct_per_port m_cmng_port;
2275                 u32 wsum;
2276                 int port = BP_PORT(bp);
2277
2278                 /* Init RATE SHAPING and FAIRNESS contexts */
2279                 wsum = bnx2x_calc_vn_wsum(bp);
2280                 bnx2x_init_port_minmax(bp, (int)wsum,
2281                                         bp->link_vars.line_speed,
2282                                         &m_cmng_port);
2283                 if (IS_E1HMF(bp))
2284                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2285                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2286                                         wsum, bp->link_vars.line_speed,
2287                                                      &m_cmng_port);
2288         }
2289 }
2290
2291 static void bnx2x__link_status_update(struct bnx2x *bp)
2292 {
2293         if (bp->state != BNX2X_STATE_OPEN)
2294                 return;
2295
2296         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2297
2298         if (bp->link_vars.link_up)
2299                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2300         else
2301                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2302
2303         /* indicate link status */
2304         bnx2x_link_report(bp);
2305 }
2306
2307 static void bnx2x_pmf_update(struct bnx2x *bp)
2308 {
2309         int port = BP_PORT(bp);
2310         u32 val;
2311
2312         bp->port.pmf = 1;
2313         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2314
2315         /* enable nig attention */
2316         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2317         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2318         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2319
2320         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2321 }
2322
2323 /* end of Link */
2324
2325 /* slow path */
2326
2327 /*
2328  * General service functions
2329  */
2330
2331 /* the slow path queue is odd since completions arrive on the fastpath ring */
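/* Posting is done under spq_lock: fill the next BD in the SPQ ring, then
 * write the new producer index to XSTORM so the FW picks the entry up.
 */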
2332 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2333                          u32 data_hi, u32 data_lo, int common)
2334 {
2335         int func = BP_FUNC(bp);
2336
2337         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2338            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2339            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2340            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2341            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2342
2343 #ifdef BNX2X_STOP_ON_ERROR
2344         if (unlikely(bp->panic))
2345                 return -EIO;
2346 #endif
2347
2348         spin_lock_bh(&bp->spq_lock);
2349
2350         if (!bp->spq_left) {
2351                 BNX2X_ERR("BUG! SPQ ring full!\n");
2352                 spin_unlock_bh(&bp->spq_lock);
2353                 bnx2x_panic();
2354                 return -EBUSY;
2355         }
2356
2357         /* CID needs port number to be encoded in it */
2358         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2359                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2360                                      HW_CID(bp, cid)));
2361         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2362         if (common)
2363                 bp->spq_prod_bd->hdr.type |=
2364                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2365
2366         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2367         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2368
2369         bp->spq_left--;
2370
2371         if (bp->spq_prod_bd == bp->spq_last_bd) {
2372                 bp->spq_prod_bd = bp->spq;
2373                 bp->spq_prod_idx = 0;
2374                 DP(NETIF_MSG_TIMER, "end of spq\n");
2375
2376         } else {
2377                 bp->spq_prod_bd++;
2378                 bp->spq_prod_idx++;
2379         }
2380
2381         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2382                bp->spq_prod_idx);
2383
2384         spin_unlock_bh(&bp->spq_lock);
2385         return 0;
2386 }
2387
2388 /* acquire split MCP access lock register */
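/* Bit 31 of the register at GRCBASE_MCP + 0x9c is the lock bit: write it
 * and poll until it reads back set, retrying every 5ms for up to 5s.
 */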
2389 static int bnx2x_acquire_alr(struct bnx2x *bp)
2390 {
2391         u32 i, j, val;
2392         int rc = 0;
2393
2394         might_sleep();
2395         i = 100;
2396         for (j = 0; j < i*10; j++) {
2397                 val = (1UL << 31);
2398                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2399                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2400                 if (val & (1L << 31))
2401                         break;
2402
2403                 msleep(5);
2404         }
2405         if (!(val & (1L << 31))) {
2406                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2407                 rc = -EBUSY;
2408         }
2409
2410         return rc;
2411 }
2412
2413 /* release split MCP access lock register */
2414 static void bnx2x_release_alr(struct bnx2x *bp)
2415 {
2416         u32 val = 0;
2417
2418         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2419 }
2420
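/* Compares the cached default status block indices with the ones the
 * chip last wrote and returns a bitmask of what changed: 1 - attention
 * bits, 2/4/8/16 - the C/U/X/T storm status block indices respectively.
 */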
2421 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2422 {
2423         struct host_def_status_block *def_sb = bp->def_status_blk;
2424         u16 rc = 0;
2425
2426         barrier(); /* status block is written to by the chip */
2427         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2428                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2429                 rc |= 1;
2430         }
2431         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2432                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2433                 rc |= 2;
2434         }
2435         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2436                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2437                 rc |= 4;
2438         }
2439         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2440                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2441                 rc |= 8;
2442         }
2443         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2444                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2445                 rc |= 16;
2446         }
2447         return rc;
2448 }
2449
2450 /*
2451  * slow path service functions
2452  */
2453
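/* Handle newly asserted attention bits: mask them in the AEU (under the
 * per-port attention HW lock), record them in attn_state, service the
 * hardwired sources (NIG link, GPIOs, general attentions) and finally
 * write the bits to the HC ATTN_BITS_SET command register.
 */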
2454 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2455 {
2456         int port = BP_PORT(bp);
2457         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2458                        COMMAND_REG_ATTN_BITS_SET);
2459         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2460                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2461         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2462                                        NIG_REG_MASK_INTERRUPT_PORT0;
2463         u32 aeu_mask;
2464
2465         if (bp->attn_state & asserted)
2466                 BNX2X_ERR("IGU ERROR\n");
2467
2468         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2469         aeu_mask = REG_RD(bp, aeu_addr);
2470
2471         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2472            aeu_mask, asserted);
2473         aeu_mask &= ~(asserted & 0xff);
2474         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2475
2476         REG_WR(bp, aeu_addr, aeu_mask);
2477         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2478
2479         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2480         bp->attn_state |= asserted;
2481         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2482
2483         if (asserted & ATTN_HARD_WIRED_MASK) {
2484                 if (asserted & ATTN_NIG_FOR_FUNC) {
2485
2486                         /* save nig interrupt mask */
2487                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2488                         REG_WR(bp, nig_int_mask_addr, 0);
2489
2490                         bnx2x_link_attn(bp);
2491
2492                         /* handle unicore attn? */
2493                 }
2494                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2495                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2496
2497                 if (asserted & GPIO_2_FUNC)
2498                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2499
2500                 if (asserted & GPIO_3_FUNC)
2501                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2502
2503                 if (asserted & GPIO_4_FUNC)
2504                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2505
2506                 if (port == 0) {
2507                         if (asserted & ATTN_GENERAL_ATTN_1) {
2508                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2509                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2510                         }
2511                         if (asserted & ATTN_GENERAL_ATTN_2) {
2512                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2513                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2514                         }
2515                         if (asserted & ATTN_GENERAL_ATTN_3) {
2516                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2517                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2518                         }
2519                 } else {
2520                         if (asserted & ATTN_GENERAL_ATTN_4) {
2521                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2522                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2523                         }
2524                         if (asserted & ATTN_GENERAL_ATTN_5) {
2525                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2526                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2527                         }
2528                         if (asserted & ATTN_GENERAL_ATTN_6) {
2529                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2530                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2531                         }
2532                 }
2533
2534         } /* if hardwired */
2535
2536         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2537            asserted, hc_addr);
2538         REG_WR(bp, hc_addr, asserted);
2539
2540         /* now set back the mask */
2541         if (asserted & ATTN_NIG_FOR_FUNC)
2542                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2543 }
2544
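/* The four deasserted0..3 helpers below each decode one 32-bit word of
 * the AEU "after invert" attention signals; fatal HW block attentions
 * end in bnx2x_panic().
 */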
2545 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2546 {
2547         int port = BP_PORT(bp);
2548         int reg_offset;
2549         u32 val;
2550
2551         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2552                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2553
2554         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2555
2556                 val = REG_RD(bp, reg_offset);
2557                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2558                 REG_WR(bp, reg_offset, val);
2559
2560                 BNX2X_ERR("SPIO5 hw attention\n");
2561
2562                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2563                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2564                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2565                         /* Fan failure attention */
2566
2567                         /* The PHY reset is controlled by GPIO 1 */
2568                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2569                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2570                         /* Low power mode is controlled by GPIO 2 */
2571                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2572                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2573                         /* mark the failure */
2574                         bp->link_params.ext_phy_config &=
2575                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2576                         bp->link_params.ext_phy_config |=
2577                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2578                         SHMEM_WR(bp,
2579                                  dev_info.port_hw_config[port].
2580                                                         external_phy_config,
2581                                  bp->link_params.ext_phy_config);
2582                         /* log the failure */
2583                         printk(KERN_ERR PFX "Fan Failure on Network"
2584                                " Controller %s has caused the driver to"
2585                                " shutdown the card to prevent permanent"
2586                                " damage.  Please contact Dell Support for"
2587                                " assistance\n", bp->dev->name);
2588                         break;
2589
2590                 default:
2591                         break;
2592                 }
2593         }
2594
2595         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2596
2597                 val = REG_RD(bp, reg_offset);
2598                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2599                 REG_WR(bp, reg_offset, val);
2600
2601                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2602                           (attn & HW_INTERRUT_ASSERT_SET_0));
2603                 bnx2x_panic();
2604         }
2605 }
2606
2607 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2608 {
2609         u32 val;
2610
2611         if (attn & BNX2X_DOORQ_ASSERT) {
2612
2613                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2614                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2615                 /* DORQ discard attention */
2616                 if (val & 0x2)
2617                         BNX2X_ERR("FATAL error from DORQ\n");
2618         }
2619
2620         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2621
2622                 int port = BP_PORT(bp);
2623                 int reg_offset;
2624
2625                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2626                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2627
2628                 val = REG_RD(bp, reg_offset);
2629                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2630                 REG_WR(bp, reg_offset, val);
2631
2632                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2633                           (attn & HW_INTERRUT_ASSERT_SET_1));
2634                 bnx2x_panic();
2635         }
2636 }
2637
2638 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2639 {
2640         u32 val;
2641
2642         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2643
2644                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2645                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2646                 /* CFC error attention */
2647                 if (val & 0x2)
2648                         BNX2X_ERR("FATAL error from CFC\n");
2649         }
2650
2651         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2652
2653                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2654                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2655                 /* RQ_USDMDP_FIFO_OVERFLOW */
2656                 if (val & 0x18000)
2657                         BNX2X_ERR("FATAL error from PXP\n");
2658         }
2659
2660         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2661
2662                 int port = BP_PORT(bp);
2663                 int reg_offset;
2664
2665                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2666                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2667
2668                 val = REG_RD(bp, reg_offset);
2669                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2670                 REG_WR(bp, reg_offset, val);
2671
2672                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2673                           (attn & HW_INTERRUT_ASSERT_SET_2));
2674                 bnx2x_panic();
2675         }
2676 }
2677
2678 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2679 {
2680         u32 val;
2681
2682         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2683
2684                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2685                         int func = BP_FUNC(bp);
2686
2687                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2688                         bnx2x__link_status_update(bp);
2689                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2690                                                         DRV_STATUS_PMF)
2691                                 bnx2x_pmf_update(bp);
2692
2693                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2694
2695                         BNX2X_ERR("MC assert!\n");
2696                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2697                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2698                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2699                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2700                         bnx2x_panic();
2701
2702                 } else if (attn & BNX2X_MCP_ASSERT) {
2703
2704                         BNX2X_ERR("MCP assert!\n");
2705                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2706                         bnx2x_fw_dump(bp);
2707
2708                 } else
2709                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2710         }
2711
2712         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2713                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2714                 if (attn & BNX2X_GRC_TIMEOUT) {
2715                         val = CHIP_IS_E1H(bp) ?
2716                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2717                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2718                 }
2719                 if (attn & BNX2X_GRC_RSV) {
2720                         val = CHIP_IS_E1H(bp) ?
2721                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2722                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2723                 }
2724                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2725         }
2726 }
2727
2728 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2729 {
2730         struct attn_route attn;
2731         struct attn_route group_mask;
2732         int port = BP_PORT(bp);
2733         int index;
2734         u32 reg_addr;
2735         u32 val;
2736         u32 aeu_mask;
2737
2738         /* need to take HW lock because MCP or other port might also
2739            try to handle this event */
2740         bnx2x_acquire_alr(bp);
2741
2742         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2743         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2744         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2745         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2746         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2747            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2748
2749         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2750                 if (deasserted & (1 << index)) {
2751                         group_mask = bp->attn_group[index];
2752
2753                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2754                            index, group_mask.sig[0], group_mask.sig[1],
2755                            group_mask.sig[2], group_mask.sig[3]);
2756
2757                         bnx2x_attn_int_deasserted3(bp,
2758                                         attn.sig[3] & group_mask.sig[3]);
2759                         bnx2x_attn_int_deasserted1(bp,
2760                                         attn.sig[1] & group_mask.sig[1]);
2761                         bnx2x_attn_int_deasserted2(bp,
2762                                         attn.sig[2] & group_mask.sig[2]);
2763                         bnx2x_attn_int_deasserted0(bp,
2764                                         attn.sig[0] & group_mask.sig[0]);
2765
2766                         if ((attn.sig[0] & group_mask.sig[0] &
2767                                                 HW_PRTY_ASSERT_SET_0) ||
2768                             (attn.sig[1] & group_mask.sig[1] &
2769                                                 HW_PRTY_ASSERT_SET_1) ||
2770                             (attn.sig[2] & group_mask.sig[2] &
2771                                                 HW_PRTY_ASSERT_SET_2))
2772                                 BNX2X_ERR("FATAL HW block parity attention\n");
2773                 }
2774         }
2775
2776         bnx2x_release_alr(bp);
2777
2778         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2779
2780         val = ~deasserted;
2781         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2782            val, reg_addr);
2783         REG_WR(bp, reg_addr, val);
2784
2785         if (~bp->attn_state & deasserted)
2786                 BNX2X_ERR("IGU ERROR\n");
2787
2788         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2789                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2790
2791         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2792         aeu_mask = REG_RD(bp, reg_addr);
2793
2794         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2795            aeu_mask, deasserted);
2796         aeu_mask |= (deasserted & 0xff);
2797         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2798
2799         REG_WR(bp, reg_addr, aeu_mask);
2800         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2801
2802         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2803         bp->attn_state &= ~deasserted;
2804         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2805 }
2806
2807 static void bnx2x_attn_int(struct bnx2x *bp)
2808 {
2809         /* read local copy of bits */
2810         u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2811         u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2812         u32 attn_state = bp->attn_state;
2813
2814         /* look for changed bits */
2815         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2816         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2817
2818         DP(NETIF_MSG_HW,
2819            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2820            attn_bits, attn_ack, asserted, deasserted);
2821
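             /* a bit on which attn_bits and attn_ack agree has no transition
                in flight, so it must also match the soft attn_state copy;
                a mismatch means an attention event was missed */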
2822         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2823                 BNX2X_ERR("BAD attention state\n");
2824
2825         /* handle bits that were raised */
2826         if (asserted)
2827                 bnx2x_attn_int_asserted(bp, asserted);
2828
2829         if (deasserted)
2830                 bnx2x_attn_int_deasserted(bp, deasserted);
2831 }
2832
2833 static void bnx2x_sp_task(struct work_struct *work)
2834 {
2835         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2836         u16 status;
2837
2839         /* Return here if interrupt is disabled */
2840         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2841                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2842                 return;
2843         }
2844
2845         status = bnx2x_update_dsb_idx(bp);
2846 /*      if (status == 0)                                     */
2847 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2848
2849         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2850
2851         /* HW attentions */
2852         if (status & 0x1)
2853                 bnx2x_attn_int(bp);
2854
2855         /* CStorm events: query_stats, port delete ramrod */
2856         if (status & 0x2)
2857                 bp->stats_pending = 0;
2858
2859         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2860                      IGU_INT_NOP, 1);
2861         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2862                      IGU_INT_NOP, 1);
2863         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2864                      IGU_INT_NOP, 1);
2865         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2866                      IGU_INT_NOP, 1);
2867         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2868                      IGU_INT_ENABLE, 1);
2870 }
2871
2872 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2873 {
2874         struct net_device *dev = dev_instance;
2875         struct bnx2x *bp = netdev_priv(dev);
2876
2877         /* Return here if interrupt is disabled */
2878         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2879                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2880                 return IRQ_HANDLED;
2881         }
2882
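             /* mask further slowpath interrupts at the IGU; the final ack in
                bnx2x_sp_task() re-enables them */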
2883         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2884
2885 #ifdef BNX2X_STOP_ON_ERROR
2886         if (unlikely(bp->panic))
2887                 return IRQ_HANDLED;
2888 #endif
2889
2890         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2891
2892         return IRQ_HANDLED;
2893 }
2894
2895 /* end of slow path */
2896
2897 /* Statistics */
2898
2899 /****************************************************************************
2900 * Macros
2901 ****************************************************************************/
2902
2903 /* sum[hi:lo] += add[hi:lo] */
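     /*
      * After the unsigned 32-bit addition of the low words, (s_lo < a_lo)
      * is true exactly when the sum wrapped around, so it supplies the
      * carry that is folded into the high word.
      */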
2904 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2905         do { \
2906                 s_lo += a_lo; \
2907                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2908         } while (0)
2909
2910 /* difference = minuend - subtrahend */
2911 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2912         do { \
2913                 if (m_lo < s_lo) { \
2914                         /* underflow */ \
2915                         d_hi = m_hi - s_hi; \
2916                         if (d_hi > 0) { \
2917                                 /* we can 'loan' 1 */ \
2918                                 /* we can 'borrow' 1 */ \
2919                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2920                         } else { \
2921                                 /* m_hi <= s_hi */ \
2922                                 d_hi = 0; \
2923                                 d_lo = 0; \
2924                         } \
2925                 } else { \
2926                         /* m_lo >= s_lo */ \
2927                         if (m_hi < s_hi) { \
2928                                 d_hi = 0; \
2929                                 d_lo = 0; \
2930                         } else { \
2931                                 /* m_hi >= s_hi */ \
2932                                 d_hi = m_hi - s_hi; \
2933                                 d_lo = m_lo - s_lo; \
2934                         } \
2935                 } \
2936         } while (0)
2937
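     /*
      * The MAC hardware counters are free-running: each pass saves the
      * current reading in mac_stx[0] and accumulates the delta since the
      * previous reading into the 64-bit totals in mac_stx[1].
      */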
2938 #define UPDATE_STAT64(s, t) \
2939         do { \
2940                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2941                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2942                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2943                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2944                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2945                        pstats->mac_stx[1].t##_lo, diff.lo); \
2946         } while (0)
2947
2948 #define UPDATE_STAT64_NIG(s, t) \
2949         do { \
2950                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2951                         diff.lo, new->s##_lo, old->s##_lo); \
2952                 ADD_64(estats->t##_hi, diff.hi, \
2953                        estats->t##_lo, diff.lo); \
2954         } while (0)
2955
2956 /* sum[hi:lo] += add */
2957 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2958         do { \
2959                 s_lo += a; \
2960                 s_hi += (s_lo < a) ? 1 : 0; \
2961         } while (0)
2962
2963 #define UPDATE_EXTEND_STAT(s) \
2964         do { \
2965                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2966                               pstats->mac_stx[1].s##_lo, \
2967                               new->s); \
2968         } while (0)
2969
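     /*
      * The storm per-client counters are only 32 bits wide; they are
      * extended to 64 bits by snapshotting the previous value and adding
      * the delta to the running 64-bit counter.
      */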
2970 #define UPDATE_EXTEND_TSTAT(s, t) \
2971         do { \
2972                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2973                 old_tclient->s = le32_to_cpu(tclient->s); \
2974                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2975         } while (0)
2976
2977 #define UPDATE_EXTEND_XSTAT(s, t) \
2978         do { \
2979                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2980                 old_xclient->s = le32_to_cpu(xclient->s); \
2981                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2982         } while (0)
2983
2984 /*
2985  * General service functions
2986  */
2987
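     /*
      * Fold a hi/lo counter pair into a long: the full 64-bit value on
      * 64-bit hosts, only the low 32 bits elsewhere (net_device_stats
      * fields are unsigned long).
      */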
2988 static inline long bnx2x_hilo(u32 *hiref)
2989 {
2990         u32 lo = *(hiref + 1);
2991 #if (BITS_PER_LONG == 64)
2992         u32 hi = *hiref;
2993
2994         return HILO_U64(hi, lo);
2995 #else
2996         return lo;
2997 #endif
2998 }
2999
3000 /*
3001  * Init service functions
3002  */
3003
3004 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3005 {
3006         if (!bp->stats_pending) {
3007                 struct eth_query_ramrod_data ramrod_data = {0};
3008                 int rc;
3009
3010                 ramrod_data.drv_counter = bp->stats_counter++;
3011                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3012                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3013
3014                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3015                                    ((u32 *)&ramrod_data)[1],
3016                                    ((u32 *)&ramrod_data)[0], 0);
3017                 if (rc == 0) {
3018                         /* stats ramrod has its own slot on the spq */
3019                         bp->spq_left++;
3020                         bp->stats_pending = 1;
3021                 }
3022         }
3023 }
3024
3025 static void bnx2x_stats_init(struct bnx2x *bp)
3026 {
3027         int port = BP_PORT(bp);
3028
3029         bp->executer_idx = 0;
3030         bp->stats_counter = 0;
3031
3032         /* port stats */
3033         if (!BP_NOMCP(bp))
3034                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3035         else
3036                 bp->port.port_stx = 0;
3037         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3038
3039         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3040         bp->port.old_nig_stats.brb_discard =
3041                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3042         bp->port.old_nig_stats.brb_truncate =
3043                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3044         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3045                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3046         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3047                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3048
3049         /* function stats */
3050         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3051         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3052         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3053         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3054
3055         bp->stats_state = STATS_STATE_DISABLED;
3056         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3057                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3058 }
3059
3060 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3061 {
3062         struct dmae_command *dmae = &bp->stats_dmae;
3063         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3064
3065         *stats_comp = DMAE_COMP_VAL;
3066
3067         /* loader */
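             /* rather than kicking each queued command from the CPU, the
                loader DMAEs the first queued command into the engine's
                command memory at slot loader_idx + 1; its completion writes
                1 to that slot's GO register, starting the copied command */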
3068         if (bp->executer_idx) {
3069                 int loader_idx = PMF_DMAE_C(bp);
3070
3071                 memset(dmae, 0, sizeof(struct dmae_command));
3072
3073                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3074                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3075                                 DMAE_CMD_DST_RESET |
3076 #ifdef __BIG_ENDIAN
3077                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3078 #else
3079                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3080 #endif
3081                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3082                                                DMAE_CMD_PORT_0) |
3083                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3084                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3085                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3086                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3087                                      sizeof(struct dmae_command) *
3088                                      (loader_idx + 1)) >> 2;
3089                 dmae->dst_addr_hi = 0;
3090                 dmae->len = sizeof(struct dmae_command) >> 2;
3091                 if (CHIP_IS_E1(bp))
3092                         dmae->len--;
3093                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3094                 dmae->comp_addr_hi = 0;
3095                 dmae->comp_val = 1;
3096
3097                 *stats_comp = 0;
3098                 bnx2x_post_dmae(bp, dmae, loader_idx);
3099
3100         } else if (bp->func_stx) {
3101                 *stats_comp = 0;
3102                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3103         }
3104 }
3105
3106 static int bnx2x_stats_comp(struct bnx2x *bp)
3107 {
3108         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3109         int cnt = 10;
3110
3111         might_sleep();
3112         while (*stats_comp != DMAE_COMP_VAL) {
3113                 if (!cnt) {
3114                         BNX2X_ERR("timed out waiting for stats to finish\n");
3115                         break;
3116                 }
3117                 cnt--;
3118                 msleep(1);
3119         }
3120         return 1;
3121 }
3122
3123 /*
3124  * Statistics service functions
3125  */
3126
3127 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3128 {
3129         struct dmae_command *dmae;
3130         u32 opcode;
3131         int loader_idx = PMF_DMAE_C(bp);
3132         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3133
3134         /* sanity */
3135         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3136                 BNX2X_ERR("BUG!\n");
3137                 return;
3138         }
3139
3140         bp->executer_idx = 0;
3141
3142         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3143                   DMAE_CMD_C_ENABLE |
3144                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3145 #ifdef __BIG_ENDIAN
3146                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3147 #else
3148                   DMAE_CMD_ENDIANITY_DW_SWAP |
3149 #endif
3150                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3151                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3152
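             /* the port stats block is longer than one DMAE read allows
                (DMAE_LEN32_RD_MAX dwords), so it is fetched in two chunks;
                only the second command signals stats_comp */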
3153         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3154         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3155         dmae->src_addr_lo = bp->port.port_stx >> 2;
3156         dmae->src_addr_hi = 0;
3157         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3158         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3159         dmae->len = DMAE_LEN32_RD_MAX;
3160         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3161         dmae->comp_addr_hi = 0;
3162         dmae->comp_val = 1;
3163
3164         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3165         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3166         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3167         dmae->src_addr_hi = 0;
3168         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3169                                    DMAE_LEN32_RD_MAX * 4);
3170         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3171                                    DMAE_LEN32_RD_MAX * 4);
3172         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3173         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3174         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3175         dmae->comp_val = DMAE_COMP_VAL;
3176
3177         *stats_comp = 0;
3178         bnx2x_hw_stats_post(bp);
3179         bnx2x_stats_comp(bp);
3180 }
3181
3182 static void bnx2x_port_stats_init(struct bnx2x *bp)
3183 {
3184         struct dmae_command *dmae;
3185         int port = BP_PORT(bp);
3186         int vn = BP_E1HVN(bp);
3187         u32 opcode;
3188         int loader_idx = PMF_DMAE_C(bp);
3189         u32 mac_addr;
3190         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3191
3192         /* sanity */
3193         if (!bp->link_vars.link_up || !bp->port.pmf) {
3194                 BNX2X_ERR("BUG!\n");
3195                 return;
3196         }
3197
3198         bp->executer_idx = 0;
3199
3200         /* MCP */
3201         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3202                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3203                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3204 #ifdef __BIG_ENDIAN
3205                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3206 #else
3207                   DMAE_CMD_ENDIANITY_DW_SWAP |
3208 #endif
3209                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3210                   (vn << DMAE_CMD_E1HVN_SHIFT));
3211
3212         if (bp->port.port_stx) {
3213
3214                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3215                 dmae->opcode = opcode;
3216                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3217                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3218                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3219                 dmae->dst_addr_hi = 0;
3220                 dmae->len = sizeof(struct host_port_stats) >> 2;
3221                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3222                 dmae->comp_addr_hi = 0;
3223                 dmae->comp_val = 1;
3224         }
3225
3226         if (bp->func_stx) {
3227
3228                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3229                 dmae->opcode = opcode;
3230                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3231                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3232                 dmae->dst_addr_lo = bp->func_stx >> 2;
3233                 dmae->dst_addr_hi = 0;
3234                 dmae->len = sizeof(struct host_func_stats) >> 2;
3235                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3236                 dmae->comp_addr_hi = 0;
3237                 dmae->comp_val = 1;
3238         }
3239
3240         /* MAC */
3241         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3242                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3243                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3244 #ifdef __BIG_ENDIAN
3245                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3246 #else
3247                   DMAE_CMD_ENDIANITY_DW_SWAP |
3248 #endif
3249                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3250                   (vn << DMAE_CMD_E1HVN_SHIFT));
3251
3252         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3253
3254                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3255                                    NIG_REG_INGRESS_BMAC0_MEM);
3256
3257                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3258                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3259                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3260                 dmae->opcode = opcode;
3261                 dmae->src_addr_lo = (mac_addr +
3262                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3263                 dmae->src_addr_hi = 0;
3264                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3265                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3266                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3267                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3268                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3269                 dmae->comp_addr_hi = 0;
3270                 dmae->comp_val = 1;
3271
3272                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3273                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3274                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3275                 dmae->opcode = opcode;
3276                 dmae->src_addr_lo = (mac_addr +
3277                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3278                 dmae->src_addr_hi = 0;
3279                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3280                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3281                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3282                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3283                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3284                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3285                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3286                 dmae->comp_addr_hi = 0;
3287                 dmae->comp_val = 1;
3288
3289         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3290
3291                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3292
3293                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT) */
3294                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3295                 dmae->opcode = opcode;
3296                 dmae->src_addr_lo = (mac_addr +
3297                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3298                 dmae->src_addr_hi = 0;
3299                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3300                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3301                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3302                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3303                 dmae->comp_addr_hi = 0;
3304                 dmae->comp_val = 1;
3305
3306                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3307                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3308                 dmae->opcode = opcode;
3309                 dmae->src_addr_lo = (mac_addr +
3310                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3311                 dmae->src_addr_hi = 0;
3312                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3313                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3314                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3315                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3316                 dmae->len = 1;
3317                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3318                 dmae->comp_addr_hi = 0;
3319                 dmae->comp_val = 1;
3320
3321                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT) */
3322                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3323                 dmae->opcode = opcode;
3324                 dmae->src_addr_lo = (mac_addr +
3325                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3326                 dmae->src_addr_hi = 0;
3327                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3328                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3329                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3330                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3331                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3332                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3333                 dmae->comp_addr_hi = 0;
3334                 dmae->comp_val = 1;
3335         }
3336
3337         /* NIG */
3338         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3339         dmae->opcode = opcode;
3340         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3341                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3342         dmae->src_addr_hi = 0;
3343         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3344         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3345         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3346         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3347         dmae->comp_addr_hi = 0;
3348         dmae->comp_val = 1;
3349
3350         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3351         dmae->opcode = opcode;
3352         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3353                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3354         dmae->src_addr_hi = 0;
3355         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3356                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3357         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3358                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3359         dmae->len = (2*sizeof(u32)) >> 2;
3360         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3361         dmae->comp_addr_hi = 0;
3362         dmae->comp_val = 1;
3363
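             /* the last command in the chain completes to the stats_comp
                word (DMAE_COMP_VAL) instead of a GO register, so
                bnx2x_stats_comp() can poll for the end of the sequence */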
3364         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3365         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3366                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3367                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3368 #ifdef __BIG_ENDIAN
3369                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3370 #else
3371                         DMAE_CMD_ENDIANITY_DW_SWAP |
3372 #endif
3373                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3374                         (vn << DMAE_CMD_E1HVN_SHIFT));
3375         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3376                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3377         dmae->src_addr_hi = 0;
3378         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3379                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3380         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3381                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3382         dmae->len = (2*sizeof(u32)) >> 2;
3383         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3384         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3385         dmae->comp_val = DMAE_COMP_VAL;
3386
3387         *stats_comp = 0;
3388 }
3389
3390 static void bnx2x_func_stats_init(struct bnx2x *bp)
3391 {
3392         struct dmae_command *dmae = &bp->stats_dmae;
3393         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3394
3395         /* sanity */
3396         if (!bp->func_stx) {
3397                 BNX2X_ERR("BUG!\n");
3398                 return;
3399         }
3400
3401         bp->executer_idx = 0;
3402         memset(dmae, 0, sizeof(struct dmae_command));
3403
3404         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3405                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3406                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3407 #ifdef __BIG_ENDIAN
3408                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3409 #else
3410                         DMAE_CMD_ENDIANITY_DW_SWAP |
3411 #endif
3412                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3413                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3414         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3415         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3416         dmae->dst_addr_lo = bp->func_stx >> 2;
3417         dmae->dst_addr_hi = 0;
3418         dmae->len = sizeof(struct host_func_stats) >> 2;
3419         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3420         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3421         dmae->comp_val = DMAE_COMP_VAL;
3422
3423         *stats_comp = 0;
3424 }
3425
3426 static void bnx2x_stats_start(struct bnx2x *bp)
3427 {
3428         if (bp->port.pmf)
3429                 bnx2x_port_stats_init(bp);
3430
3431         else if (bp->func_stx)
3432                 bnx2x_func_stats_init(bp);
3433
3434         bnx2x_hw_stats_post(bp);
3435         bnx2x_storm_stats_post(bp);
3436 }
3437
3438 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3439 {
3440         bnx2x_stats_comp(bp);
3441         bnx2x_stats_pmf_update(bp);
3442         bnx2x_stats_start(bp);
3443 }
3444
3445 static void bnx2x_stats_restart(struct bnx2x *bp)
3446 {
3447         bnx2x_stats_comp(bp);
3448         bnx2x_stats_start(bp);
3449 }
3450
3451 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3452 {
3453         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3454         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3455         struct regpair diff;
3456
3457         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3458         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3459         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3460         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3461         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3462         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3463         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3464         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3465         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3466         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3467         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3468         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3469         UPDATE_STAT64(tx_stat_gt127,
3470                                 tx_stat_etherstatspkts65octetsto127octets);
3471         UPDATE_STAT64(tx_stat_gt255,
3472                                 tx_stat_etherstatspkts128octetsto255octets);
3473         UPDATE_STAT64(tx_stat_gt511,
3474                                 tx_stat_etherstatspkts256octetsto511octets);
3475         UPDATE_STAT64(tx_stat_gt1023,
3476                                 tx_stat_etherstatspkts512octetsto1023octets);
3477         UPDATE_STAT64(tx_stat_gt1518,
3478                                 tx_stat_etherstatspkts1024octetsto1522octets);
3479         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3480         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3481         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3482         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3483         UPDATE_STAT64(tx_stat_gterr,
3484                                 tx_stat_dot3statsinternalmactransmiterrors);
3485         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3486 }
3487
3488 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3489 {
3490         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3491         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3492
3493         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3494         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3495         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3496         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3497         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3498         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3499         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3500         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3501         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3502         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3503         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3504         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3505         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3506         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3507         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3508         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3509         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3510         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3511         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3512         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3513         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3514         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3515         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3516         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3517         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3518         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3519         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3520         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3521         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3522         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3523         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3524 }
3525
3526 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3527 {
3528         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3529         struct nig_stats *old = &(bp->port.old_nig_stats);
3530         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3531         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3532         struct regpair diff;
3533
3534         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3535                 bnx2x_bmac_stats_update(bp);
3536
3537         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3538                 bnx2x_emac_stats_update(bp);
3539
3540         else { /* unreached */
3541                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3542                 return -1;
3543         }
3544
3545         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3546                       new->brb_discard - old->brb_discard);
3547         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3548                       new->brb_truncate - old->brb_truncate);
3549
3550         UPDATE_STAT64_NIG(egress_mac_pkt0,
3551                                         etherstatspkts1024octetsto1522octets);
3552         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3553
3554         memcpy(old, new, sizeof(struct nig_stats));
3555
3556         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3557                sizeof(struct mac_stx));
3558         estats->brb_drop_hi = pstats->brb_drop_hi;
3559         estats->brb_drop_lo = pstats->brb_drop_lo;
3560
3561         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3562
3563         return 0;
3564 }
3565
3566 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3567 {
3568         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3569         int cl_id = BP_CL_ID(bp);
3570         struct tstorm_per_port_stats *tport =
3571                                 &stats->tstorm_common.port_statistics;
3572         struct tstorm_per_client_stats *tclient =
3573                         &stats->tstorm_common.client_statistics[cl_id];
3574         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3575         struct xstorm_per_client_stats *xclient =
3576                         &stats->xstorm_common.client_statistics[cl_id];
3577         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3578         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3579         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3580         u32 diff;
3581
3582         /* are storm stats valid? */
3583         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3584                                                         bp->stats_counter) {
3585                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3586                    "  tstorm counter (%d) != stats_counter (%d)\n",
3587                    tclient->stats_counter, bp->stats_counter);
3588                 return -1;
3589         }
3590         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3591                                                         bp->stats_counter) {
3592                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3593                    "  xstorm counter (%d) != stats_counter (%d)\n",
3594                    xclient->stats_counter, bp->stats_counter);
3595                 return -2;
3596         }
3597
3598         fstats->total_bytes_received_hi =
3599         fstats->valid_bytes_received_hi =
3600                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3601         fstats->total_bytes_received_lo =
3602         fstats->valid_bytes_received_lo =
3603                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3604
3605         estats->error_bytes_received_hi =
3606                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3607         estats->error_bytes_received_lo =
3608                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3609         ADD_64(estats->error_bytes_received_hi,
3610                estats->rx_stat_ifhcinbadoctets_hi,
3611                estats->error_bytes_received_lo,
3612                estats->rx_stat_ifhcinbadoctets_lo);
3613
3614         ADD_64(fstats->total_bytes_received_hi,
3615                estats->error_bytes_received_hi,
3616                fstats->total_bytes_received_lo,
3617                estats->error_bytes_received_lo);
3618
3619         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3620         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3621                                 total_multicast_packets_received);
3622         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3623                                 total_broadcast_packets_received);
3624
3625         fstats->total_bytes_transmitted_hi =
3626                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3627         fstats->total_bytes_transmitted_lo =
3628                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3629
3630         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3631                                 total_unicast_packets_transmitted);
3632         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3633                                 total_multicast_packets_transmitted);
3634         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3635                                 total_broadcast_packets_transmitted);
3636
3637         memcpy(estats, &(fstats->total_bytes_received_hi),
3638                sizeof(struct host_func_stats) - 2*sizeof(u32));
3639
3640         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3641         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3642         estats->brb_truncate_discard =
3643                                 le32_to_cpu(tport->brb_truncate_discard);
3644         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3645
3646         old_tclient->rcv_unicast_bytes.hi =
3647                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3648         old_tclient->rcv_unicast_bytes.lo =
3649                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3650         old_tclient->rcv_broadcast_bytes.hi =
3651                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3652         old_tclient->rcv_broadcast_bytes.lo =
3653                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3654         old_tclient->rcv_multicast_bytes.hi =
3655                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3656         old_tclient->rcv_multicast_bytes.lo =
3657                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3658         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3659
3660         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3661         old_tclient->packets_too_big_discard =
3662                                 le32_to_cpu(tclient->packets_too_big_discard);
3663         estats->no_buff_discard =
3664         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3665         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3666
3667         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3668         old_xclient->unicast_bytes_sent.hi =
3669                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3670         old_xclient->unicast_bytes_sent.lo =
3671                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3672         old_xclient->multicast_bytes_sent.hi =
3673                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3674         old_xclient->multicast_bytes_sent.lo =
3675                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3676         old_xclient->broadcast_bytes_sent.hi =
3677                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3678         old_xclient->broadcast_bytes_sent.lo =
3679                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3680
3681         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3682
3683         return 0;
3684 }
3685
3686 static void bnx2x_net_stats_update(struct bnx2x *bp)
3687 {
3688         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3689         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3690         struct net_device_stats *nstats = &bp->dev->stats;
3691
3692         nstats->rx_packets =
3693                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3694                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3695                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3696
3697         nstats->tx_packets =
3698                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3699                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3700                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3701
3702         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3703
3704         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3705
3706         nstats->rx_dropped = old_tclient->checksum_discard +
3707                              estats->mac_discard;
3708         nstats->tx_dropped = 0;
3709
3710         nstats->multicast =
3711                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3712
3713         nstats->collisions =
3714                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3715                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3716                         estats->tx_stat_dot3statslatecollisions_lo +
3717                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3718
3719         estats->jabber_packets_received =
3720                                 old_tclient->packets_too_big_discard +
3721                                 estats->rx_stat_dot3statsframestoolong_lo;
3722
3723         nstats->rx_length_errors =
3724                                 estats->rx_stat_etherstatsundersizepkts_lo +
3725                                 estats->jabber_packets_received;
3726         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3727         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3728         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3729         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3730         nstats->rx_missed_errors = estats->xxoverflow_discard;
3731
3732         nstats->rx_errors = nstats->rx_length_errors +
3733                             nstats->rx_over_errors +
3734                             nstats->rx_crc_errors +
3735                             nstats->rx_frame_errors +
3736                             nstats->rx_fifo_errors +
3737                             nstats->rx_missed_errors;
3738
3739         nstats->tx_aborted_errors =
3740                         estats->tx_stat_dot3statslatecollisions_lo +
3741                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3742         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3743         nstats->tx_fifo_errors = 0;
3744         nstats->tx_heartbeat_errors = 0;
3745         nstats->tx_window_errors = 0;
3746
3747         nstats->tx_errors = nstats->tx_aborted_errors +
3748                             nstats->tx_carrier_errors;
3749 }
3750
3751 static void bnx2x_stats_update(struct bnx2x *bp)
3752 {
3753         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3754         int update = 0;
3755
3756         if (*stats_comp != DMAE_COMP_VAL)
3757                 return;
3758
3759         if (bp->port.pmf)
3760                 update = (bnx2x_hw_stats_update(bp) == 0);
3761
3762         update |= (bnx2x_storm_stats_update(bp) == 0);
3763
3764         if (update)
3765                 bnx2x_net_stats_update(bp);
3766
3767         else {
3768                 if (bp->stats_pending) {
3769                         bp->stats_pending++;
3770                         if (bp->stats_pending == 3) {
3771                                 BNX2X_ERR("stats not updated in 3 consecutive polls\n");
3772                                 bnx2x_panic();
3773                                 return;
3774                         }
3775                 }
3776         }
3777
3778         if (bp->msglevel & NETIF_MSG_TIMER) {
3779                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3780                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3781                 struct net_device_stats *nstats = &bp->dev->stats;
3782                 int i;
3783
3784                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3785                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3786                                   "  tx pkt (%lx)\n",
3787                        bnx2x_tx_avail(bp->fp),
3788                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3789                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3790                                   "  rx pkt (%lx)\n",
3791                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3792                              bp->fp->rx_comp_cons),
3793                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3794                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3795                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3796                        estats->driver_xoff, estats->brb_drop_lo);
3797                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3798                         "packets_too_big_discard %u  no_buff_discard %u  "
3799                         "mac_discard %u  mac_filter_discard %u  "
3800                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3801                         "ttl0_discard %u\n",
3802                        old_tclient->checksum_discard,
3803                        old_tclient->packets_too_big_discard,
3804                        old_tclient->no_buff_discard, estats->mac_discard,
3805                        estats->mac_filter_discard, estats->xxoverflow_discard,
3806                        estats->brb_truncate_discard,
3807                        old_tclient->ttl0_discard);
3808
3809                 for_each_queue(bp, i) {
3810                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3811                                bnx2x_fp(bp, i, tx_pkt),
3812                                bnx2x_fp(bp, i, rx_pkt),
3813                                bnx2x_fp(bp, i, rx_calls));
3814                 }
3815         }
3816
3817         bnx2x_hw_stats_post(bp);
3818         bnx2x_storm_stats_post(bp);
3819 }
3820
3821 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3822 {
3823         struct dmae_command *dmae;
3824         u32 opcode;
3825         int loader_idx = PMF_DMAE_C(bp);
3826         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3827
3828         bp->executer_idx = 0;
3829
3830         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3831                   DMAE_CMD_C_ENABLE |
3832                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3833 #ifdef __BIG_ENDIAN
3834                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3835 #else
3836                   DMAE_CMD_ENDIANITY_DW_SWAP |
3837 #endif
3838                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3839                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3840
3841         if (bp->port.port_stx) {
3842
3843                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3844                 if (bp->func_stx)
3845                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3846                 else
3847                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3848                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3849                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3850                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3851                 dmae->dst_addr_hi = 0;
3852                 dmae->len = sizeof(struct host_port_stats) >> 2;
3853                 if (bp->func_stx) {
3854                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3855                         dmae->comp_addr_hi = 0;
3856                         dmae->comp_val = 1;
3857                 } else {
3858                         dmae->comp_addr_lo =
3859                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3860                         dmae->comp_addr_hi =
3861                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3862                         dmae->comp_val = DMAE_COMP_VAL;
3863
3864                         *stats_comp = 0;
3865                 }
3866         }
3867
3868         if (bp->func_stx) {
3869
3870                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3871                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3872                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3873                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3874                 dmae->dst_addr_lo = bp->func_stx >> 2;
3875                 dmae->dst_addr_hi = 0;
3876                 dmae->len = sizeof(struct host_func_stats) >> 2;
3877                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3878                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3879                 dmae->comp_val = DMAE_COMP_VAL;
3880
3881                 *stats_comp = 0;
3882         }
3883 }
3884
3885 static void bnx2x_stats_stop(struct bnx2x *bp)
3886 {
3887         int update = 0;
3888
3889         bnx2x_stats_comp(bp);
3890
3891         if (bp->port.pmf)
3892                 update = (bnx2x_hw_stats_update(bp) == 0);
3893
3894         update |= (bnx2x_storm_stats_update(bp) == 0);
3895
3896         if (update) {
3897                 bnx2x_net_stats_update(bp);
3898
3899                 if (bp->port.pmf)
3900                         bnx2x_port_stats_stop(bp);
3901
3902                 bnx2x_hw_stats_post(bp);
3903                 bnx2x_stats_comp(bp);
3904         }
3905 }
3906
3907 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3908 {
3909 }
3910
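     /*
      * Statistics state machine: indexed by the current state and the
      * incoming event, each entry names the handler to run and the next
      * state to enter (driven by bnx2x_stats_handle()).
      */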
3911 static const struct {
3912         void (*action)(struct bnx2x *bp);
3913         enum bnx2x_stats_state next_state;
3914 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3915 /* state        event   */
3916 {
3917 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3918 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3919 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3920 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3921 },
3922 {
3923 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3924 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3925 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3926 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3927 }
3928 };
3929
3930 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3931 {
3932         enum bnx2x_stats_state state = bp->stats_state;
3933
3934         bnx2x_stats_stm[state][event].action(bp);
3935         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3936
3937         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3938                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3939                    state, event, bp->stats_state);
3940 }
3941
3942 static void bnx2x_timer(unsigned long data)
3943 {
3944         struct bnx2x *bp = (struct bnx2x *) data;
3945
3946         if (!netif_running(bp->dev))
3947                 return;
3948
3949         if (atomic_read(&bp->intr_sem) != 0)
3950                 goto timer_restart;
3951
3952         if (poll) {
3953                 struct bnx2x_fastpath *fp = &bp->fp[0];
3954                 int rc;
3955
3956                 bnx2x_tx_int(fp, 1000);
3957                 rc = bnx2x_rx_int(fp, 1000);
3958         }
3959
3960         if (!BP_NOMCP(bp)) {
3961                 int func = BP_FUNC(bp);
3962                 u32 drv_pulse;
3963                 u32 mcp_pulse;
3964
3965                 ++bp->fw_drv_pulse_wr_seq;
3966                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3967                 /* TBD - add SYSTEM_TIME */
3968                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3969                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3970
3971                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3972                              MCP_PULSE_SEQ_MASK);
3973                 /* The delta between driver pulse and mcp response
3974                  * should be 1 (before mcp response) or 0 (after mcp response)
3975                  */
3976                 if ((drv_pulse != mcp_pulse) &&
3977                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3978                         /* someone lost a heartbeat... */
3979                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3980                                   drv_pulse, mcp_pulse);
3981                 }
3982         }
3983
3984         if ((bp->state == BNX2X_STATE_OPEN) ||
3985             (bp->state == BNX2X_STATE_DISABLED))
3986                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3987
3988 timer_restart:
3989         mod_timer(&bp->timer, jiffies + bp->current_interval);
3990 }
3991
3992 /* end of Statistics */
3993
3994 /* nic init */
3995
3996 /*
3997  * nic init service functions
3998  */
3999
4000 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4001 {
4002         int port = BP_PORT(bp);
4003
4004         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4005                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4006                         sizeof(struct ustorm_status_block)/4);
4007         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4008                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4009                         sizeof(struct cstorm_status_block)/4);
4010 }
4011
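     /*
      * Point the USTORM and CSTORM sections of a host status block at its
      * DMA mapping, mark host coalescing as disabled on every index, and
      * enable the status block with a final IGU ack.
      */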
4012 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4013                           dma_addr_t mapping, int sb_id)
4014 {
4015         int port = BP_PORT(bp);
4016         int func = BP_FUNC(bp);
4017         int index;
4018         u64 section;
4019
4020         /* USTORM */
4021         section = ((u64)mapping) + offsetof(struct host_status_block,
4022                                             u_status_block);
4023         sb->u_status_block.status_block_id = sb_id;
4024
4025         REG_WR(bp, BAR_USTRORM_INTMEM +
4026                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4027         REG_WR(bp, BAR_USTRORM_INTMEM +
4028                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4029                U64_HI(section));
4030         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4031                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4032
4033         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4034                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4035                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4036
4037         /* CSTORM */
4038         section = ((u64)mapping) + offsetof(struct host_status_block,
4039                                             c_status_block);
4040         sb->c_status_block.status_block_id = sb_id;
4041
4042         REG_WR(bp, BAR_CSTRORM_INTMEM +
4043                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4044         REG_WR(bp, BAR_CSTRORM_INTMEM +
4045                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4046                U64_HI(section));
4047         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4048                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4049
4050         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4051                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4052                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4053
4054         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4055 }
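/* Editor's note -- illustrative sketch, not driver code: each status block
 * address above is a 64-bit DMA address programmed through two 32-bit
 * register writes, low word at the base offset and high word at base+4,
 * which is what every U64_LO()/U64_HI() pair does.  Hypothetical standalone
 * form:
 */
#if 0
#include <stdint.h>

static void ex_write_addr64(void (*reg_wr)(uint32_t off, uint32_t val),
			    uint32_t off, uint64_t dma_addr)
{
	reg_wr(off, (uint32_t)dma_addr);		/* U64_LO: bits 31..0 */
	reg_wr(off + 4, (uint32_t)(dma_addr >> 32));	/* U64_HI: bits 63..32 */
}
#endif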
4056
4057 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4058 {
4059         int func = BP_FUNC(bp);
4060
4061         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4062                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4063                         sizeof(struct ustorm_def_status_block)/4);
4064         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4065                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4066                         sizeof(struct cstorm_def_status_block)/4);
4067         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4068                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4069                         sizeof(struct xstorm_def_status_block)/4);
4070         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4071                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4072                         sizeof(struct tstorm_def_status_block)/4);
4073 }
4074
4075 static void bnx2x_init_def_sb(struct bnx2x *bp,
4076                               struct host_def_status_block *def_sb,
4077                               dma_addr_t mapping, int sb_id)
4078 {
4079         int port = BP_PORT(bp);
4080         int func = BP_FUNC(bp);
4081         int index, val, reg_offset;
4082         u64 section;
4083
4084         /* ATTN */
4085         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4086                                             atten_status_block);
4087         def_sb->atten_status_block.status_block_id = sb_id;
4088
4089         bp->attn_state = 0;
4090
4091         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4092                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4093
4094         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4095                 bp->attn_group[index].sig[0] = REG_RD(bp,
4096                                                      reg_offset + 0x10*index);
4097                 bp->attn_group[index].sig[1] = REG_RD(bp,
4098                                                reg_offset + 0x4 + 0x10*index);
4099                 bp->attn_group[index].sig[2] = REG_RD(bp,
4100                                                reg_offset + 0x8 + 0x10*index);
4101                 bp->attn_group[index].sig[3] = REG_RD(bp,
4102                                                reg_offset + 0xc + 0x10*index);
4103         }
4104
4105         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4106                              HC_REG_ATTN_MSG0_ADDR_L);
4107
4108         REG_WR(bp, reg_offset, U64_LO(section));
4109         REG_WR(bp, reg_offset + 4, U64_HI(section));
4110
4111         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4112
4113         val = REG_RD(bp, reg_offset);
4114         val |= sb_id;
4115         REG_WR(bp, reg_offset, val);
4116
4117         /* USTORM */
4118         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4119                                             u_def_status_block);
4120         def_sb->u_def_status_block.status_block_id = sb_id;
4121
4122         REG_WR(bp, BAR_USTRORM_INTMEM +
4123                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4124         REG_WR(bp, BAR_USTRORM_INTMEM +
4125                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4126                U64_HI(section));
4127         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4128                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4129
4130         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4131                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4132                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4133
4134         /* CSTORM */
4135         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4136                                             c_def_status_block);
4137         def_sb->c_def_status_block.status_block_id = sb_id;
4138
4139         REG_WR(bp, BAR_CSTRORM_INTMEM +
4140                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4141         REG_WR(bp, BAR_CSTRORM_INTMEM +
4142                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4143                U64_HI(section));
4144         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4145                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4146
4147         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4148                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4149                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4150
4151         /* TSTORM */
4152         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4153                                             t_def_status_block);
4154         def_sb->t_def_status_block.status_block_id = sb_id;
4155
4156         REG_WR(bp, BAR_TSTRORM_INTMEM +
4157                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4158         REG_WR(bp, BAR_TSTRORM_INTMEM +
4159                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4160                U64_HI(section));
4161         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4162                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4163
4164         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4165                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4166                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4167
4168         /* XSTORM */
4169         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4170                                             x_def_status_block);
4171         def_sb->x_def_status_block.status_block_id = sb_id;
4172
4173         REG_WR(bp, BAR_XSTRORM_INTMEM +
4174                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4175         REG_WR(bp, BAR_XSTRORM_INTMEM +
4176                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4177                U64_HI(section));
4178         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4179                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4180
4181         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4182                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4183                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4184
4185         bp->stats_pending = 0;
4186         bp->set_mac_pending = 0;
4187
4188         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4189 }
4190
4191 static void bnx2x_update_coalesce(struct bnx2x *bp)
4192 {
4193         int port = BP_PORT(bp);
4194         int i;
4195
4196         for_each_queue(bp, i) {
4197                 int sb_id = bp->fp[i].sb_id;
4198
4199                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4200                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4201                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4202                                                     U_SB_ETH_RX_CQ_INDEX),
4203                         bp->rx_ticks/12);
4204                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4205                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4206                                                      U_SB_ETH_RX_CQ_INDEX),
4207                          bp->rx_ticks ? 0 : 1);
4208                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4209                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4210                                                      U_SB_ETH_RX_BD_INDEX),
4211                          bp->rx_ticks ? 0 : 1);
4212
4213                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4214                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4215                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4216                                                     C_SB_ETH_TX_CQ_INDEX),
4217                         bp->tx_ticks/12);
4218                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4219                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4220                                                      C_SB_ETH_TX_CQ_INDEX),
4221                          bp->tx_ticks ? 0 : 1);
4222         }
4223 }
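/* Editor's note -- illustrative sketch, not driver code: rx_ticks/tx_ticks
 * are kept in microseconds and the divisions by 12 above convert them to the
 * HC timeout register's granularity (assumed here to be 12us per tick); a
 * value of 0 disables coalescing for that index via the HC_DISABLE flag.
 */
#if 0
static unsigned char ex_usec_to_hc_ticks(unsigned int usec)
{
	return (unsigned char)(usec / 12);	/* assumed 12us register tick */
}

static unsigned short ex_hc_disable(unsigned int usec)
{
	return usec ? 0 : 1;			/* 0us => disable coalescing */
}
#endif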
4224
4225 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4226                                        struct bnx2x_fastpath *fp, int last)
4227 {
4228         int i;
4229
4230         for (i = 0; i < last; i++) {
4231                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4232                 struct sk_buff *skb = rx_buf->skb;
4233
4234                 if (skb == NULL) {
4235                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4236                         continue;
4237                 }
4238
4239                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4240                         pci_unmap_single(bp->pdev,
4241                                          pci_unmap_addr(rx_buf, mapping),
4242                                          bp->rx_buf_size,
4243                                          PCI_DMA_FROMDEVICE);
4244
4245                 dev_kfree_skb(skb);
4246                 rx_buf->skb = NULL;
4247         }
4248 }
4249
4250 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4251 {
4252         int func = BP_FUNC(bp);
4253         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4254                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4255         u16 ring_prod, cqe_ring_prod;
4256         int i, j;
4257
4258         bp->rx_buf_size = bp->dev->mtu;
4259         bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4260                 BCM_RX_ETH_PAYLOAD_ALIGN;
4261
4262         if (bp->flags & TPA_ENABLE_FLAG) {
4263                 DP(NETIF_MSG_IFUP,
4264                    "rx_buf_size %d  effective_mtu %d\n",
4265                    bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4266
4267                 for_each_queue(bp, j) {
4268                         struct bnx2x_fastpath *fp = &bp->fp[j];
4269
4270                         for (i = 0; i < max_agg_queues; i++) {
4271                                 fp->tpa_pool[i].skb =
4272                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4273                                 if (!fp->tpa_pool[i].skb) {
4274                                         BNX2X_ERR("Failed to allocate TPA "
4275                                                   "skb pool for queue[%d] - "
4276                                                   "disabling TPA on this "
4277                                                   "queue!\n", j);
4278                                         bnx2x_free_tpa_pool(bp, fp, i);
4279                                         fp->disable_tpa = 1;
4280                                         break;
4281                                 }
4282                                 pci_unmap_addr_set((struct sw_rx_bd *)
4283                                                         &fp->tpa_pool[i],
4284                                                    mapping, 0);
4285                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4286                         }
4287                 }
4288         }
4289
4290         for_each_queue(bp, j) {
4291                 struct bnx2x_fastpath *fp = &bp->fp[j];
4292
4293                 fp->rx_bd_cons = 0;
4294                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4295                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4296
4297                 /* "next page" elements initialization */
4298                 /* SGE ring */
4299                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4300                         struct eth_rx_sge *sge;
4301
4302                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4303                         sge->addr_hi =
4304                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4305                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4306                         sge->addr_lo =
4307                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4308                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4309                 }
4310
4311                 bnx2x_init_sge_ring_bit_mask(fp);
4312
4313                 /* RX BD ring */
4314                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4315                         struct eth_rx_bd *rx_bd;
4316
4317                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4318                         rx_bd->addr_hi =
4319                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4320                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4321                         rx_bd->addr_lo =
4322                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4323                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4324                 }
4325
4326                 /* CQ ring */
4327                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4328                         struct eth_rx_cqe_next_page *nextpg;
4329
4330                         nextpg = (struct eth_rx_cqe_next_page *)
4331                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4332                         nextpg->addr_hi =
4333                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4334                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4335                         nextpg->addr_lo =
4336                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4337                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4338                 }
4339
4340                 /* Allocate SGEs and initialize the ring elements */
4341                 for (i = 0, ring_prod = 0;
4342                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4343
4344                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4345                                 BNX2X_ERR("was only able to allocate "
4346                                           "%d rx sges\n", i);
4347                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4348                                 /* Cleanup already allocated elements */
4349                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4350                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4351                                 fp->disable_tpa = 1;
4352                                 ring_prod = 0;
4353                                 break;
4354                         }
4355                         ring_prod = NEXT_SGE_IDX(ring_prod);
4356                 }
4357                 fp->rx_sge_prod = ring_prod;
4358
4359                 /* Allocate BDs and initialize BD ring */
4360                 fp->rx_comp_cons = 0;
4361                 cqe_ring_prod = ring_prod = 0;
4362                 for (i = 0; i < bp->rx_ring_size; i++) {
4363                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4364                                 BNX2X_ERR("was only able to allocate "
4365                                           "%d rx skbs\n", i);
4366                                 bp->eth_stats.rx_skb_alloc_failed++;
4367                                 break;
4368                         }
4369                         ring_prod = NEXT_RX_IDX(ring_prod);
4370                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4371                         WARN_ON(ring_prod <= i);
4372                 }
4373
4374                 fp->rx_bd_prod = ring_prod;
4375                 /* must not have more available CQEs than BDs */
4376                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4377                                        cqe_ring_prod);
4378                 fp->rx_pkt = fp->rx_calls = 0;
4379
4380                 /* Warning!
4381                  * This will generate an interrupt (to the TSTORM);
4382                  * it must only be done after the chip is initialized.
4383                  */
4384                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4385                                      fp->rx_sge_prod);
4386                 if (j != 0)
4387                         continue;
4388
4389                 REG_WR(bp, BAR_USTRORM_INTMEM +
4390                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4391                        U64_LO(fp->rx_comp_mapping));
4392                 REG_WR(bp, BAR_USTRORM_INTMEM +
4393                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4394                        U64_HI(fp->rx_comp_mapping));
4395         }
4396 }
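/* Editor's note -- illustrative sketch, not driver code: each ring above is
 * a chain of pages whose last slot(s) hold not a real descriptor but the DMA
 * address of the next page (wrapping modulo the page count) -- hence the
 * writes at RX_DESC_CNT * i - 2 and friends.  Hypothetical standalone form
 * of the chaining arithmetic:
 */
#if 0
#include <stdint.h>

struct ex_next_page { uint32_t addr_hi, addr_lo; };

static void ex_chain_page(struct ex_next_page *tail_slot, uint64_t ring_base,
			  unsigned int page, unsigned int num_pages,
			  unsigned int page_size)
{
	/* DMA address of the page following 'page', wrapping to page 0 */
	uint64_t next = ring_base +
			(uint64_t)page_size * ((page + 1) % num_pages);

	tail_slot->addr_hi = (uint32_t)(next >> 32);
	tail_slot->addr_lo = (uint32_t)next;
}
#endif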
4397
4398 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4399 {
4400         int i, j;
4401
4402         for_each_queue(bp, j) {
4403                 struct bnx2x_fastpath *fp = &bp->fp[j];
4404
4405                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4406                         struct eth_tx_bd *tx_bd =
4407                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4408
4409                         tx_bd->addr_hi =
4410                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4411                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4412                         tx_bd->addr_lo =
4413                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4414                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4415                 }
4416
4417                 fp->tx_pkt_prod = 0;
4418                 fp->tx_pkt_cons = 0;
4419                 fp->tx_bd_prod = 0;
4420                 fp->tx_bd_cons = 0;
4421                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4422                 fp->tx_pkt = 0;
4423         }
4424 }
4425
4426 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4427 {
4428         int func = BP_FUNC(bp);
4429
4430         spin_lock_init(&bp->spq_lock);
4431
4432         bp->spq_left = MAX_SPQ_PENDING;
4433         bp->spq_prod_idx = 0;
4434         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4435         bp->spq_prod_bd = bp->spq;
4436         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4437
4438         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4439                U64_LO(bp->spq_mapping));
4440         REG_WR(bp,
4441                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4442                U64_HI(bp->spq_mapping));
4443
4444         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4445                bp->spq_prod_idx);
4446 }
4447
4448 static void bnx2x_init_context(struct bnx2x *bp)
4449 {
4450         int i;
4451
4452         for_each_queue(bp, i) {
4453                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4454                 struct bnx2x_fastpath *fp = &bp->fp[i];
4455                 u8 sb_id = FP_SB_ID(fp);
4456
4457                 context->xstorm_st_context.tx_bd_page_base_hi =
4458                                                 U64_HI(fp->tx_desc_mapping);
4459                 context->xstorm_st_context.tx_bd_page_base_lo =
4460                                                 U64_LO(fp->tx_desc_mapping);
4461                 context->xstorm_st_context.db_data_addr_hi =
4462                                                 U64_HI(fp->tx_prods_mapping);
4463                 context->xstorm_st_context.db_data_addr_lo =
4464                                                 U64_LO(fp->tx_prods_mapping);
4465                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4466                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4467
4468                 context->ustorm_st_context.common.sb_index_numbers =
4469                                                 BNX2X_RX_SB_INDEX_NUM;
4470                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4471                 context->ustorm_st_context.common.status_block_id = sb_id;
4472                 context->ustorm_st_context.common.flags =
4473                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4474                 context->ustorm_st_context.common.mc_alignment_size =
4475                         BCM_RX_ETH_PAYLOAD_ALIGN;
4476                 context->ustorm_st_context.common.bd_buff_size =
4477                                                 bp->rx_buf_size;
4478                 context->ustorm_st_context.common.bd_page_base_hi =
4479                                                 U64_HI(fp->rx_desc_mapping);
4480                 context->ustorm_st_context.common.bd_page_base_lo =
4481                                                 U64_LO(fp->rx_desc_mapping);
4482                 if (!fp->disable_tpa) {
4483                         context->ustorm_st_context.common.flags |=
4484                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4485                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4486                         context->ustorm_st_context.common.sge_buff_size =
4487                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4488                         context->ustorm_st_context.common.sge_page_base_hi =
4489                                                 U64_HI(fp->rx_sge_mapping);
4490                         context->ustorm_st_context.common.sge_page_base_lo =
4491                                                 U64_LO(fp->rx_sge_mapping);
4492                 }
4493
4494                 context->cstorm_st_context.sb_index_number =
4495                                                 C_SB_ETH_TX_CQ_INDEX;
4496                 context->cstorm_st_context.status_block_id = sb_id;
4497
4498                 context->xstorm_ag_context.cdu_reserved =
4499                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4500                                                CDU_REGION_NUMBER_XCM_AG,
4501                                                ETH_CONNECTION_TYPE);
4502                 context->ustorm_ag_context.cdu_usage =
4503                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4504                                                CDU_REGION_NUMBER_UCM_AG,
4505                                                ETH_CONNECTION_TYPE);
4506         }
4507 }
4508
4509 static void bnx2x_init_ind_table(struct bnx2x *bp)
4510 {
4511         int port = BP_PORT(bp);
4512         int i;
4513
4514         if (!is_multi(bp))
4515                 return;
4516
4517         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4518         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4519                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4520                         TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4521                         i % bp->num_queues);
4522
4523         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4524 }
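/* Editor's note -- illustrative sketch, not driver code: the indirection
 * table above spreads RSS hash buckets over the active queues round-robin;
 * bucket i is simply served by queue (i % num_queues).
 */
#if 0
static void ex_fill_ind_table(unsigned char *table, unsigned int table_size,
			      unsigned int num_queues)
{
	unsigned int i;

	for (i = 0; i < table_size; i++)
		table[i] = (unsigned char)(i % num_queues);
}
#endif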
4525
4526 static void bnx2x_set_client_config(struct bnx2x *bp)
4527 {
4528         struct tstorm_eth_client_config tstorm_client = {0};
4529         int port = BP_PORT(bp);
4530         int i;
4531
4532         tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4533         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4534         tstorm_client.config_flags =
4535                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4536 #ifdef BCM_VLAN
4537         if (bp->rx_mode && bp->vlgrp) {
4538                 tstorm_client.config_flags |=
4539                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4540                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4541         }
4542 #endif
4543
4544         if (bp->flags & TPA_ENABLE_FLAG) {
4545                 tstorm_client.max_sges_for_packet =
4546                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4547                 tstorm_client.max_sges_for_packet =
4548                         ((tstorm_client.max_sges_for_packet +
4549                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4550                         PAGES_PER_SGE_SHIFT;
4551
4552                 tstorm_client.config_flags |=
4553                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4554         }
4555
4556         for_each_queue(bp, i) {
4557                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4558                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4559                        ((u32 *)&tstorm_client)[0]);
4560                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4561                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4562                        ((u32 *)&tstorm_client)[1]);
4563         }
4564
4565         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4566            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4567 }
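/* Editor's note -- illustrative sketch, not driver code: max_sges_for_packet
 * above is two round-up steps -- the MTU rounded up to whole SGE pages, then
 * that page count rounded up to whole SGE entries of PAGES_PER_SGE pages
 * each.  E.g. (hypothetical numbers) a 9000-byte MTU with 4K pages and
 * PAGES_PER_SGE = 2 gives 3 pages, hence 2 SGE entries.
 */
#if 0
static unsigned int ex_max_sges(unsigned int mtu, unsigned int page_size,
				unsigned int pages_per_sge)
{
	unsigned int pages = (mtu + page_size - 1) / page_size;

	return (pages + pages_per_sge - 1) / pages_per_sge;
}
#endif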
4568
4569 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4570 {
4571         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4572         int mode = bp->rx_mode;
4573         int mask = (1 << BP_L_ID(bp));
4574         int func = BP_FUNC(bp);
4575         int i;
4576
4577         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4578
4579         switch (mode) {
4580         case BNX2X_RX_MODE_NONE: /* no Rx */
4581                 tstorm_mac_filter.ucast_drop_all = mask;
4582                 tstorm_mac_filter.mcast_drop_all = mask;
4583                 tstorm_mac_filter.bcast_drop_all = mask;
4584                 break;
4585         case BNX2X_RX_MODE_NORMAL:
4586                 tstorm_mac_filter.bcast_accept_all = mask;
4587                 break;
4588         case BNX2X_RX_MODE_ALLMULTI:
4589                 tstorm_mac_filter.mcast_accept_all = mask;
4590                 tstorm_mac_filter.bcast_accept_all = mask;
4591                 break;
4592         case BNX2X_RX_MODE_PROMISC:
4593                 tstorm_mac_filter.ucast_accept_all = mask;
4594                 tstorm_mac_filter.mcast_accept_all = mask;
4595                 tstorm_mac_filter.bcast_accept_all = mask;
4596                 break;
4597         default:
4598                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4599                 break;
4600         }
4601
4602         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4603                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4604                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4605                        ((u32 *)&tstorm_mac_filter)[i]);
4606
4607 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4608                    ((u32 *)&tstorm_mac_filter)[i]); */
4609         }
4610
4611         if (mode != BNX2X_RX_MODE_NONE)
4612                 bnx2x_set_client_config(bp);
4613 }
4614
4615 static void bnx2x_init_internal_common(struct bnx2x *bp)
4616 {
4617         int i;
4618
4619         if (bp->flags & TPA_ENABLE_FLAG) {
4620                 struct tstorm_eth_tpa_exist tpa = {0};
4621
4622                 tpa.tpa_exist = 1;
4623
4624                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4625                        ((u32 *)&tpa)[0]);
4626                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4627                        ((u32 *)&tpa)[1]);
4628         }
4629
4630         /* Zero this manually as its initialization is
4631            currently missing in the initTool */
4632         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4633                 REG_WR(bp, BAR_USTRORM_INTMEM +
4634                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4635 }
4636
4637 static void bnx2x_init_internal_port(struct bnx2x *bp)
4638 {
4639         int port = BP_PORT(bp);
4640
4641         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4642         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4643         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4644         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4645 }
4646
4647 static void bnx2x_init_internal_func(struct bnx2x *bp)
4648 {
4649         struct tstorm_eth_function_common_config tstorm_config = {0};
4650         struct stats_indication_flags stats_flags = {0};
4651         int port = BP_PORT(bp);
4652         int func = BP_FUNC(bp);
4653         int i;
4654         u16 max_agg_size;
4655
4656         if (is_multi(bp)) {
4657                 tstorm_config.config_flags = MULTI_FLAGS;
4658                 tstorm_config.rss_result_mask = MULTI_MASK;
4659         }
4660
4661         tstorm_config.leading_client_id = BP_L_ID(bp);
4662
4663         REG_WR(bp, BAR_TSTRORM_INTMEM +
4664                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4665                (*(u32 *)&tstorm_config));
4666
4667         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4668         bnx2x_set_storm_rx_mode(bp);
4669
4670         /* reset xstorm per client statistics */
4671         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4672                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4673                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4674                        i*4, 0);
4675         }
4676         /* reset tstorm per client statistics */
4677         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4678                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4679                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4680                        i*4, 0);
4681         }
4682
4683         /* Init statistics related context */
4684         stats_flags.collect_eth = 1;
4685
4686         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4687                ((u32 *)&stats_flags)[0]);
4688         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4689                ((u32 *)&stats_flags)[1]);
4690
4691         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4692                ((u32 *)&stats_flags)[0]);
4693         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4694                ((u32 *)&stats_flags)[1]);
4695
4696         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4697                ((u32 *)&stats_flags)[0]);
4698         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4699                ((u32 *)&stats_flags)[1]);
4700
4701         REG_WR(bp, BAR_XSTRORM_INTMEM +
4702                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4703                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4704         REG_WR(bp, BAR_XSTRORM_INTMEM +
4705                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4706                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4707
4708         REG_WR(bp, BAR_TSTRORM_INTMEM +
4709                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4710                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4711         REG_WR(bp, BAR_TSTRORM_INTMEM +
4712                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4713                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4714
4715         if (CHIP_IS_E1H(bp)) {
4716                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4717                         IS_E1HMF(bp));
4718                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4719                         IS_E1HMF(bp));
4720                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4721                         IS_E1HMF(bp));
4722                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4723                         IS_E1HMF(bp));
4724
4725                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4726                          bp->e1hov);
4727         }
4728
4729         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
4730         max_agg_size =
4731                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4732                           SGE_PAGE_SIZE * PAGES_PER_SGE),
4733                     (u32)0xffff);
4734         for_each_queue(bp, i) {
4735                 struct bnx2x_fastpath *fp = &bp->fp[i];
4736
4737                 REG_WR(bp, BAR_USTRORM_INTMEM +
4738                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4739                        U64_LO(fp->rx_comp_mapping));
4740                 REG_WR(bp, BAR_USTRORM_INTMEM +
4741                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4742                        U64_HI(fp->rx_comp_mapping));
4743
4744                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4745                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4746                          max_agg_size);
4747         }
4748 }
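/* Editor's note -- illustrative sketch, not driver code: max_agg_size above
 * caps a TPA aggregation at the smaller of the SGE capacity (at most 8
 * frags, each SGE_PAGE_SIZE * PAGES_PER_SGE bytes) and the 16-bit register
 * limit 0xffff.  Hypothetical standalone form:
 */
#if 0
#include <stdint.h>

static uint16_t ex_max_agg_size(uint32_t max_skb_frags,
				uint32_t sge_page_size,
				uint32_t pages_per_sge)
{
	uint32_t frags = max_skb_frags < 8 ? max_skb_frags : 8; /* FW limit */
	uint32_t size = frags * sge_page_size * pages_per_sge;

	return size > 0xffff ? 0xffff : (uint16_t)size;	/* fits u16 register */
}
#endif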
4749
4750 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4751 {
4752         switch (load_code) {
4753         case FW_MSG_CODE_DRV_LOAD_COMMON:
4754                 bnx2x_init_internal_common(bp);
4755                 /* no break */
4756
4757         case FW_MSG_CODE_DRV_LOAD_PORT:
4758                 bnx2x_init_internal_port(bp);
4759                 /* no break */
4760
4761         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4762                 bnx2x_init_internal_func(bp);
4763                 break;
4764
4765         default:
4766                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4767                 break;
4768         }
4769 }
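/* Editor's note -- illustrative sketch, not driver code: the switch in
 * bnx2x_init_internal() relies on deliberate fall-through (the "no break"
 * comments), so a COMMON load also runs the PORT and FUNCTION init, and a
 * PORT load also runs the FUNCTION init -- each load code covers its own
 * scope plus every narrower one.
 */
#if 0
enum ex_load { EX_LOAD_COMMON, EX_LOAD_PORT, EX_LOAD_FUNCTION };

static void ex_init(enum ex_load load)
{
	switch (load) {
	case EX_LOAD_COMMON:
		/* chip-wide (once per chip) init goes here */
		/* fall through */
	case EX_LOAD_PORT:
		/* per-port init goes here */
		/* fall through */
	case EX_LOAD_FUNCTION:
		/* per-PCI-function init goes here */
		break;
	}
}
#endif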
4770
4771 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4772 {
4773         int i;
4774
4775         for_each_queue(bp, i) {
4776                 struct bnx2x_fastpath *fp = &bp->fp[i];
4777
4778                 fp->bp = bp;
4779                 fp->state = BNX2X_FP_STATE_CLOSED;
4780                 fp->index = i;
4781                 fp->cl_id = BP_L_ID(bp) + i;
4782                 fp->sb_id = fp->cl_id;
4783                 DP(NETIF_MSG_IFUP,
4784                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4785                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4786                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4787                               FP_SB_ID(fp));
4788                 bnx2x_update_fpsb_idx(fp);
4789         }
4790
4791         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4792                           DEF_SB_ID);
4793         bnx2x_update_dsb_idx(bp);
4794         bnx2x_update_coalesce(bp);
4795         bnx2x_init_rx_rings(bp);
4796         bnx2x_init_tx_ring(bp);
4797         bnx2x_init_sp_ring(bp);
4798         bnx2x_init_context(bp);
4799         bnx2x_init_internal(bp, load_code);
4800         bnx2x_init_ind_table(bp);
4801         bnx2x_int_enable(bp);
4802 }
4803
4804 /* end of nic init */
4805
4806 /*
4807  * gzip service functions
4808  */
4809
4810 static int bnx2x_gunzip_init(struct bnx2x *bp)
4811 {
4812         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4813                                               &bp->gunzip_mapping);
4814         if (bp->gunzip_buf  == NULL)
4815                 goto gunzip_nomem1;
4816
4817         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4818         if (bp->strm  == NULL)
4819                 goto gunzip_nomem2;
4820
4821         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4822                                       GFP_KERNEL);
4823         if (bp->strm->workspace == NULL)
4824                 goto gunzip_nomem3;
4825
4826         return 0;
4827
4828 gunzip_nomem3:
4829         kfree(bp->strm);
4830         bp->strm = NULL;
4831
4832 gunzip_nomem2:
4833         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4834                             bp->gunzip_mapping);
4835         bp->gunzip_buf = NULL;
4836
4837 gunzip_nomem1:
4838         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4839                " decompression\n", bp->dev->name);
4840         return -ENOMEM;
4841 }
4842
4843 static void bnx2x_gunzip_end(struct bnx2x *bp)
4844 {
4845         kfree(bp->strm->workspace);
4846
4847         kfree(bp->strm);
4848         bp->strm = NULL;
4849
4850         if (bp->gunzip_buf) {
4851                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4852                                     bp->gunzip_mapping);
4853                 bp->gunzip_buf = NULL;
4854         }
4855 }
4856
4857 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4858 {
4859         int n, rc;
4860
4861         /* check gzip header */
4862         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4863                 return -EINVAL;
4864
4865         n = 10;
4866
4867 #define FNAME                           0x8
4868
4869         if (zbuf[3] & FNAME)
4870                 while ((zbuf[n++] != 0) && (n < len));
4871
4872         bp->strm->next_in = zbuf + n;
4873         bp->strm->avail_in = len - n;
4874         bp->strm->next_out = bp->gunzip_buf;
4875         bp->strm->avail_out = FW_BUF_SIZE;
4876
4877         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4878         if (rc != Z_OK)
4879                 return rc;
4880
4881         rc = zlib_inflate(bp->strm, Z_FINISH);
4882         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4883                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4884                        bp->dev->name, bp->strm->msg);
4885
4886         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4887         if (bp->gunzip_outlen & 0x3)
4888                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4889                                     " gunzip_outlen (%d) not aligned\n",
4890                        bp->dev->name, bp->gunzip_outlen);
4891         bp->gunzip_outlen >>= 2;
4892
4893         zlib_inflateEnd(bp->strm);
4894
4895         if (rc == Z_STREAM_END)
4896                 return 0;
4897
4898         return rc;
4899 }
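/* Editor's note -- illustrative sketch, not driver code: bnx2x_gunzip()
 * above skips the 10-byte gzip header (plus the optional NUL-terminated
 * FNAME field) by hand and then inflates the payload as a *raw* deflate
 * stream -- that is what the negative windowBits (-MAX_WBITS) means.  A
 * hypothetical userspace equivalent against stock zlib:
 */
#if 0
#include <string.h>
#include <zlib.h>

static int ex_gunzip(const unsigned char *zbuf, unsigned int len,
		     unsigned char *out, unsigned int out_len)
{
	z_stream strm;
	unsigned int n = 10;		/* fixed part of the gzip header */
	int rc;

	if (len < n || zbuf[0] != 0x1f || zbuf[1] != 0x8b ||
	    zbuf[2] != Z_DEFLATED)
		return Z_DATA_ERROR;	/* not gzip-wrapped deflate */

	if (zbuf[3] & 0x08)		/* FNAME flag: skip the file name */
		while (n < len && zbuf[n++] != 0)
			;

	memset(&strm, 0, sizeof(strm));
	strm.next_in = (unsigned char *)(zbuf + n);
	strm.avail_in = len - n;
	strm.next_out = out;
	strm.avail_out = out_len;

	/* negative windowBits: raw deflate, no zlib header or checksum */
	rc = inflateInit2(&strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = inflate(&strm, Z_FINISH);
	inflateEnd(&strm);

	return (rc == Z_STREAM_END) ? (int)(out_len - strm.avail_out) : rc;
}
#endif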
4900
4901 /* nic load/unload */
4902
4903 /*
4904  * General service functions
4905  */
4906
4907 /* send a NIG loopback debug packet */
4908 static void bnx2x_lb_pckt(struct bnx2x *bp)
4909 {
4910         u32 wb_write[3];
4911
4912         /* Ethernet source and destination addresses */
4913         wb_write[0] = 0x55555555;
4914         wb_write[1] = 0x55555555;
4915         wb_write[2] = 0x20;             /* SOP */
4916         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4917
4918         /* NON-IP protocol */
4919         wb_write[0] = 0x09000000;
4920         wb_write[1] = 0x55555555;
4921         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4922         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4923 }
4924
4925 /* Some of the internal memories are not directly
4926  * readable from the driver, so to test them
4927  * we send debug packets.
4928  */
4929 static int bnx2x_int_mem_test(struct bnx2x *bp)
4930 {
4931         int factor;
4932         int count, i;
4933         u32 val = 0;
4934
4935         if (CHIP_REV_IS_FPGA(bp))
4936                 factor = 120;
4937         else if (CHIP_REV_IS_EMUL(bp))
4938                 factor = 200;
4939         else
4940                 factor = 1;
4941
4942         DP(NETIF_MSG_HW, "start part1\n");
4943
4944         /* Disable inputs of parser neighbor blocks */
4945         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4946         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4947         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4948         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4949
4950         /*  Write 0 to parser credits for CFC search request */
4951         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4952
4953         /* send Ethernet packet */
4954         bnx2x_lb_pckt(bp);
4955
4956         /* TODO: should the NIG statistics be reset here? */
4957         /* Wait until NIG register shows 1 packet of size 0x10 */
4958         count = 1000 * factor;
4959         while (count) {
4960
4961                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4962                 val = *bnx2x_sp(bp, wb_data[0]);
4963                 if (val == 0x10)
4964                         break;
4965
4966                 msleep(10);
4967                 count--;
4968         }
4969         if (val != 0x10) {
4970                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4971                 return -1;
4972         }
4973
4974         /* Wait until PRS register shows 1 packet */
4975         count = 1000 * factor;
4976         while (count) {
4977                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4978                 if (val == 1)
4979                         break;
4980
4981                 msleep(10);
4982                 count--;
4983         }
4984         if (val != 0x1) {
4985                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4986                 return -2;
4987         }
4988
4989         /* Reset and init BRB, PRS */
4990         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4991         msleep(50);
4992         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4993         msleep(50);
4994         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4995         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4996
4997         DP(NETIF_MSG_HW, "part2\n");
4998
4999         /* Disable inputs of parser neighbor blocks */
5000         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5001         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5002         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5003         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5004
5005         /* Write 0 to parser credits for CFC search request */
5006         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5007
5008         /* send 10 Ethernet packets */
5009         for (i = 0; i < 10; i++)
5010                 bnx2x_lb_pckt(bp);
5011
5012         /* Wait until NIG register shows 10 + 1
5013            packets of size 11*0x10 = 0xb0 */
5014         count = 1000 * factor;
5015         while (count) {
5016
5017                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5018                 val = *bnx2x_sp(bp, wb_data[0]);
5019                 if (val == 0xb0)
5020                         break;
5021
5022                 msleep(10);
5023                 count--;
5024         }
5025         if (val != 0xb0) {
5026                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5027                 return -3;
5028         }
5029
5030         /* Wait until PRS register shows 2 packets */
5031         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5032         if (val != 2)
5033                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5034
5035         /* Write 1 to parser credits for CFC search request */
5036         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5037
5038         /* Wait until PRS register shows 3 packets */
5039         msleep(10 * factor);
5041         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5042         if (val != 3)
5043                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5044
5045         /* clear NIG EOP FIFO */
5046         for (i = 0; i < 11; i++)
5047                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5048         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5049         if (val != 1) {
5050                 BNX2X_ERR("clear of NIG failed\n");
5051                 return -4;
5052         }
5053
5054         /* Reset and init BRB, PRS, NIG */
5055         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5056         msleep(50);
5057         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5058         msleep(50);
5059         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5060         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5061 #ifndef BCM_ISCSI
5062         /* set NIC mode */
5063         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5064 #endif
5065
5066         /* Enable inputs of parser neighbor blocks */
5067         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5068         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5069         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5070         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5071
5072         DP(NETIF_MSG_HW, "done\n");
5073
5074         return 0; /* OK */
5075 }
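/* Editor's note -- illustrative sketch, not driver code: the memory test
 * above repeats the same poll-with-timeout idiom -- re-read a counter up to
 * 'count' times (scaled by 'factor' on slow emulation/FPGA platforms),
 * sleeping between attempts, and fail if the expected value never appears.
 * Hypothetical standalone form:
 */
#if 0
static int ex_poll_until(unsigned int (*read_val)(void *ctx), void *ctx,
			 unsigned int expect, int count)
{
	while (count--) {
		if (read_val(ctx) == expect)
			return 0;	/* value reached in time */
		/* the driver sleeps 10ms (msleep(10)) between reads */
	}
	return -1;			/* timed out */
}
#endif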
5076
5077 static void enable_blocks_attention(struct bnx2x *bp)
5078 {
5079         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5080         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5081         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5082         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5083         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5084         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5085         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5086         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5087         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5088 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5089 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5090         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5091         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5092         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5093 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5094 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5095         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5096         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5097         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5098         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5099 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5100 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5101         if (CHIP_REV_IS_FPGA(bp))
5102                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5103         else
5104                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5105         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5106         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5107         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5108 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5109 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5110         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5111         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5112 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5113         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5114 }
5115
5116
5117 static int bnx2x_init_common(struct bnx2x *bp)
5118 {
5119         u32 val, i;
5120
5121         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5122
5123         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5124         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5125
5126         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5127         if (CHIP_IS_E1H(bp))
5128                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5129
5130         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5131         msleep(30);
5132         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5133
5134         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5135         if (CHIP_IS_E1(bp)) {
5136                 /* enable HW interrupt from PXP on USDM overflow
5137                    bit 16 on INT_MASK_0 */
5138                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5139         }
5140
5141         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5142         bnx2x_init_pxp(bp);
5143
5144 #ifdef __BIG_ENDIAN
5145         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5146         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5147         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5148         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5149         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5150         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5151
5152 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5153         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5154         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5155         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5156         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5157 #endif
5158
5159         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5160 #ifdef BCM_ISCSI
5161         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5162         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5163         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5164 #endif
5165
5166         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5167                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5168
5169         /* let the HW do its magic ... */
5170         msleep(100);
5171         /* finish PXP init */
5172         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5173         if (val != 1) {
5174                 BNX2X_ERR("PXP2 CFG failed\n");
5175                 return -EBUSY;
5176         }
5177         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5178         if (val != 1) {
5179                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5180                 return -EBUSY;
5181         }
5182
5183         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5184         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5185
5186         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5187
5188         /* clean the DMAE memory */
5189         bp->dmae_ready = 1;
5190         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5191
5192         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5193         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5194         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5195         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5196
5197         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5198         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5199         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5200         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5201
5202         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5203         /* soft reset pulse */
5204         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5205         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5206
5207 #ifdef BCM_ISCSI
5208         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5209 #endif
5210
5211         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5212         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5213         if (!CHIP_REV_IS_SLOW(bp)) {
5214                 /* enable hw interrupt from doorbell Q */
5215                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5216         }
5217
5218         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5219         if (CHIP_REV_IS_SLOW(bp)) {
5220                 /* fix for emulation and FPGA: disable pause */
5221                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5222                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5223                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5224                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5225         }
5226
5227         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5228         /* set NIC mode */
5229         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5230         if (CHIP_IS_E1H(bp))
5231                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5232
5233         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5234         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5235         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5236         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5237
5238         if (CHIP_IS_E1H(bp)) {
5239                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5240                                 STORM_INTMEM_SIZE_E1H/2);
5241                 bnx2x_init_fill(bp,
5242                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5243                                 0, STORM_INTMEM_SIZE_E1H/2);
5244                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5245                                 STORM_INTMEM_SIZE_E1H/2);
5246                 bnx2x_init_fill(bp,
5247                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5248                                 0, STORM_INTMEM_SIZE_E1H/2);
5249                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5250                                 STORM_INTMEM_SIZE_E1H/2);
5251                 bnx2x_init_fill(bp,
5252                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5253                                 0, STORM_INTMEM_SIZE_E1H/2);
5254                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5255                                 STORM_INTMEM_SIZE_E1H/2);
5256                 bnx2x_init_fill(bp,
5257                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5258                                 0, STORM_INTMEM_SIZE_E1H/2);
5259         } else { /* E1 */
5260                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5261                                 STORM_INTMEM_SIZE_E1);
5262                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5263                                 STORM_INTMEM_SIZE_E1);
5264                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5265                                 STORM_INTMEM_SIZE_E1);
5266                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5267                                 STORM_INTMEM_SIZE_E1);
5268         }
5269
5270         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5271         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5272         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5273         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5274
5275         /* sync semi rtc */
5276         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5277                0x80000000);
5278         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5279                0x80000000);
5280
5281         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5282         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5283         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5284
5285         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5286         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5287                 REG_WR(bp, i, 0xc0cac01a);
5288                 /* TODO: replace with something meaningful */
5289         }
5290         if (CHIP_IS_E1H(bp))
5291                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5292         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5293
5294         if (sizeof(union cdu_context) != 1024)
5295                 /* we currently assume that a context is 1024 bytes */
5296                 printk(KERN_ALERT PFX "please adjust the size of"
5297                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5298
5299         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5300         val = (4 << 24) + (0 << 12) + 1024;
5301         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5302         if (CHIP_IS_E1(bp)) {
5303                 /* !!! fix pxp client credit until excel update */
5304                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5305                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5306         }
5307
5308         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5309         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5310
5311         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5312         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5313
5314         /* PXPCS COMMON comes here */
5315         /* Reset PCIE errors for debug */
5316         REG_WR(bp, 0x2814, 0xffffffff);
5317         REG_WR(bp, 0x3820, 0xffffffff);
5318
5319         /* EMAC0 COMMON comes here */
5320         /* EMAC1 COMMON comes here */
5321         /* DBU COMMON comes here */
5322         /* DBG COMMON comes here */
5323
5324         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5325         if (CHIP_IS_E1H(bp)) {
5326                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5327                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5328         }
5329
5330         if (CHIP_REV_IS_SLOW(bp))
5331                 msleep(200);
5332
5333         /* finish CFC init */
5334         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5335         if (val != 1) {
5336                 BNX2X_ERR("CFC LL_INIT failed\n");
5337                 return -EBUSY;
5338         }
5339         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5340         if (val != 1) {
5341                 BNX2X_ERR("CFC AC_INIT failed\n");
5342                 return -EBUSY;
5343         }
5344         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5345         if (val != 1) {
5346                 BNX2X_ERR("CFC CAM_INIT failed\n");
5347                 return -EBUSY;
5348         }
5349         REG_WR(bp, CFC_REG_DEBUG0, 0);
5350
5351         /* read NIG statistic
5352            to see if this is the first load since power-up */
5353         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5354         val = *bnx2x_sp(bp, wb_data[0]);
5355
5356         /* do internal memory self test */
5357         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5358                 BNX2X_ERR("internal mem self test failed\n");
5359                 return -EBUSY;
5360         }
5361
5362         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5363         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5364         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5365                 /* Fan failure is indicated by SPIO 5 */
5366                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5367                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5368
5369                 /* set to active low mode */
5370                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5371                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5372                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5373                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5374
5375                 /* enable interrupt to signal the IGU */
5376                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5377                 val |= (1 << MISC_REGISTERS_SPIO_5);
5378                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5379                 break;
5380
5381         default:
5382                 break;
5383         }
5384
5385         /* clear PXP2 attentions */
5386         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5387
5388         enable_blocks_attention(bp);
5389
5390         if (!BP_NOMCP(bp)) {
5391                 bnx2x_acquire_phy_lock(bp);
5392                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5393                 bnx2x_release_phy_lock(bp);
5394         } else
5395                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5396
5397         return 0;
5398 }
5399
5400 static int bnx2x_init_port(struct bnx2x *bp)
5401 {
5402         int port = BP_PORT(bp);
5403         u32 val;
#ifdef BCM_ISCSI
        /* used by the BCM_ISCSI blocks below but missing from this
         * version, so that stale #ifdef path would not compile; the
         * initial ILT line is assumed from the "Port0  1 / Port1  385"
         * comments (note ONCHIP_ADDR1/2 and PXP_ONE_ILT are only defined
         * further down in this file) */
        int func = BP_FUNC(bp);
        int i = port ? 384 : 0;
        u32 wb_write[2];
#endif
5404
5405         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5406
5407         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5408
5409         /* Port PXP comes here */
5410         /* Port PXP2 comes here */
5411 #ifdef BCM_ISCSI
5412         /* Port0  1
5413          * Port1  385 */
5414         i++;
5415         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5416         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5417         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5418         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5419
5420         /* Port0  2
5421          * Port1  386 */
5422         i++;
5423         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5424         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5425         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5426         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5427
5428         /* Port0  3
5429          * Port1  387 */
5430         i++;
5431         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5432         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5433         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5434         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5435 #endif
5436         /* Port CMs come here */
5437
5438         /* Port QM comes here */
5439 #ifdef BCM_ISCSI
5440         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5441         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5442
5443         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5444                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5445 #endif
5446         /* Port DQ comes here */
5447         /* Port BRB1 comes here */
5448         /* Port PRS comes here */
5449         /* Port TSDM comes here */
5450         /* Port CSDM comes here */
5451         /* Port USDM comes here */
5452         /* Port XSDM comes here */
5453         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5454                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5455         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5456                              port ? USEM_PORT1_END : USEM_PORT0_END);
5457         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5458                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5459         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5460                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5461         /* Port UPB comes here */
5462         /* Port XPB comes here */
5463
5464         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5465                              port ? PBF_PORT1_END : PBF_PORT0_END);
5466
5467         /* configure PBF to work without PAUSE for MTU 9000 */
5468         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5469
5470         /* update threshold */
5471         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5472         /* update init credit */
5473         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
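        /* Editorial note: 9040/16 = 565 lines of 16 bytes, presumably
         * covering a 9000-byte MTU frame plus overhead; the init credit
         * then works out to 565 + 553 - 22 = 1096.  The constants 553
         * and 22 are assumed to be hardware-specific values from the
         * init spec - they are not derived anywhere in this file.
         */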
5474
5475         /* probe changes */
5476         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5477         msleep(5);
5478         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5479
5480 #ifdef BCM_ISCSI
5481         /* tell the searcher where the T2 table is */
5482         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5483
5484         wb_write[0] = U64_LO(bp->t2_mapping);
5485         wb_write[1] = U64_HI(bp->t2_mapping);
5486         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5487         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5488         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5489         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5490
5491         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5492         /* Port SRCH comes here */
5493 #endif
5494         /* Port CDU comes here */
5495         /* Port CFC comes here */
5496
5497         if (CHIP_IS_E1(bp)) {
5498                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5499                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5500         }
5501         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5502                              port ? HC_PORT1_END : HC_PORT0_END);
5503
5504         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5505                                     MISC_AEU_PORT0_START,
5506                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5507         /* init aeu_mask_attn_func_0/1:
5508          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5509          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5510          *             bits 4-7 are used for "per vn group attention" */
5511         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5512                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5513
5514         /* Port PXPCS comes here */
5515         /* Port EMAC0 comes here */
5516         /* Port EMAC1 comes here */
5517         /* Port DBU comes here */
5518         /* Port DBG comes here */
5519         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5520                              port ? NIG_PORT1_END : NIG_PORT0_END);
5521
5522         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5523
5524         if (CHIP_IS_E1H(bp)) {
5525                 u32 wsum;
5526                 struct cmng_struct_per_port m_cmng_port;
5527                 int vn;
5528
5529                 /* 0x2 disable e1hov, 0x1 enable */
5530                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5531                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5532
5533                 /* Init RATE SHAPING and FAIRNESS contexts.
5534                    Initialize as if there is a 10G link. */
5535                 wsum = bnx2x_calc_vn_wsum(bp);
5536                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5537                 if (IS_E1HMF(bp))
5538                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5539                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5540                                         wsum, 10000, &m_cmng_port);
5541         }
5542
5543         /* Port MCP comes here */
5544         /* Port DMAE comes here */
5545
5546         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5547         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5548         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5549                 /* add SPIO 5 to group 0 */
5550                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5551                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5552                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5553                 break;
5554
5555         default:
5556                 break;
5557         }
5558
5559         bnx2x__link_reset(bp);
5560
5561         return 0;
5562 }
5563
5564 #define ILT_PER_FUNC            (768/2)
5565 #define FUNC_ILT_BASE(func)     ((func) * ILT_PER_FUNC)
5566 /* the phys address is shifted right 12 bits and a 1=valid bit is
5567    set at bit 52 (the 53rd bit);
5568    then, since this is a wide register(TM),
5569    we split it into two 32 bit writes
5570  */
5571 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5572 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
5573 #define PXP_ONE_ILT(x)          (((x) << 10) | (x))
5574 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | (f))
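/* Worked example (editorial): for a DMA address of 0x0000000123456000,
 * ONCHIP_ADDR1() gives the low 32 bits of (addr >> 12), i.e. 0x00123456,
 * and ONCHIP_ADDR2() gives the valid bit (bit 20 here, bit 52 of the full
 * 64-bit entry) OR'ed with (addr >> 44), i.e. 0x00100000.  bnx2x_ilt_wr()
 * just below writes the pair as one wide-bus access.
 */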
5575
5576 #define CNIC_ILT_LINES          0
5577
5578 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5579 {
5580         int reg;
5581
5582         if (CHIP_IS_E1H(bp))
5583                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5584         else /* E1 */
5585                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5586
5587         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5588 }
5589
5590 static int bnx2x_init_func(struct bnx2x *bp)
5591 {
5592         int port = BP_PORT(bp);
5593         int func = BP_FUNC(bp);
5594         int i;
5595
5596         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5597
5598         i = FUNC_ILT_BASE(func);
5599
5600         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5601         if (CHIP_IS_E1H(bp)) {
5602                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5603                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5604         } else /* E1 */
5605                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5606                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
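        /* Editorial note: with ILT_PER_FUNC = 768/2 = 384 and
         * CNIC_ILT_LINES = 0, function f owns ILT lines starting at
         * 384*f, and the range programmed above is a single line mapping
         * the slowpath context area - presumably leaving room for CNIC
         * lines later.
         */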
5607
5608
5609         if (CHIP_IS_E1H(bp)) {
5610                 for (i = 0; i < 9; i++)
5611                         bnx2x_init_block(bp,
5612                                          cm_start[func][i], cm_end[func][i]);
5613
5614                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5615                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5616         }
5617
5618         /* HC init per function */
5619         if (CHIP_IS_E1H(bp)) {
5620                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5621
5622                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5623                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5624         }
5625         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5626
5627         if (CHIP_IS_E1H(bp))
5628                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5629
5630         /* Reset PCIE errors for debug */
5631         REG_WR(bp, 0x2114, 0xffffffff);
5632         REG_WR(bp, 0x2120, 0xffffffff);
5633
5634         return 0;
5635 }
5636
5637 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5638 {
5639         int i, rc = 0;
5640
5641         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5642            BP_FUNC(bp), load_code);
5643
5644         bp->dmae_ready = 0;
5645         mutex_init(&bp->dmae_mutex);
5646         bnx2x_gunzip_init(bp);
5647
5648         switch (load_code) {
5649         case FW_MSG_CODE_DRV_LOAD_COMMON:
5650                 rc = bnx2x_init_common(bp);
5651                 if (rc)
5652                         goto init_hw_err;
5653                 /* no break */
5654
5655         case FW_MSG_CODE_DRV_LOAD_PORT:
5656                 bp->dmae_ready = 1;
5657                 rc = bnx2x_init_port(bp);
5658                 if (rc)
5659                         goto init_hw_err;
5660                 /* no break */
5661
5662         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5663                 bp->dmae_ready = 1;
5664                 rc = bnx2x_init_func(bp);
5665                 if (rc)
5666                         goto init_hw_err;
5667                 break;
5668
5669         default:
5670                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5671                 break;
5672         }
5673
5674         if (!BP_NOMCP(bp)) {
5675                 int func = BP_FUNC(bp);
5676
5677                 bp->fw_drv_pulse_wr_seq =
5678                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5679                                  DRV_PULSE_SEQ_MASK);
5680                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5681                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5682                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5683         } else
5684                 bp->func_stx = 0;
5685
5686         /* this needs to be done before gunzip end */
5687         bnx2x_zero_def_sb(bp);
5688         for_each_queue(bp, i)
5689                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5690
5691 init_hw_err:
5692         bnx2x_gunzip_end(bp);
5693
5694         return rc;
5695 }
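/* Editorial note on the switch above: the FW load codes are cumulative by
 * design.  A COMMON load deliberately falls through ("no break") into the
 * PORT case and then into the FUNCTION case, so the first function to come
 * up initializes the common blocks, its port and itself, while later
 * functions run only the tail of the cascade.
 */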
5696
5697 /* send the MCP a request, block until there is a reply */
5698 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5699 {
5700         int func = BP_FUNC(bp);
5701         u32 seq = ++bp->fw_seq;
5702         u32 rc = 0;
5703         u32 cnt = 1;
5704         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5705
5706         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5707         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5708
5709         do {
5710                 /* let the FW do its magic ... */
5711                 msleep(delay);
5712
5713                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5714
5715                 /* Give the FW up to 2 seconds (200 * 10ms), 20 on a slow chip */
5716         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5717
5718         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5719            cnt*delay, rc, seq);
5720
5721         /* is this a reply to our command? */
5722         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5723                 rc &= FW_MSG_CODE_MASK;
5724
5725         } else {
5726                 /* FW BUG! */
5727                 BNX2X_ERR("FW failed to respond!\n");
5728                 bnx2x_fw_dump(bp);
5729                 rc = 0;
5730         }
5731
5732         return rc;
5733 }
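/* Usage sketch (editorial): callers treat a zero return as "no reply",
 * e.g.
 *
 *	load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *	if (!load_code)
 *		return -EBUSY;
 *
 * The sequence number carried in the low bits ties each reply to its
 * request, so a stale fw_mb_header left over from an earlier command is
 * never mistaken for the current reply.
 */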
5734
5735 static void bnx2x_free_mem(struct bnx2x *bp)
5736 {
5737
5738 #define BNX2X_PCI_FREE(x, y, size) \
5739         do { \
5740                 if (x) { \
5741                         pci_free_consistent(bp->pdev, size, x, y); \
5742                         x = NULL; \
5743                         y = 0; \
5744                 } \
5745         } while (0)
5746
5747 #define BNX2X_FREE(x) \
5748         do { \
5749                 if (x) { \
5750                         vfree(x); \
5751                         x = NULL; \
5752                 } \
5753         } while (0)
5754
5755         int i;
5756
5757         /* fastpath */
5758         for_each_queue(bp, i) {
5759
5760                 /* Status blocks */
5761                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5762                                bnx2x_fp(bp, i, status_blk_mapping),
5763                                sizeof(struct host_status_block) +
5764                                sizeof(struct eth_tx_db_data));
5765
5766                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5767                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5768                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5769                                bnx2x_fp(bp, i, tx_desc_mapping),
5770                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5771
5772                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5773                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5774                                bnx2x_fp(bp, i, rx_desc_mapping),
5775                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5776
5777                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5778                                bnx2x_fp(bp, i, rx_comp_mapping),
5779                                sizeof(struct eth_fast_path_rx_cqe) *
5780                                NUM_RCQ_BD);
5781
5782                 /* SGE ring */
5783                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5784                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5785                                bnx2x_fp(bp, i, rx_sge_mapping),
5786                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5787         }
5788         /* end of fastpath */
5789
5790         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5791                        sizeof(struct host_def_status_block));
5792
5793         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5794                        sizeof(struct bnx2x_slowpath));
5795
5796 #ifdef BCM_ISCSI
5797         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5798         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5799         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5800         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5801 #endif
5802         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5803
5804 #undef BNX2X_PCI_FREE
5805 #undef BNX2X_FREE
5806 }
5807
5808 static int bnx2x_alloc_mem(struct bnx2x *bp)
5809 {
5810
5811 #define BNX2X_PCI_ALLOC(x, y, size) \
5812         do { \
5813                 x = pci_alloc_consistent(bp->pdev, size, y); \
5814                 if (x == NULL) \
5815                         goto alloc_mem_err; \
5816                 memset(x, 0, size); \
5817         } while (0)
5818
5819 #define BNX2X_ALLOC(x, size) \
5820         do { \
5821                 x = vmalloc(size); \
5822                 if (x == NULL) \
5823                         goto alloc_mem_err; \
5824                 memset(x, 0, size); \
5825         } while (0)
5826
5827         int i;
5828
5829         /* fastpath */
5830         for_each_queue(bp, i) {
5831                 bnx2x_fp(bp, i, bp) = bp;
5832
5833                 /* Status blocks */
5834                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5835                                 &bnx2x_fp(bp, i, status_blk_mapping),
5836                                 sizeof(struct host_status_block) +
5837                                 sizeof(struct eth_tx_db_data));
5838
5839                 bnx2x_fp(bp, i, hw_tx_prods) =
5840                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5841
5842                 bnx2x_fp(bp, i, tx_prods_mapping) =
5843                                 bnx2x_fp(bp, i, status_blk_mapping) +
5844                                 sizeof(struct host_status_block);
5845
5846                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5847                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5848                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5849                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5850                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5851                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5852
5853                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5854                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5855                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5856                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5857                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5858
5859                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5860                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5861                                 sizeof(struct eth_fast_path_rx_cqe) *
5862                                 NUM_RCQ_BD);
5863
5864                 /* SGE ring */
5865                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5866                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5867                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5868                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5869                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5870         }
5871         /* end of fastpath */
5872
5873         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5874                         sizeof(struct host_def_status_block));
5875
5876         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5877                         sizeof(struct bnx2x_slowpath));
5878
5879 #ifdef BCM_ISCSI
5880         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5881
5882         /* Initialize T1 */
5883         for (i = 0; i < 64*1024; i += 64) {
5884                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5885                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5886         }
5887
5888         /* allocate the searcher T2 table;
5889            we allocate 1/4 of the T1 size for T2
5890            (T2 is not entered into the ILT) */
5891         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5892
5893         /* Initialize T2 */
5894         for (i = 0; i < 16*1024; i += 64)
5895                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5896
5897         /* now fixup the last line in the block to point to the next block */
5898         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
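        /* Editorial note: the loop above threads the searcher's free list
         * through the last 8 bytes of each 64-byte T2 entry, and the
         * final write makes the list circular by pointing the last entry
         * back at the base of the table.
         */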
5899
5900         /* Timer block array (MAX_CONN*8 bytes), phys uncached; sized for 1024 conns for now */
5901         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5902
5903         /* QM queues (128*MAX_CONN) */
5904         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5905 #endif
5906
5907         /* Slow path ring */
5908         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5909
5910         return 0;
5911
5912 alloc_mem_err:
5913         bnx2x_free_mem(bp);
5914         return -ENOMEM;
5915
5916 #undef BNX2X_PCI_ALLOC
5917 #undef BNX2X_ALLOC
5918 }
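/* Editorial note: bnx2x_alloc_mem() unwinds with the usual goto pattern -
 * any failed BNX2X_PCI_ALLOC()/BNX2X_ALLOC() jumps to alloc_mem_err,
 * which simply calls bnx2x_free_mem().  That is safe for a partially
 * completed allocation because the free macros check each pointer for
 * NULL before releasing it (bp itself is allocated zeroed).
 */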
5919
5920 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5921 {
5922         int i;
5923
5924         for_each_queue(bp, i) {
5925                 struct bnx2x_fastpath *fp = &bp->fp[i];
5926
5927                 u16 bd_cons = fp->tx_bd_cons;
5928                 u16 sw_prod = fp->tx_pkt_prod;
5929                 u16 sw_cons = fp->tx_pkt_cons;
5930
5931                 while (sw_cons != sw_prod) {
5932                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5933                         sw_cons++;
5934                 }
5935         }
5936 }
5937
5938 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5939 {
5940         int i, j;
5941
5942         for_each_queue(bp, j) {
5943                 struct bnx2x_fastpath *fp = &bp->fp[j];
5944
5945                 for (i = 0; i < NUM_RX_BD; i++) {
5946                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5947                         struct sk_buff *skb = rx_buf->skb;
5948
5949                         if (skb == NULL)
5950                                 continue;
5951
5952                         pci_unmap_single(bp->pdev,
5953                                          pci_unmap_addr(rx_buf, mapping),
5954                                          bp->rx_buf_size,
5955                                          PCI_DMA_FROMDEVICE);
5956
5957                         rx_buf->skb = NULL;
5958                         dev_kfree_skb(skb);
5959                 }
5960                 if (!fp->disable_tpa)
5961                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5962                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
5963                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
5964         }
5965 }
5966
5967 static void bnx2x_free_skbs(struct bnx2x *bp)
5968 {
5969         bnx2x_free_tx_skbs(bp);
5970         bnx2x_free_rx_skbs(bp);
5971 }
5972
5973 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5974 {
5975         int i, offset = 1;
5976
5977         free_irq(bp->msix_table[0].vector, bp->dev);
5978         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5979            bp->msix_table[0].vector);
5980
5981         for_each_queue(bp, i) {
5982                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
5983                    "state %x\n", i, bp->msix_table[i + offset].vector,
5984                    bnx2x_fp(bp, i, state));
5985
5986                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5987                         BNX2X_ERR("IRQ of fp #%d being freed while "
5988                                   "state != closed\n", i);
5989
5990                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5991         }
5992 }
5993
5994 static void bnx2x_free_irq(struct bnx2x *bp)
5995 {
5996         if (bp->flags & USING_MSIX_FLAG) {
5997                 bnx2x_free_msix_irqs(bp);
5998                 pci_disable_msix(bp->pdev);
5999                 bp->flags &= ~USING_MSIX_FLAG;
6000
6001         } else
6002                 free_irq(bp->pdev->irq, bp->dev);
6003 }
6004
6005 static int bnx2x_enable_msix(struct bnx2x *bp)
6006 {
6007         int i, rc, offset;
6008
6009         bp->msix_table[0].entry = 0;
6010         offset = 1;
6011         DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6012
6013         for_each_queue(bp, i) {
6014                 int igu_vec = offset + i + BP_L_ID(bp);
6015
6016                 bp->msix_table[i + offset].entry = igu_vec;
6017                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6018                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6019         }
6020
6021         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6022                              bp->num_queues + offset);
6023         if (rc) {
6024                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6025                 return -1;
6026         }
6027         bp->flags |= USING_MSIX_FLAG;
6028
6029         return 0;
6030 }
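/* Editorial note: the resulting vector layout is entry 0 for the
 * slowpath status block and entries 1..num_queues for the fastpath
 * queues.  The IGU vector of fastpath i is offset by BP_L_ID(bp),
 * presumably so that different functions of the same chip claim
 * distinct IGU entries.
 */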
6031
6032 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6033 {
6034         int i, rc, offset = 1;
6035
6036         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6037                          bp->dev->name, bp->dev);
6038         if (rc) {
6039                 BNX2X_ERR("request sp irq failed\n");
6040                 return -EBUSY;
6041         }
6042
6043         for_each_queue(bp, i) {
6044                 rc = request_irq(bp->msix_table[i + offset].vector,
6045                                  bnx2x_msix_fp_int, 0,
6046                                  bp->dev->name, &bp->fp[i]);
6047                 if (rc) {
6048                         BNX2X_ERR("request fp #%d irq failed  rc -%d\n",
6049                                   i + offset, -rc);
6050                         bnx2x_free_msix_irqs(bp);
6051                         return -EBUSY;
6052                 }
6053
6054                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6055         }
6056
6057         return 0;
6058 }
6059
6060 static int bnx2x_req_irq(struct bnx2x *bp)
6061 {
6062         int rc;
6063
6064         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6065                          bp->dev->name, bp->dev);
6066         if (!rc)
6067                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6068
6069         return rc;
6070 }
6071
6072 static void bnx2x_napi_enable(struct bnx2x *bp)
6073 {
6074         int i;
6075
6076         for_each_queue(bp, i)
6077                 napi_enable(&bnx2x_fp(bp, i, napi));
6078 }
6079
6080 static void bnx2x_napi_disable(struct bnx2x *bp)
6081 {
6082         int i;
6083
6084         for_each_queue(bp, i)
6085                 napi_disable(&bnx2x_fp(bp, i, napi));
6086 }
6087
6088 static void bnx2x_netif_start(struct bnx2x *bp)
6089 {
6090         if (atomic_dec_and_test(&bp->intr_sem)) {
6091                 if (netif_running(bp->dev)) {
6092                         if (bp->state == BNX2X_STATE_OPEN)
6093                                 netif_wake_queue(bp->dev);
6094                         bnx2x_napi_enable(bp);
6095                         bnx2x_int_enable(bp);
6096                 }
6097         }
6098 }
6099
6100 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6101 {
6102         bnx2x_int_disable_sync(bp, disable_hw);
6103         if (netif_running(bp->dev)) {
6104                 bnx2x_napi_disable(bp);
6105                 netif_tx_disable(bp->dev);
6106                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6107         }
6108 }
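/* Editorial note: intr_sem acts as a disable-nesting counter -
 * bnx2x_int_disable_sync() increments it on the stop path, and
 * bnx2x_netif_start() re-enables NAPI and interrupts only when
 * atomic_dec_and_test() shows the last disabler has released.
 */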
6109
6110 /*
6111  * Init service functions
6112  */
6113
6114 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6115 {
6116         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6117         int port = BP_PORT(bp);
6118
6119         /* CAM allocation
6120          * unicasts 0-31:port0 32-63:port1
6121          * multicast 64-127:port0 128-191:port1
6122          */
6123         config->hdr.length_6b = 2;
6124         config->hdr.offset = port ? 32 : 0;
6125         config->hdr.client_id = BP_CL_ID(bp);
6126         config->hdr.reserved1 = 0;
6127
6128         /* primary MAC */
6129         config->config_table[0].cam_entry.msb_mac_addr =
6130                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6131         config->config_table[0].cam_entry.middle_mac_addr =
6132                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6133         config->config_table[0].cam_entry.lsb_mac_addr =
6134                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6135         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6136         if (set)
6137                 config->config_table[0].target_table_entry.flags = 0;
6138         else
6139                 CAM_INVALIDATE(config->config_table[0]);
6140         config->config_table[0].target_table_entry.client_id = 0;
6141         config->config_table[0].target_table_entry.vlan_id = 0;
6142
6143         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6144            (set ? "setting" : "clearing"),
6145            config->config_table[0].cam_entry.msb_mac_addr,
6146            config->config_table[0].cam_entry.middle_mac_addr,
6147            config->config_table[0].cam_entry.lsb_mac_addr);
6148
6149         /* broadcast */
6150         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6151         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6152         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6153         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6154         if (set)
6155                 config->config_table[1].target_table_entry.flags =
6156                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6157         else
6158                 CAM_INVALIDATE(config->config_table[1]);
6159         config->config_table[1].target_table_entry.client_id = 0;
6160         config->config_table[1].target_table_entry.vlan_id = 0;
6161
6162         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6163                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6164                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6165 }
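/* Worked example (editorial, little-endian host): for dev_addr
 * 00:11:22:33:44:55 the u16 load of bytes 0-1 yields 0x1100 and swab16()
 * makes msb_mac_addr = 0x0011; likewise middle = 0x2233 and lsb = 0x4455,
 * i.e. the CAM entry holds the MAC as big-endian 16-bit chunks.
 */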
6166
6167 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6168 {
6169         struct mac_configuration_cmd_e1h *config =
6170                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6171
6172         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6173                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6174                 return;
6175         }
6176
6177         /* CAM allocation for E1H
6178          * unicasts: by func number
6179          * multicast: 20+FUNC*20, 20 each
6180          */
6181         config->hdr.length_6b = 1;
6182         config->hdr.offset = BP_FUNC(bp);
6183         config->hdr.client_id = BP_CL_ID(bp);
6184         config->hdr.reserved1 = 0;
6185
6186         /* primary MAC */
6187         config->config_table[0].msb_mac_addr =
6188                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6189         config->config_table[0].middle_mac_addr =
6190                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6191         config->config_table[0].lsb_mac_addr =
6192                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6193         config->config_table[0].client_id = BP_L_ID(bp);
6194         config->config_table[0].vlan_id = 0;
6195         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6196         if (set)
6197                 config->config_table[0].flags = BP_PORT(bp);
6198         else
6199                 config->config_table[0].flags =
6200                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6201
6202         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6203            (set ? "setting" : "clearing"),
6204            config->config_table[0].msb_mac_addr,
6205            config->config_table[0].middle_mac_addr,
6206            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6207
6208         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6209                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6210                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6211 }
6212
6213 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6214                              int *state_p, int poll)
6215 {
6216         /* can take a while if any port is running */
6217         int cnt = 500;
6218
6219         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6220            poll ? "polling" : "waiting", state, idx);
6221
6222         might_sleep();
6223         while (cnt--) {
6224                 if (poll) {
6225                         bnx2x_rx_int(bp->fp, 10);
6226                         /* if the index is different from 0,
6227                          * the reply to some commands will
6228                          * arrive on the non-default queue
6229                          */
6230                         if (idx)
6231                                 bnx2x_rx_int(&bp->fp[idx], 10);
6232                 }
6233
6234                 mb(); /* state is changed by bnx2x_sp_event() */
6235                 if (*state_p == state)
6236                         return 0;
6237
6238                 msleep(1);
6239         }
6240
6241         /* timeout! */
6242         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6243                   poll ? "polling" : "waiting", state, idx);
6244 #ifdef BNX2X_STOP_ON_ERROR
6245         bnx2x_panic();
6246 #endif
6247
6248         return -EBUSY;
6249 }
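/* Editorial note: poll=1 is used on the teardown path (see
 * bnx2x_stop_multi() below), where the ramrod completion has to be
 * reaped by calling bnx2x_rx_int() directly because interrupt-driven
 * processing may already be shut down.
 */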
6250
6251 static int bnx2x_setup_leading(struct bnx2x *bp)
6252 {
6253         int rc;
6254
6255         /* reset IGU state */
6256         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6257
6258         /* SETUP ramrod */
6259         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6260
6261         /* Wait for completion */
6262         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6263
6264         return rc;
6265 }
6266
6267 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6268 {
6269         /* reset IGU state */
6270         bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6271
6272         /* SETUP ramrod */
6273         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6274         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6275
6276         /* Wait for completion */
6277         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6278                                  &(bp->fp[index].state), 0);
6279 }
6280
6281 static int bnx2x_poll(struct napi_struct *napi, int budget);
6282 static void bnx2x_set_rx_mode(struct net_device *dev);
6283
6284 /* must be called with rtnl_lock */
6285 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6286 {
6287         u32 load_code;
6288         int i, rc;
6289 #ifdef BNX2X_STOP_ON_ERROR
6290         if (unlikely(bp->panic))
6291                 return -EPERM;
6292 #endif
6293
6294         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6295
6296         /* Send LOAD_REQUEST command to the MCP.
6297            The reply tells us which type of LOAD to perform:
6298            if we are the first port to be initialized,
6299            the common blocks must be initialized as well; otherwise not
6300         */
6301         if (!BP_NOMCP(bp)) {
6302                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6303                 if (!load_code) {
6304                         BNX2X_ERR("MCP response failure, aborting\n");
6305                         return -EBUSY;
6306                 }
6307                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6308                         return -EBUSY; /* other port in diagnostic mode */
6309
6310         } else {
6311                 int port = BP_PORT(bp);
6312
6313                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6314                    load_count[0], load_count[1], load_count[2]);
6315                 load_count[0]++;
6316                 load_count[1 + port]++;
6317                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6318                    load_count[0], load_count[1], load_count[2]);
6319                 if (load_count[0] == 1)
6320                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6321                 else if (load_count[1 + port] == 1)
6322                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6323                 else
6324                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6325         }
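        /* Editorial example: with no MCP, the first function up sees
         * counts (1, 1, 0) and runs a COMMON load; the first function on
         * the other port sees (2, 1, 1) and runs a PORT load; any later
         * function gets FUNCTION only.
         */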
6326
6327         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6328             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6329                 bp->port.pmf = 1;
6330         else
6331                 bp->port.pmf = 0;
6332         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6333
6334         /* if we can't use MSI-X we only need one fp,
6335          * so try to enable MSI-X with the requested number of fp's
6336          * and fall back to INT#A with one fp
6337          */
6338         if (use_inta) {
6339                 bp->num_queues = 1;
6340
6341         } else {
6342                 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6343                         /* user requested number */
6344                         bp->num_queues = use_multi;
6345
6346                 else if (use_multi)
6347                         bp->num_queues = min_t(u32, num_online_cpus(),
6348                                                BP_MAX_QUEUES(bp));
6349                 else
6350                         bp->num_queues = 1;
6351
6352                 if (bnx2x_enable_msix(bp)) {
6353                         /* failed to enable MSI-X */
6354                         bp->num_queues = 1;
6355                         if (use_multi)
6356                                 BNX2X_ERR("Multi requested but failed"
6357                                           " to enable MSI-X\n");
6358                 }
6359         }
6360         DP(NETIF_MSG_IFUP,
6361            "set number of queues to %d\n", bp->num_queues);
6362
6363         if (bnx2x_alloc_mem(bp))
6364                 return -ENOMEM;
6365
6366         for_each_queue(bp, i)
6367                 bnx2x_fp(bp, i, disable_tpa) =
6368                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6369
6370         if (bp->flags & USING_MSIX_FLAG) {
6371                 rc = bnx2x_req_msix_irqs(bp);
6372                 if (rc) {
6373                         pci_disable_msix(bp->pdev);
6374                         goto load_error;
6375                 }
6376         } else {
6377                 bnx2x_ack_int(bp);
6378                 rc = bnx2x_req_irq(bp);
6379                 if (rc) {
6380                         BNX2X_ERR("IRQ request failed, aborting\n");
6381                         goto load_error;
6382                 }
6383         }
6384
6385         for_each_queue(bp, i)
6386                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6387                                bnx2x_poll, 128);
6388
6389         /* Initialize HW */
6390         rc = bnx2x_init_hw(bp, load_code);
6391         if (rc) {
6392                 BNX2X_ERR("HW init failed, aborting\n");
6393                 goto load_int_disable;
6394         }
6395
6396         /* Setup NIC internals and enable interrupts */
6397         bnx2x_nic_init(bp, load_code);
6398
6399         /* Send LOAD_DONE command to MCP */
6400         if (!BP_NOMCP(bp)) {
6401                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6402                 if (!load_code) {
6403                         BNX2X_ERR("MCP response failure, aborting\n");
6404                         rc = -EBUSY;
6405                         goto load_rings_free;
6406                 }
6407         }
6408
6409         bnx2x_stats_init(bp);
6410
6411         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6412
6413         /* Enable Rx interrupt handling before sending the ramrod
6414            as it's completed on Rx FP queue */
6415         bnx2x_napi_enable(bp);
6416
6417         /* Enable interrupt handling */
6418         atomic_set(&bp->intr_sem, 0);
6419
6420         rc = bnx2x_setup_leading(bp);
6421         if (rc) {
6422                 BNX2X_ERR("Setup leading failed!\n");
6423                 goto load_netif_stop;
6424         }
6425
6426         if (CHIP_IS_E1H(bp))
6427                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6428                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6429                         bp->state = BNX2X_STATE_DISABLED;
6430                 }
6431
6432         if (bp->state == BNX2X_STATE_OPEN)
6433                 for_each_nondefault_queue(bp, i) {
6434                         rc = bnx2x_setup_multi(bp, i);
6435                         if (rc)
6436                                 goto load_netif_stop;
6437                 }
6438
6439         if (CHIP_IS_E1(bp))
6440                 bnx2x_set_mac_addr_e1(bp, 1);
6441         else
6442                 bnx2x_set_mac_addr_e1h(bp, 1);
6443
6444         if (bp->port.pmf)
6445                 bnx2x_initial_phy_init(bp);
6446
6447         /* Start fast path */
6448         switch (load_mode) {
6449         case LOAD_NORMAL:
6450                 /* Tx queue should only be re-enabled */
6451                 netif_wake_queue(bp->dev);
6452                 bnx2x_set_rx_mode(bp->dev);
6453                 break;
6454
6455         case LOAD_OPEN:
6456                 netif_start_queue(bp->dev);
6457                 bnx2x_set_rx_mode(bp->dev);
6458                 if (bp->flags & USING_MSIX_FLAG)
6459                         printk(KERN_INFO PFX "%s: using MSI-X\n",
6460                                bp->dev->name);
6461                 break;
6462
6463         case LOAD_DIAG:
6464                 bnx2x_set_rx_mode(bp->dev);
6465                 bp->state = BNX2X_STATE_DIAG;
6466                 break;
6467
6468         default:
6469                 break;
6470         }
6471
6472         if (!bp->port.pmf)
6473                 bnx2x__link_status_update(bp);
6474
6475         /* start the timer */
6476         mod_timer(&bp->timer, jiffies + bp->current_interval);
6477
6478
6479         return 0;
6480
6481 load_netif_stop:
6482         bnx2x_napi_disable(bp);
6483 load_rings_free:
6484         /* Free SKBs, SGEs, TPA pool and driver internals */
6485         bnx2x_free_skbs(bp);
6486         for_each_queue(bp, i)
6487                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6488 load_int_disable:
6489         bnx2x_int_disable_sync(bp, 1);
6490         /* Release IRQs */
6491         bnx2x_free_irq(bp);
6492 load_error:
6493         bnx2x_free_mem(bp);
6494         bp->port.pmf = 0;
6495
6496         /* TBD we really need to reset the chip
6497            if we want to recover from this */
6498         return rc;
6499 }
6500
6501 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6502 {
6503         int rc;
6504
6505         /* halt the connection */
6506         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6507         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6508
6509         /* Wait for completion */
6510         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6511                                &(bp->fp[index].state), 1);
6512         if (rc) /* timeout */
6513                 return rc;
6514
6515         /* delete cfc entry */
6516         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6517
6518         /* Wait for completion */
6519         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6520                                &(bp->fp[index].state), 1);
6521         return rc;
6522 }
6523
6524 static int bnx2x_stop_leading(struct bnx2x *bp)
6525 {
6526         u16 dsb_sp_prod_idx;
6527         /* if the other port is handling traffic,
6528            this can take a lot of time */
6529         int cnt = 500;
6530         int rc;
6531
6532         might_sleep();
6533
6534         /* Send HALT ramrod */
6535         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6536         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6537
6538         /* Wait for completion */
6539         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6540                                &(bp->fp[0].state), 1);
6541         if (rc) /* timeout */
6542                 return rc;
6543
6544         dsb_sp_prod_idx = *bp->dsb_sp_prod;
6545
6546         /* Send PORT_DELETE ramrod */
6547         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6548
6549         /* Wait for completion to arrive on the default status block;
6550            we are going to reset the chip anyway,
6551            so there is not much to do if this times out
6552          */
6553         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6554                 if (!cnt) {
6555                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6556                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6557                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
6558 #ifdef BNX2X_STOP_ON_ERROR
6559                         bnx2x_panic();
6560 #else
6561                         rc = -EBUSY;
6562 #endif
6563                         break;
6564                 }
6565                 cnt--;
6566                 msleep(1);
6567         }
6568         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6569         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6570
6571         return rc;
6572 }
6573
6574 static void bnx2x_reset_func(struct bnx2x *bp)
6575 {
6576         int port = BP_PORT(bp);
6577         int func = BP_FUNC(bp);
6578         int base, i;
6579
6580         /* Configure IGU */
6581         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6582         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6583
6584         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6585
6586         /* Clear ILT */
6587         base = FUNC_ILT_BASE(func);
6588         for (i = base; i < base + ILT_PER_FUNC; i++)
6589                 bnx2x_ilt_wr(bp, i, 0);
6590 }
6591
6592 static void bnx2x_reset_port(struct bnx2x *bp)
6593 {
6594         int port = BP_PORT(bp);
6595         u32 val;
6596
6597         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6598
6599         /* Do not receive packets into the BRB */
6600         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6601         /* Do not direct non-MCP received packets to the BRB */
6602         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6603                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6604
6605         /* Configure AEU */
6606         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6607
6608         msleep(100);
6609         /* Check for BRB port occupancy */
6610         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6611         if (val)
6612                 DP(NETIF_MSG_IFDOWN,
6613                    "BRB1 is not empty  %d blocks are occupied\n", val);
6614
6615         /* TODO: Close Doorbell port? */
6616 }
6617
6618 static void bnx2x_reset_common(struct bnx2x *bp)
6619 {
6620         /* reset_common */
6621         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6622                0xd3ffff7f);
6623         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6624 }
6625
6626 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6627 {
6628         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6629            BP_FUNC(bp), reset_code);
6630
6631         switch (reset_code) {
6632         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6633                 bnx2x_reset_port(bp);
6634                 bnx2x_reset_func(bp);
6635                 bnx2x_reset_common(bp);
6636                 break;
6637
6638         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6639                 bnx2x_reset_port(bp);
6640                 bnx2x_reset_func(bp);
6641                 break;
6642
6643         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6644                 bnx2x_reset_func(bp);
6645                 break;
6646
6647         default:
6648                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6649                 break;
6650         }
6651 }
6652
6653 /* must be called with rtnl_lock */
6654 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6655 {
6656         int port = BP_PORT(bp);
6657         u32 reset_code = 0;
6658         int i, cnt, rc;
6659
6660         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6661
6662         bp->rx_mode = BNX2X_RX_MODE_NONE;
6663         bnx2x_set_storm_rx_mode(bp);
6664
6665         bnx2x_netif_stop(bp, 1);
6666         if (!netif_running(bp->dev))
6667                 bnx2x_napi_disable(bp);
6668         del_timer_sync(&bp->timer);
6669         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6670                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6671         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6672
6673         /* Wait until tx fast path tasks complete */
6674         for_each_queue(bp, i) {
6675                 struct bnx2x_fastpath *fp = &bp->fp[i];
6676
6677                 cnt = 1000;
6678                 smp_rmb();
6679                 while (BNX2X_HAS_TX_WORK(fp)) {
6680
6681                         bnx2x_tx_int(fp, 1000);
6682                         if (!cnt) {
6683                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6684                                           i);
6685 #ifdef BNX2X_STOP_ON_ERROR
6686                                 bnx2x_panic();
6687                                 return -EBUSY;
6688 #else
6689                                 break;
6690 #endif
6691                         }
6692                         cnt--;
6693                         msleep(1);
6694                         smp_rmb();
6695                 }
6696         }
6697         /* Give HW time to discard old tx messages */
6698         msleep(1);
6699
6700         /* Release IRQs */
6701         bnx2x_free_irq(bp);
6702
6703         if (CHIP_IS_E1(bp)) {
6704                 struct mac_configuration_cmd *config =
6705                                                 bnx2x_sp(bp, mcast_config);
6706
6707                 bnx2x_set_mac_addr_e1(bp, 0);
6708
6709                 for (i = 0; i < config->hdr.length_6b; i++)
6710                         CAM_INVALIDATE(config->config_table[i]);
6711
6712                 config->hdr.length_6b = i;
6713                 if (CHIP_REV_IS_SLOW(bp))
6714                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6715                 else
6716                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6717                 config->hdr.client_id = BP_CL_ID(bp);
6718                 config->hdr.reserved1 = 0;
6719
6720                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6721                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6722                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6723
6724         } else { /* E1H */
6725                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6726
6727                 bnx2x_set_mac_addr_e1h(bp, 0);
6728
6729                 for (i = 0; i < MC_HASH_SIZE; i++)
6730                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6731         }
6732
6733         if (unload_mode == UNLOAD_NORMAL)
6734                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6735
6736         else if (bp->flags & NO_WOL_FLAG) {
6737                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6738                 if (CHIP_IS_E1H(bp))
6739                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6740
6741         } else if (bp->wol) {
6742                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6743                 u8 *mac_addr = bp->dev->dev_addr;
6744                 u32 val;
6745                 /* The mac address is written to entries 1-4 to
6746                    preserve entry 0 which is used by the PMF */
6747                 u8 entry = (BP_E1HVN(bp) + 1)*8;
6748
6749                 val = (mac_addr[0] << 8) | mac_addr[1];
6750                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6751
6752                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6753                       (mac_addr[4] << 8) | mac_addr[5];
6754                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
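                /* Worked example (editorial): for MAC 00:11:22:33:44:55
                 * the two writes above program 0x00000011 and 0x22334455,
                 * i.e. the address is split big-endian across the
                 * MAC_MATCH register pair.
                 */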
6755
6756                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6757
6758         } else
6759                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6760
6761         /* Close multi and leading connections.
6762            Completions for ramrods are collected synchronously */
6763         for_each_nondefault_queue(bp, i)
6764                 if (bnx2x_stop_multi(bp, i))
6765                         goto unload_error;
6766
6767         rc = bnx2x_stop_leading(bp);
6768         if (rc) {
6769                 BNX2X_ERR("Stop leading failed!\n");
6770 #ifdef BNX2X_STOP_ON_ERROR
6771                 return -EBUSY;
6772 #else
6773                 goto unload_error;
6774 #endif
6775         }
6776
6777 unload_error:
6778         if (!BP_NOMCP(bp))
6779                 reset_code = bnx2x_fw_command(bp, reset_code);
6780         else {
6781                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6782                    load_count[0], load_count[1], load_count[2]);
6783                 load_count[0]--;
6784                 load_count[1 + port]--;
6785                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6786                    load_count[0], load_count[1], load_count[2]);
6787                 if (load_count[0] == 0)
6788                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6789                 else if (load_count[1 + port] == 0)
6790                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6791                 else
6792                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6793         }
6794
6795         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6796             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6797                 bnx2x__link_reset(bp);
6798
6799         /* Reset the chip */
6800         bnx2x_reset_chip(bp, reset_code);
6801
6802         /* Report UNLOAD_DONE to MCP */
6803         if (!BP_NOMCP(bp))
6804                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6805         bp->port.pmf = 0;
6806
6807         /* Free SKBs, SGEs, TPA pool and driver internals */
6808         bnx2x_free_skbs(bp);
6809         for_each_queue(bp, i)
6810                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6811         bnx2x_free_mem(bp);
6812
6813         bp->state = BNX2X_STATE_CLOSED;
6814
6815         netif_carrier_off(bp->dev);
6816
6817         return 0;
6818 }
6819
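     /* Reset worker: performs a full NORMAL unload/load cycle under
      * rtnl_lock (typically scheduled from the TX timeout path).  With
      * BNX2X_STOP_ON_ERROR defined the reset is skipped so that the
      * debug dump stays intact.
      */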
6820 static void bnx2x_reset_task(struct work_struct *work)
6821 {
6822         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6823
6824 #ifdef BNX2X_STOP_ON_ERROR
6825         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6826                   " so reset not done to allow debug dump,\n"
6827                   KERN_ERR " you will need to reboot when done\n");
6828         return;
6829 #endif
6830
6831         rtnl_lock();
6832
6833         if (!netif_running(bp->dev))
6834                 goto reset_task_exit;
6835
6836         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6837         bnx2x_nic_load(bp, LOAD_NORMAL);
6838
6839 reset_task_exit:
6840         rtnl_unlock();
6841 }
6842
6843 /* end of nic load/unload */
6844
6845 /* ethtool_ops */
6846
6847 /*
6848  * Init service functions
6849  */
6850
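     /* If a pre-boot UNDI driver (e.g. PXE) was left active, it must be
      * unloaded and the chip reset before this driver can take over.
      * Detection relies on UNDI setting the normal doorbell CID offset
      * to 0x7.
      */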
6851 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6852 {
6853         u32 val;
6854
6855         /* Check if there is any driver already loaded */
6856         val = REG_RD(bp, MISC_REG_UNPREPARED);
6857         if (val == 0x1) {
6858                 /* Check if it is the UNDI driver
6859                  * UNDI driver initializes CID offset for normal bell to 0x7
6860                  */
6861                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6862                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6863                 if (val == 0x7)
6864                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6865                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6866
6867                 if (val == 0x7) {
6868                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6869                         /* save our func */
6870                         int func = BP_FUNC(bp);
6871                         u32 swap_en;
6872                         u32 swap_val;
6873
6874                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
6875
6876                         /* try unload UNDI on port 0 */
6877                         bp->func = 0;
6878                         bp->fw_seq =
6879                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6880                                 DRV_MSG_SEQ_NUMBER_MASK);
6881                         reset_code = bnx2x_fw_command(bp, reset_code);
6882
6883                         /* if UNDI is loaded on the other port */
6884                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6885
6886                                 /* send "DONE" for previous unload */
6887                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6888
6889                                 /* unload UNDI on port 1 */
6890                                 bp->func = 1;
6891                                 bp->fw_seq =
6892                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6893                                         DRV_MSG_SEQ_NUMBER_MASK);
6894                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6895
6896                                 bnx2x_fw_command(bp, reset_code);
6897                         }
6898
6899                         REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6900                                     HC_REG_CONFIG_0), 0x1000);
6901
6902                         /* close input traffic and wait for it */
6903                         /* Do not rcv packets to BRB */
6904                         REG_WR(bp,
6905                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6906                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6907                         /* Do not direct rcv packets that are not for MCP to
6908                          * the BRB */
6909                         REG_WR(bp,
6910                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6911                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6912                         /* clear AEU */
6913                         REG_WR(bp,
6914                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6915                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6916                         msleep(10);
6917
6918                         /* save NIG port swap info */
6919                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6920                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6921                         /* reset device */
6922                         REG_WR(bp,
6923                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6924                                0xd3ffffff);
6925                         REG_WR(bp,
6926                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6927                                0x1403);
6928                         /* take the NIG out of reset and restore swap values */
6929                         REG_WR(bp,
6930                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6931                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
6932                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6933                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6934
6935                         /* send unload done to the MCP */
6936                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6937
6938                         /* restore our func and fw_seq */
6939                         bp->func = func;
6940                         bp->fw_seq =
6941                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6942                                 DRV_MSG_SEQ_NUMBER_MASK);
6943                 }
6944         }
6945 }
6946
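     /* Read the hardware info common to both ports: chip ID, flash size,
      * shmem base (an out-of-range base means the MCP is not active),
      * hw_config, bootcode version and WoL capability.
      */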
6947 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6948 {
6949         u32 val, val2, val3, val4, id;
6950         u16 pmc;
6951
6952         /* Get the chip revision id and number. */
6953         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6954         val = REG_RD(bp, MISC_REG_CHIP_NUM);
6955         id = ((val & 0xffff) << 16);
6956         val = REG_RD(bp, MISC_REG_CHIP_REV);
6957         id |= ((val & 0xf) << 12);
6958         val = REG_RD(bp, MISC_REG_CHIP_METAL);
6959         id |= ((val & 0xff) << 4);
6960         val = REG_RD(bp, MISC_REG_BOND_ID);
6961         id |= (val & 0xf);
6962         bp->common.chip_id = id;
6963         bp->link_params.chip_id = bp->common.chip_id;
6964         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6965
6966         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6967         bp->common.flash_size = (NVRAM_1MB_SIZE <<
6968                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
6969         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6970                        bp->common.flash_size, bp->common.flash_size);
6971
6972         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6973         bp->link_params.shmem_base = bp->common.shmem_base;
6974         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6975
6976         if (!bp->common.shmem_base ||
6977             (bp->common.shmem_base < 0xA0000) ||
6978             (bp->common.shmem_base >= 0xC0000)) {
6979                 BNX2X_DEV_INFO("MCP not active\n");
6980                 bp->flags |= NO_MCP_FLAG;
6981                 return;
6982         }
6983
6984         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6985         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6986                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6987                 BNX2X_ERR("BAD MCP validity signature\n");
6988
6989         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6990         bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6991
6992         BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
6993                        bp->common.hw_config, bp->common.board);
6994
6995         bp->link_params.hw_led_mode = ((bp->common.hw_config &
6996                                         SHARED_HW_CFG_LED_MODE_MASK) >>
6997                                        SHARED_HW_CFG_LED_MODE_SHIFT);
6998
6999         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7000         bp->common.bc_ver = val;
7001         BNX2X_DEV_INFO("bc_ver %X\n", val);
7002         if (val < BNX2X_BC_VER) {
7003                 /* for now only warn
7004                  * later we might need to enforce this */
7005                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7006                           " please upgrade BC\n", BNX2X_BC_VER, val);
7007         }
7008
7009         if (BP_E1HVN(bp) == 0) {
7010                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7011                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7012         } else {
7013                 /* no WOL capability for E1HVN != 0 */
7014                 bp->flags |= NO_WOL_FLAG;
7015         }
7016         BNX2X_DEV_INFO("%sWoL capable\n",
7017                        (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7018
7019         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7020         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7021         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7022         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7023
7024         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7025                val, val2, val3, val4);
7026 }
7027
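     /* Build bp->port.supported from the switch configuration (1G SerDes
      * vs 10G XGXS) and the external PHY type, then mask the result with
      * the NVRAM speed_cap_mask.
      */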
7028 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7029                                                     u32 switch_cfg)
7030 {
7031         int port = BP_PORT(bp);
7032         u32 ext_phy_type;
7033
7034         switch (switch_cfg) {
7035         case SWITCH_CFG_1G:
7036                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7037
7038                 ext_phy_type =
7039                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7040                 switch (ext_phy_type) {
7041                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7042                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7043                                        ext_phy_type);
7044
7045                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7046                                                SUPPORTED_10baseT_Full |
7047                                                SUPPORTED_100baseT_Half |
7048                                                SUPPORTED_100baseT_Full |
7049                                                SUPPORTED_1000baseT_Full |
7050                                                SUPPORTED_2500baseX_Full |
7051                                                SUPPORTED_TP |
7052                                                SUPPORTED_FIBRE |
7053                                                SUPPORTED_Autoneg |
7054                                                SUPPORTED_Pause |
7055                                                SUPPORTED_Asym_Pause);
7056                         break;
7057
7058                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7059                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7060                                        ext_phy_type);
7061
7062                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7063                                                SUPPORTED_10baseT_Full |
7064                                                SUPPORTED_100baseT_Half |
7065                                                SUPPORTED_100baseT_Full |
7066                                                SUPPORTED_1000baseT_Full |
7067                                                SUPPORTED_TP |
7068                                                SUPPORTED_FIBRE |
7069                                                SUPPORTED_Autoneg |
7070                                                SUPPORTED_Pause |
7071                                                SUPPORTED_Asym_Pause);
7072                         break;
7073
7074                 default:
7075                         BNX2X_ERR("NVRAM config error. "
7076                                   "BAD SerDes ext_phy_config 0x%x\n",
7077                                   bp->link_params.ext_phy_config);
7078                         return;
7079                 }
7080
7081                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7082                                            port*0x10);
7083                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7084                 break;
7085
7086         case SWITCH_CFG_10G:
7087                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7088
7089                 ext_phy_type =
7090                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7091                 switch (ext_phy_type) {
7092                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7093                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7094                                        ext_phy_type);
7095
7096                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7097                                                SUPPORTED_10baseT_Full |
7098                                                SUPPORTED_100baseT_Half |
7099                                                SUPPORTED_100baseT_Full |
7100                                                SUPPORTED_1000baseT_Full |
7101                                                SUPPORTED_2500baseX_Full |
7102                                                SUPPORTED_10000baseT_Full |
7103                                                SUPPORTED_TP |
7104                                                SUPPORTED_FIBRE |
7105                                                SUPPORTED_Autoneg |
7106                                                SUPPORTED_Pause |
7107                                                SUPPORTED_Asym_Pause);
7108                         break;
7109
7110                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7111                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7112                                        ext_phy_type);
7113
7114                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7115                                                SUPPORTED_FIBRE |
7116                                                SUPPORTED_Pause |
7117                                                SUPPORTED_Asym_Pause);
7118                         break;
7119
7120                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7121                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7122                                        ext_phy_type);
7123
7124                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7125                                                SUPPORTED_1000baseT_Full |
7126                                                SUPPORTED_FIBRE |
7127                                                SUPPORTED_Pause |
7128                                                SUPPORTED_Asym_Pause);
7129                         break;
7130
7131                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7132                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7133                                        ext_phy_type);
7134
7135                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7136                                                SUPPORTED_1000baseT_Full |
7137                                                SUPPORTED_FIBRE |
7138                                                SUPPORTED_Autoneg |
7139                                                SUPPORTED_Pause |
7140                                                SUPPORTED_Asym_Pause);
7141                         break;
7142
7143                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7144                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7145                                        ext_phy_type);
7146
7147                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7148                                                SUPPORTED_2500baseX_Full |
7149                                                SUPPORTED_1000baseT_Full |
7150                                                SUPPORTED_FIBRE |
7151                                                SUPPORTED_Autoneg |
7152                                                SUPPORTED_Pause |
7153                                                SUPPORTED_Asym_Pause);
7154                         break;
7155
7156                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7157                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7158                                        ext_phy_type);
7159
7160                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7161                                                SUPPORTED_TP |
7162                                                SUPPORTED_Autoneg |
7163                                                SUPPORTED_Pause |
7164                                                SUPPORTED_Asym_Pause);
7165                         break;
7166
7167                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7168                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7169                                   bp->link_params.ext_phy_config);
7170                         break;
7171
7172                 default:
7173                         BNX2X_ERR("NVRAM config error. "
7174                                   "BAD XGXS ext_phy_config 0x%x\n",
7175                                   bp->link_params.ext_phy_config);
7176                         return;
7177                 }
7178
7179                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7180                                            port*0x18);
7181                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7182
7183                 break;
7184
7185         default:
7186                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7187                           bp->port.link_config);
7188                 return;
7189         }
7190         bp->link_params.phy_addr = bp->port.phy_addr;
7191
7192         /* mask what we support according to speed_cap_mask */
7193         if (!(bp->link_params.speed_cap_mask &
7194                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7195                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7196
7197         if (!(bp->link_params.speed_cap_mask &
7198                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7199                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7200
7201         if (!(bp->link_params.speed_cap_mask &
7202                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7203                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7204
7205         if (!(bp->link_params.speed_cap_mask &
7206                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7207                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7208
7209         if (!(bp->link_params.speed_cap_mask &
7210                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7211                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7212                                         SUPPORTED_1000baseT_Full);
7213
7214         if (!(bp->link_params.speed_cap_mask &
7215                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7216                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7217
7218         if (!(bp->link_params.speed_cap_mask &
7219                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7220                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7221
7222         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7223 }
7224
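     /* Translate the NVRAM link_config into the requested line speed,
      * duplex and advertised modes.  A speed that the port cannot support
      * is reported as an NVRAM config error; an unknown speed setting
      * falls back to autoneg with everything supported advertised.
      */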
7225 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7226 {
7227         bp->link_params.req_duplex = DUPLEX_FULL;
7228
7229         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7230         case PORT_FEATURE_LINK_SPEED_AUTO:
7231                 if (bp->port.supported & SUPPORTED_Autoneg) {
7232                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7233                         bp->port.advertising = bp->port.supported;
7234                 } else {
7235                         u32 ext_phy_type =
7236                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7237
7238                         if ((ext_phy_type ==
7239                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7240                             (ext_phy_type ==
7241                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7242                                 /* force 10G, no AN */
7243                                 bp->link_params.req_line_speed = SPEED_10000;
7244                                 bp->port.advertising =
7245                                                 (ADVERTISED_10000baseT_Full |
7246                                                  ADVERTISED_FIBRE);
7247                                 break;
7248                         }
7249                         BNX2X_ERR("NVRAM config error. "
7250                                   "Invalid link_config 0x%x"
7251                                   "  Autoneg not supported\n",
7252                                   bp->port.link_config);
7253                         return;
7254                 }
7255                 break;
7256
7257         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7258                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7259                         bp->link_params.req_line_speed = SPEED_10;
7260                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7261                                                 ADVERTISED_TP);
7262                 } else {
7263                         BNX2X_ERR("NVRAM config error. "
7264                                   "Invalid link_config 0x%x"
7265                                   "  speed_cap_mask 0x%x\n",
7266                                   bp->port.link_config,
7267                                   bp->link_params.speed_cap_mask);
7268                         return;
7269                 }
7270                 break;
7271
7272         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7273                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7274                         bp->link_params.req_line_speed = SPEED_10;
7275                         bp->link_params.req_duplex = DUPLEX_HALF;
7276                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7277                                                 ADVERTISED_TP);
7278                 } else {
7279                         BNX2X_ERR("NVRAM config error. "
7280                                   "Invalid link_config 0x%x"
7281                                   "  speed_cap_mask 0x%x\n",
7282                                   bp->port.link_config,
7283                                   bp->link_params.speed_cap_mask);
7284                         return;
7285                 }
7286                 break;
7287
7288         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7289                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7290                         bp->link_params.req_line_speed = SPEED_100;
7291                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7292                                                 ADVERTISED_TP);
7293                 } else {
7294                         BNX2X_ERR("NVRAM config error. "
7295                                   "Invalid link_config 0x%x"
7296                                   "  speed_cap_mask 0x%x\n",
7297                                   bp->port.link_config,
7298                                   bp->link_params.speed_cap_mask);
7299                         return;
7300                 }
7301                 break;
7302
7303         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7304                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7305                         bp->link_params.req_line_speed = SPEED_100;
7306                         bp->link_params.req_duplex = DUPLEX_HALF;
7307                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7308                                                 ADVERTISED_TP);
7309                 } else {
7310                         BNX2X_ERR("NVRAM config error. "
7311                                   "Invalid link_config 0x%x"
7312                                   "  speed_cap_mask 0x%x\n",
7313                                   bp->port.link_config,
7314                                   bp->link_params.speed_cap_mask);
7315                         return;
7316                 }
7317                 break;
7318
7319         case PORT_FEATURE_LINK_SPEED_1G:
7320                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7321                         bp->link_params.req_line_speed = SPEED_1000;
7322                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7323                                                 ADVERTISED_TP);
7324                 } else {
7325                         BNX2X_ERR("NVRAM config error. "
7326                                   "Invalid link_config 0x%x"
7327                                   "  speed_cap_mask 0x%x\n",
7328                                   bp->port.link_config,
7329                                   bp->link_params.speed_cap_mask);
7330                         return;
7331                 }
7332                 break;
7333
7334         case PORT_FEATURE_LINK_SPEED_2_5G:
7335                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7336                         bp->link_params.req_line_speed = SPEED_2500;
7337                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7338                                                 ADVERTISED_TP);
7339                 } else {
7340                         BNX2X_ERR("NVRAM config error. "
7341                                   "Invalid link_config 0x%x"
7342                                   "  speed_cap_mask 0x%x\n",
7343                                   bp->port.link_config,
7344                                   bp->link_params.speed_cap_mask);
7345                         return;
7346                 }
7347                 break;
7348
7349         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7350         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7351         case PORT_FEATURE_LINK_SPEED_10G_KR:
7352                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7353                         bp->link_params.req_line_speed = SPEED_10000;
7354                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7355                                                 ADVERTISED_FIBRE);
7356                 } else {
7357                         BNX2X_ERR("NVRAM config error. "
7358                                   "Invalid link_config 0x%x"
7359                                   "  speed_cap_mask 0x%x\n",
7360                                   bp->port.link_config,
7361                                   bp->link_params.speed_cap_mask);
7362                         return;
7363                 }
7364                 break;
7365
7366         default:
7367                 BNX2X_ERR("NVRAM config error. "
7368                           "BAD link speed link_config 0x%x\n",
7369                           bp->port.link_config);
7370                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7371                 bp->port.advertising = bp->port.supported;
7372                 break;
7373         }
7374
7375         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7376                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7377         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7378             !(bp->port.supported & SUPPORTED_Autoneg))
7379                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7380
7381         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7382                        "  advertising 0x%x\n",
7383                        bp->link_params.req_line_speed,
7384                        bp->link_params.req_duplex,
7385                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7386 }
7387
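     /* Read the per-port hardware info from shmem: SerDes/lane/external
      * PHY configuration, speed capability mask, link config and the MAC
      * address (which is mirrored into link_params and perm_addr).
      */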
7388 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7389 {
7390         int port = BP_PORT(bp);
7391         u32 val, val2;
7392
7393         bp->link_params.bp = bp;
7394         bp->link_params.port = port;
7395
7396         bp->link_params.serdes_config =
7397                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7398         bp->link_params.lane_config =
7399                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7400         bp->link_params.ext_phy_config =
7401                 SHMEM_RD(bp,
7402                          dev_info.port_hw_config[port].external_phy_config);
7403         bp->link_params.speed_cap_mask =
7404                 SHMEM_RD(bp,
7405                          dev_info.port_hw_config[port].speed_capability_mask);
7406
7407         bp->port.link_config =
7408                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7409
7410         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7411              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7412                        "  link_config 0x%08x\n",
7413                        bp->link_params.serdes_config,
7414                        bp->link_params.lane_config,
7415                        bp->link_params.ext_phy_config,
7416                        bp->link_params.speed_cap_mask, bp->port.link_config);
7417
7418         bp->link_params.switch_cfg = (bp->port.link_config &
7419                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7420         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7421
7422         bnx2x_link_settings_requested(bp);
7423
7424         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7425         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7426         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7427         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7428         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7429         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7430         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7431         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7432         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7433         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7434 }
7435
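     /* Gather all hardware info.  On E1H the outer-VLAN (E1HOV) tag
      * decides between multi-function and single-function mode; in MF
      * mode the MAC address is taken from the per-function config
      * instead of the port config.
      */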
7436 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7437 {
7438         int func = BP_FUNC(bp);
7439         u32 val, val2;
7440         int rc = 0;
7441
7442         bnx2x_get_common_hwinfo(bp);
7443
7444         bp->e1hov = 0;
7445         bp->e1hmf = 0;
7446         if (CHIP_IS_E1H(bp)) {
7447                 bp->mf_config =
7448                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7449
7450                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7451                        FUNC_MF_CFG_E1HOV_TAG_MASK);
7452                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7453
7454                         bp->e1hov = val;
7455                         bp->e1hmf = 1;
7456                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7457                                        "(0x%04x)\n",
7458                                        func, bp->e1hov, bp->e1hov);
7459                 } else {
7460                         BNX2X_DEV_INFO("Single function mode\n");
7461                         if (BP_E1HVN(bp)) {
7462                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7463                                           "  aborting\n", func);
7464                                 rc = -EPERM;
7465                         }
7466                 }
7467         }
7468
7469         if (!BP_NOMCP(bp)) {
7470                 bnx2x_get_port_hwinfo(bp);
7471
7472                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7473                               DRV_MSG_SEQ_NUMBER_MASK);
7474                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7475         }
7476
7477         if (IS_E1HMF(bp)) {
7478                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7479                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7480                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7481                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7482                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7483                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7484                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7485                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7486                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7487                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7488                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7489                                ETH_ALEN);
7490                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7491                                ETH_ALEN);
7492                 }
7493
7494                 return rc;
7495         }
7496
7497         if (BP_NOMCP(bp)) {
7498                 /* only supposed to happen on emulation/FPGA */
7499                 BNX2X_ERR("warning random MAC workaround active\n");
7500                 random_ether_addr(bp->dev->dev_addr);
7501                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7502         }
7503
7504         return rc;
7505 }
7506
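     /* One-time driver-private init: locks and work items, hardware
      * info, TPA/LRO flags, ring sizes, coalescing defaults and the
      * slow-path timer.
      */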
7507 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7508 {
7509         int func = BP_FUNC(bp);
7510         int rc;
7511
7512         /* Disable interrupt handling until HW is initialized */
7513         atomic_set(&bp->intr_sem, 1);
7514
7515         mutex_init(&bp->port.phy_mutex);
7516
7517         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7518         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7519
7520         rc = bnx2x_get_hwinfo(bp);
7521
7522         /* need to reset chip if undi was active */
7523         if (!BP_NOMCP(bp))
7524                 bnx2x_undi_unload(bp);
7525
7526         if (CHIP_REV_IS_FPGA(bp))
7527                 printk(KERN_ERR PFX "FPGA detected\n");
7528
7529         if (BP_NOMCP(bp) && (func == 0))
7530                 printk(KERN_ERR PFX
7531                        "MCP disabled, must load devices in order!\n");
7532
7533         /* Set TPA flags */
7534         if (disable_tpa) {
7535                 bp->flags &= ~TPA_ENABLE_FLAG;
7536                 bp->dev->features &= ~NETIF_F_LRO;
7537         } else {
7538                 bp->flags |= TPA_ENABLE_FLAG;
7539                 bp->dev->features |= NETIF_F_LRO;
7540         }
7541
7542
7543         bp->tx_ring_size = MAX_TX_AVAIL;
7544         bp->rx_ring_size = MAX_RX_AVAIL;
7545
7546         bp->rx_csum = 1;
7547         bp->rx_offset = 0;
7548
7549         bp->tx_ticks = 50;
7550         bp->rx_ticks = 25;
7551
7552         bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7553         bp->current_interval = (poll ? poll : bp->timer_interval);
7554
7555         init_timer(&bp->timer);
7556         bp->timer.expires = jiffies + bp->current_interval;
7557         bp->timer.data = (unsigned long) bp;
7558         bp->timer.function = bnx2x_timer;
7559
7560         return rc;
7561 }
7562
7563 /*
7564  * ethtool service functions
7565  */
7566
7567 /* All ethtool functions called with rtnl_lock */
7568
7569 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7570 {
7571         struct bnx2x *bp = netdev_priv(dev);
7572
7573         cmd->supported = bp->port.supported;
7574         cmd->advertising = bp->port.advertising;
7575
7576         if (netif_carrier_ok(dev)) {
7577                 cmd->speed = bp->link_vars.line_speed;
7578                 cmd->duplex = bp->link_vars.duplex;
7579         } else {
7580                 cmd->speed = bp->link_params.req_line_speed;
7581                 cmd->duplex = bp->link_params.req_duplex;
7582         }
7583         if (IS_E1HMF(bp)) {
7584                 u16 vn_max_rate;
7585
7586                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7587                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7588                 if (vn_max_rate < cmd->speed)
7589                         cmd->speed = vn_max_rate;
7590         }
7591
7592         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7593                 u32 ext_phy_type =
7594                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7595
7596                 switch (ext_phy_type) {
7597                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7598                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7599                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7600                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7601                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7602                         cmd->port = PORT_FIBRE;
7603                         break;
7604
7605                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7606                         cmd->port = PORT_TP;
7607                         break;
7608
7609                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7610                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7611                                   bp->link_params.ext_phy_config);
7612                         break;
7613
7614                 default:
7615                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7616                            bp->link_params.ext_phy_config);
7617                         break;
7618                 }
7619         } else
7620                 cmd->port = PORT_TP;
7621
7622         cmd->phy_address = bp->port.phy_addr;
7623         cmd->transceiver = XCVR_INTERNAL;
7624
7625         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7626                 cmd->autoneg = AUTONEG_ENABLE;
7627         else
7628                 cmd->autoneg = AUTONEG_DISABLE;
7629
7630         cmd->maxtxpkt = 0;
7631         cmd->maxrxpkt = 0;
7632
7633         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7634            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7635            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7636            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7637            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7638            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7639            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7640
7641         return 0;
7642 }
7643
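     /* Apply user link settings.  In E1H multi-function mode the link is
      * shared between functions, so the request is accepted but ignored.
      * Forced speed/duplex combinations must be in bp->port.supported.
      */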
7644 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7645 {
7646         struct bnx2x *bp = netdev_priv(dev);
7647         u32 advertising;
7648
7649         if (IS_E1HMF(bp))
7650                 return 0;
7651
7652         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7653            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7654            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7655            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7656            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7657            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7658            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7659
7660         if (cmd->autoneg == AUTONEG_ENABLE) {
7661                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7662                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7663                         return -EINVAL;
7664                 }
7665
7666                 /* advertise the requested speed and duplex if supported */
7667                 cmd->advertising &= bp->port.supported;
7668
7669                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7670                 bp->link_params.req_duplex = DUPLEX_FULL;
7671                 bp->port.advertising |= (ADVERTISED_Autoneg |
7672                                          cmd->advertising);
7673
7674         } else { /* forced speed */
7675                 /* advertise the requested speed and duplex if supported */
7676                 switch (cmd->speed) {
7677                 case SPEED_10:
7678                         if (cmd->duplex == DUPLEX_FULL) {
7679                                 if (!(bp->port.supported &
7680                                       SUPPORTED_10baseT_Full)) {
7681                                         DP(NETIF_MSG_LINK,
7682                                            "10M full not supported\n");
7683                                         return -EINVAL;
7684                                 }
7685
7686                                 advertising = (ADVERTISED_10baseT_Full |
7687                                                ADVERTISED_TP);
7688                         } else {
7689                                 if (!(bp->port.supported &
7690                                       SUPPORTED_10baseT_Half)) {
7691                                         DP(NETIF_MSG_LINK,
7692                                            "10M half not supported\n");
7693                                         return -EINVAL;
7694                                 }
7695
7696                                 advertising = (ADVERTISED_10baseT_Half |
7697                                                ADVERTISED_TP);
7698                         }
7699                         break;
7700
7701                 case SPEED_100:
7702                         if (cmd->duplex == DUPLEX_FULL) {
7703                                 if (!(bp->port.supported &
7704                                                 SUPPORTED_100baseT_Full)) {
7705                                         DP(NETIF_MSG_LINK,
7706                                            "100M full not supported\n");
7707                                         return -EINVAL;
7708                                 }
7709
7710                                 advertising = (ADVERTISED_100baseT_Full |
7711                                                ADVERTISED_TP);
7712                         } else {
7713                                 if (!(bp->port.supported &
7714                                                 SUPPORTED_100baseT_Half)) {
7715                                         DP(NETIF_MSG_LINK,
7716                                            "100M half not supported\n");
7717                                         return -EINVAL;
7718                                 }
7719
7720                                 advertising = (ADVERTISED_100baseT_Half |
7721                                                ADVERTISED_TP);
7722                         }
7723                         break;
7724
7725                 case SPEED_1000:
7726                         if (cmd->duplex != DUPLEX_FULL) {
7727                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
7728                                 return -EINVAL;
7729                         }
7730
7731                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7732                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
7733                                 return -EINVAL;
7734                         }
7735
7736                         advertising = (ADVERTISED_1000baseT_Full |
7737                                        ADVERTISED_TP);
7738                         break;
7739
7740                 case SPEED_2500:
7741                         if (cmd->duplex != DUPLEX_FULL) {
7742                                 DP(NETIF_MSG_LINK,
7743                                    "2.5G half not supported\n");
7744                                 return -EINVAL;
7745                         }
7746
7747                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7748                                 DP(NETIF_MSG_LINK,
7749                                    "2.5G full not supported\n");
7750                                 return -EINVAL;
7751                         }
7752
7753                         advertising = (ADVERTISED_2500baseX_Full |
7754                                        ADVERTISED_TP);
7755                         break;
7756
7757                 case SPEED_10000:
7758                         if (cmd->duplex != DUPLEX_FULL) {
7759                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
7760                                 return -EINVAL;
7761                         }
7762
7763                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7764                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
7765                                 return -EINVAL;
7766                         }
7767
7768                         advertising = (ADVERTISED_10000baseT_Full |
7769                                        ADVERTISED_FIBRE);
7770                         break;
7771
7772                 default:
7773                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
7774                         return -EINVAL;
7775                 }
7776
7777                 bp->link_params.req_line_speed = cmd->speed;
7778                 bp->link_params.req_duplex = cmd->duplex;
7779                 bp->port.advertising = advertising;
7780         }
7781
7782         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7783            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7784            bp->link_params.req_line_speed, bp->link_params.req_duplex,
7785            bp->port.advertising);
7786
7787         if (netif_running(dev)) {
7788                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7789                 bnx2x_link_set(bp);
7790         }
7791
7792         return 0;
7793 }
7794
7795 #define PHY_FW_VER_LEN                  10
7796
7797 static void bnx2x_get_drvinfo(struct net_device *dev,
7798                               struct ethtool_drvinfo *info)
7799 {
7800         struct bnx2x *bp = netdev_priv(dev);
7801         u8 phy_fw_ver[PHY_FW_VER_LEN];
7802
7803         strcpy(info->driver, DRV_MODULE_NAME);
7804         strcpy(info->version, DRV_MODULE_VERSION);
7805
7806         phy_fw_ver[0] = '\0';
7807         if (bp->port.pmf) {
7808                 bnx2x_acquire_phy_lock(bp);
7809                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7810                                              (bp->state != BNX2X_STATE_CLOSED),
7811                                              phy_fw_ver, PHY_FW_VER_LEN);
7812                 bnx2x_release_phy_lock(bp);
7813         }
7814
7815         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7816                  (bp->common.bc_ver & 0xff0000) >> 16,
7817                  (bp->common.bc_ver & 0xff00) >> 8,
7818                  (bp->common.bc_ver & 0xff),
7819                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7820         strcpy(info->bus_info, pci_name(bp->pdev));
7821         info->n_stats = BNX2X_NUM_STATS;
7822         info->testinfo_len = BNX2X_NUM_TESTS;
7823         info->eedump_len = bp->common.flash_size;
7824         info->regdump_len = 0;
7825 }
7826
7827 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7828 {
7829         struct bnx2x *bp = netdev_priv(dev);
7830
7831         if (bp->flags & NO_WOL_FLAG) {
7832                 wol->supported = 0;
7833                 wol->wolopts = 0;
7834         } else {
7835                 wol->supported = WAKE_MAGIC;
7836                 if (bp->wol)
7837                         wol->wolopts = WAKE_MAGIC;
7838                 else
7839                         wol->wolopts = 0;
7840         }
7841         memset(&wol->sopass, 0, sizeof(wol->sopass));
7842 }
7843
7844 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7845 {
7846         struct bnx2x *bp = netdev_priv(dev);
7847
7848         if (wol->wolopts & ~WAKE_MAGIC)
7849                 return -EINVAL;
7850
7851         if (wol->wolopts & WAKE_MAGIC) {
7852                 if (bp->flags & NO_WOL_FLAG)
7853                         return -EINVAL;
7854
7855                 bp->wol = 1;
7856         } else
7857                 bp->wol = 0;
7858
7859         return 0;
7860 }
7861
7862 static u32 bnx2x_get_msglevel(struct net_device *dev)
7863 {
7864         struct bnx2x *bp = netdev_priv(dev);
7865
7866         return bp->msglevel;
7867 }
7868
7869 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7870 {
7871         struct bnx2x *bp = netdev_priv(dev);
7872
7873         if (capable(CAP_NET_ADMIN))
7874                 bp->msglevel = level;
7875 }
7876
7877 static int bnx2x_nway_reset(struct net_device *dev)
7878 {
7879         struct bnx2x *bp = netdev_priv(dev);
7880
7881         if (!bp->port.pmf)
7882                 return 0;
7883
7884         if (netif_running(dev)) {
7885                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7886                 bnx2x_link_set(bp);
7887         }
7888
7889         return 0;
7890 }
7891
7892 static int bnx2x_get_eeprom_len(struct net_device *dev)
7893 {
7894         struct bnx2x *bp = netdev_priv(dev);
7895
7896         return bp->common.flash_size;
7897 }
7898
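     /* NVRAM is shared between the two ports and the MCP; access is
      * arbitrated through the per-port request bits of
      * MCP_REG_MCPR_NVM_SW_ARB, polled with a timeout that is stretched
      * 100x on emulation/FPGA.
      */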
7899 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7900 {
7901         int port = BP_PORT(bp);
7902         int count, i;
7903         u32 val = 0;
7904
7905         /* adjust timeout for emulation/FPGA */
7906         count = NVRAM_TIMEOUT_COUNT;
7907         if (CHIP_REV_IS_SLOW(bp))
7908                 count *= 100;
7909
7910         /* request access to nvram interface */
7911         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7912                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7913
7914         for (i = 0; i < count*10; i++) {
7915                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7916                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7917                         break;
7918
7919                 udelay(5);
7920         }
7921
7922         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7923                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7924                 return -EBUSY;
7925         }
7926
7927         return 0;
7928 }
7929
7930 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7931 {
7932         int port = BP_PORT(bp);
7933         int count, i;
7934         u32 val = 0;
7935
7936         /* adjust timeout for emulation/FPGA */
7937         count = NVRAM_TIMEOUT_COUNT;
7938         if (CHIP_REV_IS_SLOW(bp))
7939                 count *= 100;
7940
7941         /* relinquish nvram interface */
7942         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7943                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7944
7945         for (i = 0; i < count*10; i++) {
7946                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7947                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7948                         break;
7949
7950                 udelay(5);
7951         }
7952
7953         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7954                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7955                 return -EBUSY;
7956         }
7957
7958         return 0;
7959 }
7960
7961 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7962 {
7963         u32 val;
7964
7965         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7966
7967         /* enable both bits, even on read */
7968         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7969                (val | MCPR_NVM_ACCESS_ENABLE_EN |
7970                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
7971 }
7972
7973 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7974 {
7975         u32 val;
7976
7977         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7978
7979         /* disable both bits, even after read */
7980         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7981                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7982                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7983 }
7984
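     /* Issue a single dword read: clear the DONE bit, program the
      * address, set DOIT plus the caller's FIRST/LAST flags and poll for
      * completion.  The result is converted to big-endian so that
      * ethtool sees a plain byte array.
      */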
7985 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7986                                   u32 cmd_flags)
7987 {
7988         int count, i, rc;
7989         u32 val;
7990
7991         /* build the command word */
7992         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7993
7994         /* need to clear DONE bit separately */
7995         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7996
7997         /* address of the NVRAM to read from */
7998         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7999                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8000
8001         /* issue a read command */
8002         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8003
8004         /* adjust timeout for emulation/FPGA */
8005         count = NVRAM_TIMEOUT_COUNT;
8006         if (CHIP_REV_IS_SLOW(bp))
8007                 count *= 100;
8008
8009         /* wait for completion */
8010         *ret_val = 0;
8011         rc = -EBUSY;
8012         for (i = 0; i < count; i++) {
8013                 udelay(5);
8014                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8015
8016                 if (val & MCPR_NVM_COMMAND_DONE) {
8017                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8018                         /* we read nvram data in cpu order
8019                          * but ethtool sees it as an array of bytes;
8020                          * converting to big-endian will do the work */
8021                         val = cpu_to_be32(val);
8022                         *ret_val = val;
8023                         rc = 0;
8024                         break;
8025                 }
8026         }
8027
8028         return rc;
8029 }
8030
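     /* Read a dword-aligned range from NVRAM: take the hardware lock,
      * enable access, stream dwords with FIRST set on the first command
      * and LAST on the final one, then restore the interface state.
      */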
8031 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8032                             int buf_size)
8033 {
8034         int rc;
8035         u32 cmd_flags;
8036         u32 val;
8037
8038         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8039                 DP(BNX2X_MSG_NVM,
8040                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8041                    offset, buf_size);
8042                 return -EINVAL;
8043         }
8044
8045         if (offset + buf_size > bp->common.flash_size) {
8046                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8047                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8048                    offset, buf_size, bp->common.flash_size);
8049                 return -EINVAL;
8050         }
8051
8052         /* request access to nvram interface */
8053         rc = bnx2x_acquire_nvram_lock(bp);
8054         if (rc)
8055                 return rc;
8056
8057         /* enable access to nvram interface */
8058         bnx2x_enable_nvram_access(bp);
8059
8060         /* read all dwords except the last */
8061         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8062         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8063                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8064                 memcpy(ret_buf, &val, 4);
8065
8066                 /* advance to the next dword */
8067                 offset += sizeof(u32);
8068                 ret_buf += sizeof(u32);
8069                 buf_size -= sizeof(u32);
8070                 cmd_flags = 0;
8071         }
8072
8073         if (rc == 0) {
8074                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8075                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8076                 memcpy(ret_buf, &val, 4);
8077         }
8078
8079         /* disable access to nvram interface */
8080         bnx2x_disable_nvram_access(bp);
8081         bnx2x_release_nvram_lock(bp);
8082
8083         return rc;
8084 }
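
/* Editorial example: bnx2x_test_nvram() below uses this helper to
 * validate the flash, e.g. fetching the 4-byte magic at offset 0:
 *
 *	u32 magic;
 *
 *	rc = bnx2x_nvram_read(bp, 0, (u8 *)&magic, sizeof(magic));
 *	if (!rc && be32_to_cpu(magic) != 0x669955aa)
 *		... bad flash image ...
 */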
8085
8086 static int bnx2x_get_eeprom(struct net_device *dev,
8087                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8088 {
8089         struct bnx2x *bp = netdev_priv(dev);
8090         int rc;
8091
8092         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8093            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8094            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8095            eeprom->len, eeprom->len);
8096
8097         /* parameters already validated in ethtool_get_eeprom */
8098
8099         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8100
8101         return rc;
8102 }
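
/* Editorial note: this is the backend of "ethtool -e".  For example
 * (shell, interface name assumed):
 *
 *	ethtool -e eth0 offset 0 length 16
 *
 * ethtool_get_eeprom() has already clipped offset/len against
 * bnx2x_get_eeprom_len() before this function runs.
 */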
8103
8104 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8105                                    u32 cmd_flags)
8106 {
8107         int count, i, rc;
8108
8109         /* build the command word */
8110         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8111
8112         /* need to clear DONE bit separately */
8113         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8114
8115         /* write the data */
8116         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8117
8118         /* address of the NVRAM to write to */
8119         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8120                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8121
8122         /* issue the write command */
8123         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8124
8125         /* adjust timeout for emulation/FPGA */
8126         count = NVRAM_TIMEOUT_COUNT;
8127         if (CHIP_REV_IS_SLOW(bp))
8128                 count *= 100;
8129
8130         /* wait for completion */
8131         rc = -EBUSY;
8132         for (i = 0; i < count; i++) {
8133                 udelay(5);
8134                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8135                 if (val & MCPR_NVM_COMMAND_DONE) {
8136                         rc = 0;
8137                         break;
8138                 }
8139         }
8140
8141         return rc;
8142 }
8143
8144 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
8145
8146 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8147                               int buf_size)
8148 {
8149         int rc;
8150         u32 cmd_flags;
8151         u32 align_offset;
8152         u32 val;
8153
8154         if (offset + buf_size > bp->common.flash_size) {
8155                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8156                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8157                    offset, buf_size, bp->common.flash_size);
8158                 return -EINVAL;
8159         }
8160
8161         /* request access to nvram interface */
8162         rc = bnx2x_acquire_nvram_lock(bp);
8163         if (rc)
8164                 return rc;
8165
8166         /* enable access to nvram interface */
8167         bnx2x_enable_nvram_access(bp);
8168
8169         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8170         align_offset = (offset & ~0x03);
8171         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8172
8173         if (rc == 0) {
8174                 val &= ~(0xff << BYTE_OFFSET(offset));
8175                 val |= (*data_buf << BYTE_OFFSET(offset));
8176
8177                 /* NVRAM data is returned as an array of bytes;
8178                  * convert it back to CPU order */
8179                 val = be32_to_cpu(val);
8180
8181                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8182                                              cmd_flags);
8183         }
8184
8185         /* disable access to nvram interface */
8186         bnx2x_disable_nvram_access(bp);
8187         bnx2x_release_nvram_lock(bp);
8188
8189         return rc;
8190 }
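
/* Editorial example: BYTE_OFFSET() picks the byte lane inside the
 * aligned dword for the single-byte read-modify-write above.  For a
 * write at offset 0x101:
 *
 *	align_offset = 0x101 & ~0x03 = 0x100
 *	BYTE_OFFSET(0x101) = 8 * (0x101 & 0x03) = 8
 *
 * so bits 15:8 of the dword at 0x100 are masked out and the new byte
 * is ORed in at that position.
 */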
8191
8192 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8193                              int buf_size)
8194 {
8195         int rc;
8196         u32 cmd_flags;
8197         u32 val;
8198         u32 written_so_far;
8199
8200         if (buf_size == 1)      /* ethtool */
8201                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8202
8203         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8204                 DP(BNX2X_MSG_NVM,
8205                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8206                    offset, buf_size);
8207                 return -EINVAL;
8208         }
8209
8210         if (offset + buf_size > bp->common.flash_size) {
8211                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8212                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8213                    offset, buf_size, bp->common.flash_size);
8214                 return -EINVAL;
8215         }
8216
8217         /* request access to nvram interface */
8218         rc = bnx2x_acquire_nvram_lock(bp);
8219         if (rc)
8220                 return rc;
8221
8222         /* enable access to nvram interface */
8223         bnx2x_enable_nvram_access(bp);
8224
8225         written_so_far = 0;
8226         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8227         while ((written_so_far < buf_size) && (rc == 0)) {
8228                 if (written_so_far == (buf_size - sizeof(u32)))
8229                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8230                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8231                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8232                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8233                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8234
8235                 memcpy(&val, data_buf, 4);
8236
8237                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8238
8239                 /* advance to the next dword */
8240                 offset += sizeof(u32);
8241                 data_buf += sizeof(u32);
8242                 written_so_far += sizeof(u32);
8243                 cmd_flags = 0;
8244         }
8245
8246         /* disable access to nvram interface */
8247         bnx2x_disable_nvram_access(bp);
8248         bnx2x_release_nvram_lock(bp);
8249
8250         return rc;
8251 }
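
/* Editorial note: the loop above re-asserts MCPR_NVM_COMMAND_FIRST on
 * the dword that starts a flash page and MCPR_NVM_COMMAND_LAST on the
 * dword that ends a page or ends the buffer, so a buffer spanning
 * several NVRAM_PAGE_SIZE pages is written as per-page bursts.
 */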
8252
8253 static int bnx2x_set_eeprom(struct net_device *dev,
8254                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8255 {
8256         struct bnx2x *bp = netdev_priv(dev);
8257         int rc;
8258
8259         if (!netif_running(dev))
8260                 return -EAGAIN;
8261
8262         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8263            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8264            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8265            eeprom->len, eeprom->len);
8266
8267         /* parameters already validated in ethtool_set_eeprom */
8268
8269         /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8270         if (eeprom->magic == 0x00504859)
8271                 if (bp->port.pmf) {
8272
8273                         bnx2x_acquire_phy_lock(bp);
8274                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8275                                              bp->link_params.ext_phy_config,
8276                                              (bp->state != BNX2X_STATE_CLOSED),
8277                                              eebuf, eeprom->len);
8278                         if ((bp->state == BNX2X_STATE_OPEN) ||
8279                             (bp->state == BNX2X_STATE_DISABLED)) {
8280                                 rc |= bnx2x_link_reset(&bp->link_params,
8281                                                        &bp->link_vars);
8282                                 rc |= bnx2x_phy_init(&bp->link_params,
8283                                                      &bp->link_vars);
8284                         }
8285                         bnx2x_release_phy_lock(bp);
8286
8287                 } else /* Only the PMF can access the PHY */
8288                         return -EINVAL;
8289         else
8290                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8291
8292         return rc;
8293 }
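
/* Editorial example: "ethtool -E" lands here.  With the PHY magic the
 * buffer is treated as external-PHY firmware rather than raw NVRAM
 * data, and only the PMF may issue it:
 *
 *	ethtool -E eth0 magic 0x00504859 offset 0 ...
 *
 * (0x00504859 is ASCII "PHY".)
 */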
8294
8295 static int bnx2x_get_coalesce(struct net_device *dev,
8296                               struct ethtool_coalesce *coal)
8297 {
8298         struct bnx2x *bp = netdev_priv(dev);
8299
8300         memset(coal, 0, sizeof(struct ethtool_coalesce));
8301
8302         coal->rx_coalesce_usecs = bp->rx_ticks;
8303         coal->tx_coalesce_usecs = bp->tx_ticks;
8304
8305         return 0;
8306 }
8307
8308 static int bnx2x_set_coalesce(struct net_device *dev,
8309                               struct ethtool_coalesce *coal)
8310 {
8311         struct bnx2x *bp = netdev_priv(dev);
8312
8313         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8314         if (bp->rx_ticks > 3000)
8315                 bp->rx_ticks = 3000;
8316
8317         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8318         if (bp->tx_ticks > 0x3000)
8319                 bp->tx_ticks = 0x3000;
8320
8321         if (netif_running(dev))
8322                 bnx2x_update_coalesce(bp);
8323
8324         return 0;
8325 }
8326
8327 static void bnx2x_get_ringparam(struct net_device *dev,
8328                                 struct ethtool_ringparam *ering)
8329 {
8330         struct bnx2x *bp = netdev_priv(dev);
8331
8332         ering->rx_max_pending = MAX_RX_AVAIL;
8333         ering->rx_mini_max_pending = 0;
8334         ering->rx_jumbo_max_pending = 0;
8335
8336         ering->rx_pending = bp->rx_ring_size;
8337         ering->rx_mini_pending = 0;
8338         ering->rx_jumbo_pending = 0;
8339
8340         ering->tx_max_pending = MAX_TX_AVAIL;
8341         ering->tx_pending = bp->tx_ring_size;
8342 }
8343
8344 static int bnx2x_set_ringparam(struct net_device *dev,
8345                                struct ethtool_ringparam *ering)
8346 {
8347         struct bnx2x *bp = netdev_priv(dev);
8348         int rc = 0;
8349
8350         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8351             (ering->tx_pending > MAX_TX_AVAIL) ||
8352             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8353                 return -EINVAL;
8354
8355         bp->rx_ring_size = ering->rx_pending;
8356         bp->tx_ring_size = ering->tx_pending;
8357
8358         if (netif_running(dev)) {
8359                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8360                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8361         }
8362
8363         return rc;
8364 }
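
/* Editorial example: "ethtool -G eth0 rx 2048 tx 1024" ends up here;
 * note that changing ring sizes on a running interface triggers a
 * full unload/load cycle, so traffic is briefly interrupted.
 */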
8365
8366 static void bnx2x_get_pauseparam(struct net_device *dev,
8367                                  struct ethtool_pauseparam *epause)
8368 {
8369         struct bnx2x *bp = netdev_priv(dev);
8370
8371         epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8372                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8373
8374         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8375                             BNX2X_FLOW_CTRL_RX);
8376         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8377                             BNX2X_FLOW_CTRL_TX);
8378
8379         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8380            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8381            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8382 }
8383
8384 static int bnx2x_set_pauseparam(struct net_device *dev,
8385                                 struct ethtool_pauseparam *epause)
8386 {
8387         struct bnx2x *bp = netdev_priv(dev);
8388
8389         if (IS_E1HMF(bp))
8390                 return 0;
8391
8392         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8393            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8394            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8395
8396         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8397
8398         if (epause->rx_pause)
8399                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8400
8401         if (epause->tx_pause)
8402                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8403
8404         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8405                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8406
8407         if (epause->autoneg) {
8408                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8409                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
8410                         return -EINVAL;
8411                 }
8412
8413                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8414                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8415         }
8416
8417         DP(NETIF_MSG_LINK,
8418            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8419
8420         if (netif_running(dev)) {
8421                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8422                 bnx2x_link_set(bp);
8423         }
8424
8425         return 0;
8426 }
8427
8428 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8429 {
8430         struct bnx2x *bp = netdev_priv(dev);
8431         int changed = 0;
8432         int rc = 0;
8433
8434         /* TPA requires Rx CSUM offloading */
8435         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8436                 if (!(dev->features & NETIF_F_LRO)) {
8437                         dev->features |= NETIF_F_LRO;
8438                         bp->flags |= TPA_ENABLE_FLAG;
8439                         changed = 1;
8440                 }
8441
8442         } else if (dev->features & NETIF_F_LRO) {
8443                 dev->features &= ~NETIF_F_LRO;
8444                 bp->flags &= ~TPA_ENABLE_FLAG;
8445                 changed = 1;
8446         }
8447
8448         if (changed && netif_running(dev)) {
8449                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8450                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8451         }
8452
8453         return rc;
8454 }
8455
8456 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8457 {
8458         struct bnx2x *bp = netdev_priv(dev);
8459
8460         return bp->rx_csum;
8461 }
8462
8463 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8464 {
8465         struct bnx2x *bp = netdev_priv(dev);
8466         int rc = 0;
8467
8468         bp->rx_csum = data;
8469
8470         /* Disable TPA when Rx CSUM is disabled; otherwise all
8471            TPA'ed packets will be discarded due to a wrong TCP CSUM */
8472         if (!data) {
8473                 u32 flags = ethtool_op_get_flags(dev);
8474
8475                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8476         }
8477
8478         return rc;
8479 }
8480
8481 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8482 {
8483         if (data) {
8484                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8485                 dev->features |= NETIF_F_TSO6;
8486         } else {
8487                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8488                 dev->features &= ~NETIF_F_TSO6;
8489         }
8490
8491         return 0;
8492 }
8493
8494 static const struct {
8495         char string[ETH_GSTRING_LEN];
8496 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8497         { "register_test (offline)" },
8498         { "memory_test (offline)" },
8499         { "loopback_test (offline)" },
8500         { "nvram_test (online)" },
8501         { "interrupt_test (online)" },
8502         { "link_test (online)" },
8503         { "idle check (online)" },
8504         { "MC errors (online)" }
8505 };
8506
8507 static int bnx2x_self_test_count(struct net_device *dev)
8508 {
8509         return BNX2X_NUM_TESTS;
8510 }
8511
8512 static int bnx2x_test_registers(struct bnx2x *bp)
8513 {
8514         int idx, i, rc = -ENODEV;
8515         u32 wr_val = 0;
8516         int port = BP_PORT(bp);
8517         static const struct {
8518                 u32  offset0;
8519                 u32  offset1;
8520                 u32  mask;
8521         } reg_tbl[] = {
8522 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8523                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8524                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
8525                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8526                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8527                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8528                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8529                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8530                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8531                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8532 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8533                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
8534                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8535                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8536                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8537                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8538                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8539                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8540                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8541                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8542 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8543                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8544                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8545                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8546                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8547                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8548                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8549                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8550                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8551                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8552 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8553                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8554                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8555                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8556                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8557                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8558                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8559                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8560
8561                 { 0xffffffff, 0, 0x00000000 }
8562         };
8563
8564         if (!netif_running(bp->dev))
8565                 return rc;
8566
8567         /* Run the test twice:
8568            first writing 0x00000000, then writing 0xffffffff */
8569         for (idx = 0; idx < 2; idx++) {
8570
8571                 switch (idx) {
8572                 case 0:
8573                         wr_val = 0;
8574                         break;
8575                 case 1:
8576                         wr_val = 0xffffffff;
8577                         break;
8578                 }
8579
8580                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8581                         u32 offset, mask, save_val, val;
8582
8583                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8584                         mask = reg_tbl[i].mask;
8585
8586                         save_val = REG_RD(bp, offset);
8587
8588                         REG_WR(bp, offset, wr_val);
8589                         val = REG_RD(bp, offset);
8590
8591                         /* Restore the original register's value */
8592                         REG_WR(bp, offset, save_val);
8593
8594                         /* verify that the value is as expected */
8595                         if ((val & mask) != (wr_val & mask))
8596                                 goto test_reg_exit;
8597                 }
8598         }
8599
8600         rc = 0;
8601
8602 test_reg_exit:
8603         return rc;
8604 }
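
/* Editorial example: each reg_tbl[] entry is (offset0, per-port
 * stride, RW mask).  For { HC_REG_AGG_INT_0, 4, 0x000003ff } on
 * port 1 the test hits HC_REG_AGG_INT_0 + 1*4, writes 0x0 and
 * 0xffffffff, and compares the read-back against the written value
 * under the 10-bit mask, restoring the original value in between.
 */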
8605
8606 static int bnx2x_test_memory(struct bnx2x *bp)
8607 {
8608         int i, j, rc = -ENODEV;
8609         u32 val;
8610         static const struct {
8611                 u32 offset;
8612                 int size;
8613         } mem_tbl[] = {
8614                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8615                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8616                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8617                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8618                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8619                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8620                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8621
8622                 { 0xffffffff, 0 }
8623         };
8624         static const struct {
8625                 char *name;
8626                 u32 offset;
8627                 u32 e1_mask;
8628                 u32 e1h_mask;
8629         } prty_tbl[] = {
8630                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
8631                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
8632                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
8633                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
8634                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
8635                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
8636
8637                 { NULL, 0xffffffff, 0, 0 }
8638         };
8639
8640         if (!netif_running(bp->dev))
8641                 return rc;
8642
8643         /* Go through all the memories */
8644         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8645                 for (j = 0; j < mem_tbl[i].size; j++)
8646                         REG_RD(bp, mem_tbl[i].offset + j*4);
8647
8648         /* Check the parity status */
8649         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8650                 val = REG_RD(bp, prty_tbl[i].offset);
8651                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8652                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8653                         DP(NETIF_MSG_HW,
8654                            "%s is 0x%x\n", prty_tbl[i].name, val);
8655                         goto test_mem_exit;
8656                 }
8657         }
8658
8659         rc = 0;
8660
8661 test_mem_exit:
8662         return rc;
8663 }
8664
8665 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8666 {
8667         int cnt = 1000;
8668
8669         if (link_up)
8670                 while (bnx2x_link_test(bp) && cnt--)
8671                         msleep(10);
8672 }
8673
8674 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8675 {
8676         unsigned int pkt_size, num_pkts, i;
8677         struct sk_buff *skb;
8678         unsigned char *packet;
8679         struct bnx2x_fastpath *fp = &bp->fp[0];
8680         u16 tx_start_idx, tx_idx;
8681         u16 rx_start_idx, rx_idx;
8682         u16 pkt_prod;
8683         struct sw_tx_bd *tx_buf;
8684         struct eth_tx_bd *tx_bd;
8685         dma_addr_t mapping;
8686         union eth_rx_cqe *cqe;
8687         u8 cqe_fp_flags;
8688         struct sw_rx_bd *rx_buf;
8689         u16 len;
8690         int rc = -ENODEV;
8691
8692         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8693                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8694                 bnx2x_acquire_phy_lock(bp);
8695                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8696                 bnx2x_release_phy_lock(bp);
8697
8698         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8699                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8700                 bnx2x_acquire_phy_lock(bp);
8701                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8702                 bnx2x_release_phy_lock(bp);
8703                 /* wait until link state is restored */
8704                 bnx2x_wait_for_link(bp, link_up);
8705
8706         } else
8707                 return -EINVAL;
8708
8709         pkt_size = 1514;
8710         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8711         if (!skb) {
8712                 rc = -ENOMEM;
8713                 goto test_loopback_exit;
8714         }
8715         packet = skb_put(skb, pkt_size);
8716         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8717         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8718         for (i = ETH_HLEN; i < pkt_size; i++)
8719                 packet[i] = (unsigned char) (i & 0xff);
8720
8721         num_pkts = 0;
8722         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8723         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8724
8725         pkt_prod = fp->tx_pkt_prod++;
8726         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8727         tx_buf->first_bd = fp->tx_bd_prod;
8728         tx_buf->skb = skb;
8729
8730         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8731         mapping = pci_map_single(bp->pdev, skb->data,
8732                                  skb_headlen(skb), PCI_DMA_TODEVICE);
8733         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8734         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8735         tx_bd->nbd = cpu_to_le16(1);
8736         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8737         tx_bd->vlan = cpu_to_le16(pkt_prod);
8738         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8739                                        ETH_TX_BD_FLAGS_END_BD);
8740         tx_bd->general_data = ((UNICAST_ADDRESS <<
8741                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8742
8743         wmb();
8744
8745         fp->hw_tx_prods->bds_prod =
8746                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8747         mb(); /* FW restriction: must not reorder writing nbd and packets */
8748         fp->hw_tx_prods->packets_prod =
8749                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8750         DOORBELL(bp, FP_IDX(fp), 0);
8751
8752         mmiowb();
8753
8754         num_pkts++;
8755         fp->tx_bd_prod++;
8756         bp->dev->trans_start = jiffies;
8757
8758         udelay(100);
8759
8760         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8761         if (tx_idx != tx_start_idx + num_pkts)
8762                 goto test_loopback_exit;
8763
8764         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8765         if (rx_idx != rx_start_idx + num_pkts)
8766                 goto test_loopback_exit;
8767
8768         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8769         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8770         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8771                 goto test_loopback_rx_exit;
8772
8773         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8774         if (len != pkt_size)
8775                 goto test_loopback_rx_exit;
8776
8777         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8778         skb = rx_buf->skb;
8779         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8780         for (i = ETH_HLEN; i < pkt_size; i++)
8781                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8782                         goto test_loopback_rx_exit;
8783
8784         rc = 0;
8785
8786 test_loopback_rx_exit:
8787
8788         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8789         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8790         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8791         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8792
8793         /* Update producers */
8794         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8795                              fp->rx_sge_prod);
8796
8797 test_loopback_exit:
8798         bp->link_params.loopback_mode = LOOPBACK_NONE;
8799
8800         return rc;
8801 }
8802
8803 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8804 {
8805         int rc = 0;
8806
8807         if (!netif_running(bp->dev))
8808                 return BNX2X_LOOPBACK_FAILED;
8809
8810         bnx2x_netif_stop(bp, 1);
8811
8812         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8813                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8814                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8815         }
8816
8817         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8818                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8819                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8820         }
8821
8822         bnx2x_netif_start(bp);
8823
8824         return rc;
8825 }
8826
8827 #define CRC32_RESIDUAL                  0xdebb20e3
8828
8829 static int bnx2x_test_nvram(struct bnx2x *bp)
8830 {
8831         static const struct {
8832                 int offset;
8833                 int size;
8834         } nvram_tbl[] = {
8835                 {     0,  0x14 }, /* bootstrap */
8836                 {  0x14,  0xec }, /* dir */
8837                 { 0x100, 0x350 }, /* manuf_info */
8838                 { 0x450,  0xf0 }, /* feature_info */
8839                 { 0x640,  0x64 }, /* upgrade_key_info */
8840                 { 0x6a4,  0x64 },
8841                 { 0x708,  0x70 }, /* manuf_key_info */
8842                 { 0x778,  0x70 },
8843                 {     0,     0 }
8844         };
8845         u32 buf[0x350 / 4];
8846         u8 *data = (u8 *)buf;
8847         int i, rc;
8848         u32 magic, csum;
8849
8850         rc = bnx2x_nvram_read(bp, 0, data, 4);
8851         if (rc) {
8852                 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8853                 goto test_nvram_exit;
8854         }
8855
8856         magic = be32_to_cpu(buf[0]);
8857         if (magic != 0x669955aa) {
8858                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8859                 rc = -ENODEV;
8860                 goto test_nvram_exit;
8861         }
8862
8863         for (i = 0; nvram_tbl[i].size; i++) {
8864
8865                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8866                                       nvram_tbl[i].size);
8867                 if (rc) {
8868                         DP(NETIF_MSG_PROBE,
8869                            "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8870                         goto test_nvram_exit;
8871                 }
8872
8873                 csum = ether_crc_le(nvram_tbl[i].size, data);
8874                 if (csum != CRC32_RESIDUAL) {
8875                         DP(NETIF_MSG_PROBE,
8876                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8877                         rc = -ENODEV;
8878                         goto test_nvram_exit;
8879                 }
8880         }
8881
8882 test_nvram_exit:
8883         return rc;
8884 }
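
/* Editorial note: each nvram_tbl[] region carries its own CRC32 in
 * the trailing dword, so running ether_crc_le() over the whole region
 * (data plus stored CRC) must yield the standard CRC32 residual
 * 0xdebb20e3 (CRC32_RESIDUAL above); anything else means corruption.
 */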
8885
8886 static int bnx2x_test_intr(struct bnx2x *bp)
8887 {
8888         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8889         int i, rc;
8890
8891         if (!netif_running(bp->dev))
8892                 return -ENODEV;
8893
8894         config->hdr.length_6b = 0;
8895         config->hdr.offset = 0;
8896         config->hdr.client_id = BP_CL_ID(bp);
8897         config->hdr.reserved1 = 0;
8898
8899         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8900                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8901                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8902         if (rc == 0) {
8903                 bp->set_mac_pending++;
8904                 for (i = 0; i < 10; i++) {
8905                         if (!bp->set_mac_pending)
8906                                 break;
8907                         msleep_interruptible(10);
8908                 }
8909                 if (i == 10)
8910                         rc = -ENODEV;
8911         }
8912
8913         return rc;
8914 }
8915
8916 static void bnx2x_self_test(struct net_device *dev,
8917                             struct ethtool_test *etest, u64 *buf)
8918 {
8919         struct bnx2x *bp = netdev_priv(dev);
8920
8921         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8922
8923         if (!netif_running(dev))
8924                 return;
8925
8926         /* offline tests are not supported in MF mode */
8927         if (IS_E1HMF(bp))
8928                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8929
8930         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8931                 u8 link_up;
8932
8933                 link_up = bp->link_vars.link_up;
8934                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8935                 bnx2x_nic_load(bp, LOAD_DIAG);
8936                 /* wait until link state is restored */
8937                 bnx2x_wait_for_link(bp, link_up);
8938
8939                 if (bnx2x_test_registers(bp) != 0) {
8940                         buf[0] = 1;
8941                         etest->flags |= ETH_TEST_FL_FAILED;
8942                 }
8943                 if (bnx2x_test_memory(bp) != 0) {
8944                         buf[1] = 1;
8945                         etest->flags |= ETH_TEST_FL_FAILED;
8946                 }
8947                 buf[2] = bnx2x_test_loopback(bp, link_up);
8948                 if (buf[2] != 0)
8949                         etest->flags |= ETH_TEST_FL_FAILED;
8950
8951                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8952                 bnx2x_nic_load(bp, LOAD_NORMAL);
8953                 /* wait until link state is restored */
8954                 bnx2x_wait_for_link(bp, link_up);
8955         }
8956         if (bnx2x_test_nvram(bp) != 0) {
8957                 buf[3] = 1;
8958                 etest->flags |= ETH_TEST_FL_FAILED;
8959         }
8960         if (bnx2x_test_intr(bp) != 0) {
8961                 buf[4] = 1;
8962                 etest->flags |= ETH_TEST_FL_FAILED;
8963         }
8964         if (bp->port.pmf)
8965                 if (bnx2x_link_test(bp) != 0) {
8966                         buf[5] = 1;
8967                         etest->flags |= ETH_TEST_FL_FAILED;
8968                 }
8969         buf[7] = bnx2x_mc_assert(bp);
8970         if (buf[7] != 0)
8971                 etest->flags |= ETH_TEST_FL_FAILED;
8972
8973 #ifdef BNX2X_EXTRA_DEBUG
8974         bnx2x_panic_dump(bp);
8975 #endif
8976 }
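
/* Editorial example: "ethtool -t eth0 offline" runs the full suite.
 * The result slots match bnx2x_tests_str_arr[]: buf[0] registers,
 * buf[1] memory, buf[2] loopback, buf[3] nvram, buf[4] interrupt,
 * buf[5] link and buf[7] MC assert; a non-zero slot marks a failure.
 */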
8977
8978 static const struct {
8979         long offset;
8980         int size;
8981         u32 flags;
8982 #define STATS_FLAGS_PORT                1
8983 #define STATS_FLAGS_FUNC                2
8984         u8 string[ETH_GSTRING_LEN];
8985 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8986 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8987                                 8, STATS_FLAGS_FUNC, "rx_bytes" },
8988         { STATS_OFFSET32(error_bytes_received_hi),
8989                                 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8990         { STATS_OFFSET32(total_bytes_transmitted_hi),
8991                                 8, STATS_FLAGS_FUNC, "tx_bytes" },
8992         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8993                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8994         { STATS_OFFSET32(total_unicast_packets_received_hi),
8995                                 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8996         { STATS_OFFSET32(total_multicast_packets_received_hi),
8997                                 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8998         { STATS_OFFSET32(total_broadcast_packets_received_hi),
8999                                 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9000         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9001                                 8, STATS_FLAGS_FUNC, "tx_packets" },
9002         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9003                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9004 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9005                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9006         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9007                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9008         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9009                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9010         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9011                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9012         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9013                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9014         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9015                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9016         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9017                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9018         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9019                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9020         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9021                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9022         { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9023                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9024 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9025                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9026         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9027                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9028         { STATS_OFFSET32(jabber_packets_received),
9029                                 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9030         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9031                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9032         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9033                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9034         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9035                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9036         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9037                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9038         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9039                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9040         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9041                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9042         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9043                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9044 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9045                                 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9046         { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9047                                 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9048         { STATS_OFFSET32(tx_stat_outxonsent_hi),
9049                                 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9050         { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9051                                 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9052         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9053                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9054         { STATS_OFFSET32(mac_filter_discard),
9055                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9056         { STATS_OFFSET32(no_buff_discard),
9057                                 4, STATS_FLAGS_FUNC, "rx_discards" },
9058         { STATS_OFFSET32(xxoverflow_discard),
9059                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9060         { STATS_OFFSET32(brb_drop_hi),
9061                                 8, STATS_FLAGS_PORT, "brb_discard" },
9062         { STATS_OFFSET32(brb_truncate_hi),
9063                                 8, STATS_FLAGS_PORT, "brb_truncate" },
9064 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9065                                 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9066         { STATS_OFFSET32(rx_skb_alloc_failed),
9067                                 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9068 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9069                                 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9070 };
9071
9072 #define IS_NOT_E1HMF_STAT(bp, i) \
9073                 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9074
9075 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9076 {
9077         struct bnx2x *bp = netdev_priv(dev);
9078         int i, j;
9079
9080         switch (stringset) {
9081         case ETH_SS_STATS:
9082                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9083                         if (IS_NOT_E1HMF_STAT(bp, i))
9084                                 continue;
9085                         strcpy(buf + j*ETH_GSTRING_LEN,
9086                                bnx2x_stats_arr[i].string);
9087                         j++;
9088                 }
9089                 break;
9090
9091         case ETH_SS_TEST:
9092                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9093                 break;
9094         }
9095 }
9096
9097 static int bnx2x_get_stats_count(struct net_device *dev)
9098 {
9099         struct bnx2x *bp = netdev_priv(dev);
9100         int i, num_stats = 0;
9101
9102         for (i = 0; i < BNX2X_NUM_STATS; i++) {
9103                 if (IS_NOT_E1HMF_STAT(bp, i))
9104                         continue;
9105                 num_stats++;
9106         }
9107         return num_stats;
9108 }
9109
9110 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9111                                     struct ethtool_stats *stats, u64 *buf)
9112 {
9113         struct bnx2x *bp = netdev_priv(dev);
9114         u32 *hw_stats = (u32 *)&bp->eth_stats;
9115         int i, j;
9116
9117         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9118                 if (IS_NOT_E1HMF_STAT(bp, i))
9119                         continue;
9120
9121                 if (bnx2x_stats_arr[i].size == 0) {
9122                         /* skip this counter */
9123                         buf[j] = 0;
9124                         j++;
9125                         continue;
9126                 }
9127                 if (bnx2x_stats_arr[i].size == 4) {
9128                         /* 4-byte counter */
9129                         buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9130                         j++;
9131                         continue;
9132                 }
9133                 /* 8-byte counter */
9134                 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9135                                   *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9136                 j++;
9137         }
9138 }
9139
9140 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9141 {
9142         struct bnx2x *bp = netdev_priv(dev);
9143         int port = BP_PORT(bp);
9144         int i;
9145
9146         if (!netif_running(dev))
9147                 return 0;
9148
9149         if (!bp->port.pmf)
9150                 return 0;
9151
9152         if (data == 0)
9153                 data = 2;
9154
9155         for (i = 0; i < (data * 2); i++) {
9156                 if ((i % 2) == 0)
9157                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9158                                       bp->link_params.hw_led_mode,
9159                                       bp->link_params.chip_id);
9160                 else
9161                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9162                                       bp->link_params.hw_led_mode,
9163                                       bp->link_params.chip_id);
9164
9165                 msleep_interruptible(500);
9166                 if (signal_pending(current))
9167                         break;
9168         }
9169
9170         if (bp->link_vars.link_up)
9171                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9172                               bp->link_vars.line_speed,
9173                               bp->link_params.hw_led_mode,
9174                               bp->link_params.chip_id);
9175
9176         return 0;
9177 }
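
/* Editorial example: "ethtool -p eth0 5" blinks the port LED for
 * roughly 5 seconds (500 ms on / 500 ms off per cycle); with no
 * duration given the loop above defaults to 2 seconds.
 */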
9178
9179 static struct ethtool_ops bnx2x_ethtool_ops = {
9180         .get_settings           = bnx2x_get_settings,
9181         .set_settings           = bnx2x_set_settings,
9182         .get_drvinfo            = bnx2x_get_drvinfo,
9183         .get_wol                = bnx2x_get_wol,
9184         .set_wol                = bnx2x_set_wol,
9185         .get_msglevel           = bnx2x_get_msglevel,
9186         .set_msglevel           = bnx2x_set_msglevel,
9187         .nway_reset             = bnx2x_nway_reset,
9188         .get_link               = ethtool_op_get_link,
9189         .get_eeprom_len         = bnx2x_get_eeprom_len,
9190         .get_eeprom             = bnx2x_get_eeprom,
9191         .set_eeprom             = bnx2x_set_eeprom,
9192         .get_coalesce           = bnx2x_get_coalesce,
9193         .set_coalesce           = bnx2x_set_coalesce,
9194         .get_ringparam          = bnx2x_get_ringparam,
9195         .set_ringparam          = bnx2x_set_ringparam,
9196         .get_pauseparam         = bnx2x_get_pauseparam,
9197         .set_pauseparam         = bnx2x_set_pauseparam,
9198         .get_rx_csum            = bnx2x_get_rx_csum,
9199         .set_rx_csum            = bnx2x_set_rx_csum,
9200         .get_tx_csum            = ethtool_op_get_tx_csum,
9201         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9202         .set_flags              = bnx2x_set_flags,
9203         .get_flags              = ethtool_op_get_flags,
9204         .get_sg                 = ethtool_op_get_sg,
9205         .set_sg                 = ethtool_op_set_sg,
9206         .get_tso                = ethtool_op_get_tso,
9207         .set_tso                = bnx2x_set_tso,
9208         .self_test_count        = bnx2x_self_test_count,
9209         .self_test              = bnx2x_self_test,
9210         .get_strings            = bnx2x_get_strings,
9211         .phys_id                = bnx2x_phys_id,
9212         .get_stats_count        = bnx2x_get_stats_count,
9213         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9214 };
9215
9216 /* end of ethtool_ops */
9217
9218 /****************************************************************************
9219 * General service functions
9220 ****************************************************************************/
9221
9222 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9223 {
9224         u16 pmcsr;
9225
9226         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9227
9228         switch (state) {
9229         case PCI_D0:
9230                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9231                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9232                                        PCI_PM_CTRL_PME_STATUS));
9233
9234                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9235                         /* delay required during transition out of D3hot */
9236                         msleep(20);
9237                 break;
9238
9239         case PCI_D3hot:
9240                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9241                 pmcsr |= 3;
9242
9243                 if (bp->wol)
9244                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9245
9246                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9247                                       pmcsr);
9248
9249         /* No more memory access after this point until
9250          * device is brought back to D0.
9251          */
9252                 break;
9253
9254         default:
9255                 return -EINVAL;
9256         }
9257         return 0;
9258 }
9259
9260 /*
9261  * net_device service functions
9262  */
9263
9264 static int bnx2x_poll(struct napi_struct *napi, int budget)
9265 {
9266         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9267                                                  napi);
9268         struct bnx2x *bp = fp->bp;
9269         int work_done = 0;
9270         u16 rx_cons_sb;
9271
9272 #ifdef BNX2X_STOP_ON_ERROR
9273         if (unlikely(bp->panic))
9274                 goto poll_panic;
9275 #endif
9276
9277         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9278         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9279         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9280
9281         bnx2x_update_fpsb_idx(fp);
9282
9283         if (BNX2X_HAS_TX_WORK(fp))
9284                 bnx2x_tx_int(fp, budget);
9285
9286         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9287         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9288                 rx_cons_sb++;
9289         if (BNX2X_HAS_RX_WORK(fp))
9290                 work_done = bnx2x_rx_int(fp, budget);
9291
9292         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9293         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9294         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9295                 rx_cons_sb++;
9296
9297         /* must not complete if we consumed the full budget */
9298         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9299
9300 #ifdef BNX2X_STOP_ON_ERROR
9301 poll_panic:
9302 #endif
9303                 netif_rx_complete(napi);
9304
9305                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9306                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9307                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9308                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9309         }
9310         return work_done;
9311 }
9312
9313
9314 /* We split the first BD into header and data BDs
9315  * to ease the pain of our fellow microcode engineers;
9316  * we use one mapping for both BDs.
9317  * So far this has only been observed to happen
9318  * in Other Operating Systems(TM).
9319  */
9320 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9321                                    struct bnx2x_fastpath *fp,
9322                                    struct eth_tx_bd **tx_bd, u16 hlen,
9323                                    u16 bd_prod, int nbd)
9324 {
9325         struct eth_tx_bd *h_tx_bd = *tx_bd;
9326         struct eth_tx_bd *d_tx_bd;
9327         dma_addr_t mapping;
9328         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9329
9330         /* first fix first BD */
9331         h_tx_bd->nbd = cpu_to_le16(nbd);
9332         h_tx_bd->nbytes = cpu_to_le16(hlen);
9333
9334         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9335            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9336            h_tx_bd->addr_lo, h_tx_bd->nbd);
9337
9338         /* now get a new data BD
9339          * (after the pbd) and fill it */
9340         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9341         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9342
9343         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9344                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9345
9346         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9347         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9348         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9349         d_tx_bd->vlan = 0;
9350         /* this marks the BD as one that has no individual mapping;
9351          * the FW ignores this flag in a BD not marked as a start BD
9352          */
9353         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9354         DP(NETIF_MSG_TX_QUEUED,
9355            "TSO split data size is %d (%x:%x)\n",
9356            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9357
9358         /* update tx_bd for marking the last BD flag */
9359         *tx_bd = d_tx_bd;
9360
9361         return bd_prod;
9362 }
9363
9364 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9365 {
9366         if (fix > 0)
9367                 csum = (u16) ~csum_fold(csum_sub(csum,
9368                                 csum_partial(t_header - fix, fix, 0)));
9369
9370         else if (fix < 0)
9371                 csum = (u16) ~csum_fold(csum_add(csum,
9372                                 csum_partial(t_header, -fix, 0)));
9373
9374         return swab16(csum);
9375 }
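
/* Editorial note (a sketch of the math, not a spec): a positive 'fix'
 * means 'fix' extra bytes before the transport header were included
 * in the partial checksum, so their sum is subtracted; a negative
 * 'fix' adds back bytes that were skipped.  swab16() then returns the
 * folded result in the byte order the parsing BD expects.
 */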
9376
9377 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9378 {
9379         u32 rc;
9380
9381         if (skb->ip_summed != CHECKSUM_PARTIAL)
9382                 rc = XMIT_PLAIN;
9383
9384         else {
9385                 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9386                         rc = XMIT_CSUM_V6;
9387                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9388                                 rc |= XMIT_CSUM_TCP;
9389
9390                 } else {
9391                         rc = XMIT_CSUM_V4;
9392                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9393                                 rc |= XMIT_CSUM_TCP;
9394                 }
9395         }
9396
9397         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9398                 rc |= XMIT_GSO_V4;
9399
9400         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9401                 rc |= XMIT_GSO_V6;
9402
9403         return rc;
9404 }
9405
9406 /* check if packet requires linearization (packet is too fragmented) */
9407 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9408                              u32 xmit_type)
9409 {
9410         int to_copy = 0;
9411         int hlen = 0;
9412         int first_bd_sz = 0;
9413
9414         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9415         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9416
9417                 if (xmit_type & XMIT_GSO) {
9418                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9419                         /* Check if LSO packet needs to be copied:
9420                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9421                         int wnd_size = MAX_FETCH_BD - 3;
9422                         /* Number of windows to check */
9423                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9424                         int wnd_idx = 0;
9425                         int frag_idx = 0;
9426                         u32 wnd_sum = 0;
9427
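                        /* The FW can use at most wnd_size data BDs per LSO
                         * segment (MAX_FETCH_BD minus the header, parsing
                         * and last BDs), so every window of wnd_size
                         * consecutive BDs must carry at least lso_mss bytes
                         * of payload; a window that falls short forces a
                         * copy.  Illustrative values only: with wnd_size 10
                         * and lso_mss 1460, ten consecutive 100-byte frags
                         * (1000 bytes) would require linearization. */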
9428                         /* Headers length */
9429                         hlen = (int)(skb_transport_header(skb) - skb->data) +
9430                                 tcp_hdrlen(skb);
9431
9432                         /* Amount of data (w/o headers) on linear part of SKB */
9433                         first_bd_sz = skb_headlen(skb) - hlen;
9434
9435                         wnd_sum  = first_bd_sz;
9436
9437                         /* Calculate the first sum - it's special */
9438                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9439                                 wnd_sum +=
9440                                         skb_shinfo(skb)->frags[frag_idx].size;
9441
9442                         /* If there was data on linear skb data - check it */
9443                         if (first_bd_sz > 0) {
9444                                 if (unlikely(wnd_sum < lso_mss)) {
9445                                         to_copy = 1;
9446                                         goto exit_lbl;
9447                                 }
9448
9449                                 wnd_sum -= first_bd_sz;
9450                         }
9451
9452                         /* Others are easier: run through the frag list and
9453                            check all windows */
9454                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9455                                 wnd_sum +=
9456                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9457
9458                                 if (unlikely(wnd_sum < lso_mss)) {
9459                                         to_copy = 1;
9460                                         break;
9461                                 }
9462                                 wnd_sum -=
9463                                         skb_shinfo(skb)->frags[wnd_idx].size;
9464                         }
9465
9466                 } else {
9467                         /* a non-LSO packet that is too fragmented must
9468                            always be linearized */
9469                         to_copy = 1;
9470                 }
9471         }
9472
9473 exit_lbl:
9474         if (unlikely(to_copy))
9475                 DP(NETIF_MSG_TX_QUEUED,
9476                    "Linearization IS REQUIRED for %s packet. "
9477                    "num_frags %d  hlen %d  first_bd_sz %d\n",
9478                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9479                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9480
9481         return to_copy;
9482 }
9483
9484 /* called with netif_tx_lock
9485  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9486  * netif_wake_queue()
9487  */
9488 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9489 {
9490         struct bnx2x *bp = netdev_priv(dev);
9491         struct bnx2x_fastpath *fp;
9492         struct sw_tx_bd *tx_buf;
9493         struct eth_tx_bd *tx_bd;
9494         struct eth_tx_parse_bd *pbd = NULL;
9495         u16 pkt_prod, bd_prod;
9496         int nbd, fp_index;
9497         dma_addr_t mapping;
9498         u32 xmit_type = bnx2x_xmit_type(bp, skb);
9499         int vlan_off = (bp->e1hov ? 4 : 0);
9500         int i;
9501         u8 hlen = 0;
9502
9503 #ifdef BNX2X_STOP_ON_ERROR
9504         if (unlikely(bp->panic))
9505                 return NETDEV_TX_BUSY;
9506 #endif
9507
9508         fp_index = (smp_processor_id() % bp->num_queues);
9509         fp = &bp->fp[fp_index];
9510
9511         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9512                 bp->eth_stats.driver_xoff++;
9513                 netif_stop_queue(dev);
9514                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9515                 return NETDEV_TX_BUSY;
9516         }
9517
9518         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9519            "  gso type %x  xmit_type %x\n",
9520            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9521            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9522
9523         /* First, check if we need to linearize the skb
9524            (due to FW restrictions) */
9525         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9526                 /* Statistics of linearization */
9527                 bp->lin_cnt++;
9528                 if (skb_linearize(skb) != 0) {
9529                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9530                            "silently dropping this SKB\n");
9531                         dev_kfree_skb_any(skb);
9532                         return NETDEV_TX_OK;
9533                 }
9534         }
9535
9536         /*
9537          * Please read carefully. First we use one BD which we mark as start,
9538          * then for TSO or xsum we have a parsing info BD,
9539          * and only then we have the rest of the TSO BDs.
9540          * (don't forget to mark the last one as last,
9541          * and to unmap only AFTER you write to the BD ...)
9542          * And above all, all pbd sizes are in words - NOT DWORDS!
9543          */
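        /* Illustrative BD chain for a single TSO packet:
         *
         *   [start BD] -> [parsing BD] -> [data BD] -> ... -> [last BD]
         *
         * (the parsing BD is present only for TSO and csum offload) */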
9544
9545         pkt_prod = fp->tx_pkt_prod++;
9546         bd_prod = TX_BD(fp->tx_bd_prod);
9547
9548         /* get a tx_buf and first BD */
9549         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9550         tx_bd = &fp->tx_desc_ring[bd_prod];
9551
9552         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9553         tx_bd->general_data = (UNICAST_ADDRESS <<
9554                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9555         /* header nbd */
9556         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9557
9558         /* remember the first BD of the packet */
9559         tx_buf->first_bd = fp->tx_bd_prod;
9560         tx_buf->skb = skb;
9561
9562         DP(NETIF_MSG_TX_QUEUED,
9563            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9564            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9565
9566         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9567                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9568                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9569                 vlan_off += 4;
9570         } else
9571                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9572
9573         if (xmit_type) {
9574                 /* turn on parsing and get a BD */
9575                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9576                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9577
9578                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9579         }
9580
9581         if (xmit_type & XMIT_CSUM) {
9582                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9583
9584                 /* for now NS flag is not used in Linux */
9585                 pbd->global_data = (hlen |
9586                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
9587                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9588
9589                 pbd->ip_hlen = (skb_transport_header(skb) -
9590                                 skb_network_header(skb)) / 2;
9591
9592                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9593
9594                 pbd->total_hlen = cpu_to_le16(hlen);
9595                 hlen = hlen*2 - vlan_off;
9596
9597                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9598
9599                 if (xmit_type & XMIT_CSUM_V4)
9600                         tx_bd->bd_flags.as_bitfield |=
9601                                                 ETH_TX_BD_FLAGS_IP_CSUM;
9602                 else
9603                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9604
9605                 if (xmit_type & XMIT_CSUM_TCP) {
9606                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9607
9608                 } else {
9609                         s8 fix = SKB_CS_OFF(skb); /* signed! */
9610
9611                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9612                         pbd->cs_offset = fix / 2;
9613
9614                         DP(NETIF_MSG_TX_QUEUED,
9615                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
9616                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9617                            SKB_CS(skb));
9618
9619                         /* HW bug: fixup the CSUM */
9620                         pbd->tcp_pseudo_csum =
9621                                 bnx2x_csum_fix(skb_transport_header(skb),
9622                                                SKB_CS(skb), fix);
9623
9624                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9625                            pbd->tcp_pseudo_csum);
9626                 }
9627         }
9628
9629         mapping = pci_map_single(bp->pdev, skb->data,
9630                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9631
9632         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9633         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
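        /* BD count: one start BD for the linear data, one BD per frag,
         * plus the parsing BD when one was allocated above */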
9634         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9635         tx_bd->nbd = cpu_to_le16(nbd);
9636         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9637
9638         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9639            "  nbytes %d  flags %x  vlan %x\n",
9640            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9641            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9642            le16_to_cpu(tx_bd->vlan));
9643
9644         if (xmit_type & XMIT_GSO) {
9645
9646                 DP(NETIF_MSG_TX_QUEUED,
9647                    "TSO packet len %d  hlen %d  headlen %d  tso size %d\n",
9648                    skb->len, hlen, skb_headlen(skb),
9649                    skb_shinfo(skb)->gso_size);
9650
9651                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9652
9653                 if (unlikely(skb_headlen(skb) > hlen))
9654                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9655                                                  bd_prod, ++nbd);
9656
9657                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9658                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9659                 pbd->tcp_flags = pbd_tcp_flags(skb);
9660
9661                 if (xmit_type & XMIT_GSO_V4) {
9662                         pbd->ip_id = swab16(ip_hdr(skb)->id);
9663                         pbd->tcp_pseudo_csum =
9664                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9665                                                           ip_hdr(skb)->daddr,
9666                                                           0, IPPROTO_TCP, 0));
9667
9668                 } else
9669                         pbd->tcp_pseudo_csum =
9670                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9671                                                         &ipv6_hdr(skb)->daddr,
9672                                                         0, IPPROTO_TCP, 0));
9673
9674                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9675         }
9676
9677         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9678                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9679
9680                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9681                 tx_bd = &fp->tx_desc_ring[bd_prod];
9682
9683                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9684                                        frag->size, PCI_DMA_TODEVICE);
9685
9686                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9687                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9688                 tx_bd->nbytes = cpu_to_le16(frag->size);
9689                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9690                 tx_bd->bd_flags.as_bitfield = 0;
9691
9692                 DP(NETIF_MSG_TX_QUEUED,
9693                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9694                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9695                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9696         }
9697
9698         /* now at last mark the BD as the last BD */
9699         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9700
9701         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9702            tx_bd, tx_bd->bd_flags.as_bitfield);
9703
9704         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9705
9706         /* now send a tx doorbell, counting the next-page BD
9707          * if this packet's BDs wrap past the end of a BD page
9708          */
9709         if (TX_BD_POFF(bd_prod) < nbd)
9710                 nbd++;
9711
9712         if (pbd)
9713                 DP(NETIF_MSG_TX_QUEUED,
9714                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9715                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9716                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9717                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9718                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9719
9720         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9721
9722         /*
9723          * Make sure that the BD data is updated before updating the producer
9724          * since FW might read the BD right after the producer is updated.
9725          * This is only applicable for weak-ordered memory model archs such
9726          * as IA-64. The following barrier is also mandatory since the FW
9727          * assumes that packets always have BDs.
9728          */
9729         wmb();
9730
9731         fp->hw_tx_prods->bds_prod =
9732                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9733         mb(); /* FW restriction: must not reorder writing nbd and packets */
9734         fp->hw_tx_prods->packets_prod =
9735                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9736         DOORBELL(bp, FP_IDX(fp), 0);
9737
9738         mmiowb();
9739
9740         fp->tx_bd_prod += nbd;
9741         dev->trans_start = jiffies;
9742
9743         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9744                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9745                    if we put Tx into XOFF state. */
9746                 smp_mb();
9747                 netif_stop_queue(dev);
9748                 bp->eth_stats.driver_xoff++;
9749                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9750                         netif_wake_queue(dev);
9751         }
9752         fp->tx_pkt++;
9753
9754         return NETDEV_TX_OK;
9755 }
9756
9757 /* called with rtnl_lock */
9758 static int bnx2x_open(struct net_device *dev)
9759 {
9760         struct bnx2x *bp = netdev_priv(dev);
9761
9762         bnx2x_set_power_state(bp, PCI_D0);
9763
9764         return bnx2x_nic_load(bp, LOAD_OPEN);
9765 }
9766
9767 /* called with rtnl_lock */
9768 static int bnx2x_close(struct net_device *dev)
9769 {
9770         struct bnx2x *bp = netdev_priv(dev);
9771
9772         /* Unload the driver, release IRQs */
9773         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9774         if (atomic_read(&bp->pdev->enable_cnt) == 1)
9775                 if (!CHIP_REV_IS_SLOW(bp))
9776                         bnx2x_set_power_state(bp, PCI_D3hot);
9777
9778         return 0;
9779 }
9780
9781 /* called with netif_tx_lock from set_multicast */
9782 static void bnx2x_set_rx_mode(struct net_device *dev)
9783 {
9784         struct bnx2x *bp = netdev_priv(dev);
9785         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9786         int port = BP_PORT(bp);
9787
9788         if (bp->state != BNX2X_STATE_OPEN) {
9789                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9790                 return;
9791         }
9792
9793         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9794
9795         if (dev->flags & IFF_PROMISC)
9796                 rx_mode = BNX2X_RX_MODE_PROMISC;
9797
9798         else if ((dev->flags & IFF_ALLMULTI) ||
9799                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9800                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9801
9802         else { /* some multicasts */
9803                 if (CHIP_IS_E1(bp)) {
9804                         int i, old, offset;
9805                         struct dev_mc_list *mclist;
9806                         struct mac_configuration_cmd *config =
9807                                                 bnx2x_sp(bp, mcast_config);
9808
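                        /* load each multicast address into its own
                         * MAC CAM entry for this port */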
9809                         for (i = 0, mclist = dev->mc_list;
9810                              mclist && (i < dev->mc_count);
9811                              i++, mclist = mclist->next) {
9812
9813                                 config->config_table[i].
9814                                         cam_entry.msb_mac_addr =
9815                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
9816                                 config->config_table[i].
9817                                         cam_entry.middle_mac_addr =
9818                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
9819                                 config->config_table[i].
9820                                         cam_entry.lsb_mac_addr =
9821                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
9822                                 config->config_table[i].cam_entry.flags =
9823                                                         cpu_to_le16(port);
9824                                 config->config_table[i].
9825                                         target_table_entry.flags = 0;
9826                                 config->config_table[i].
9827                                         target_table_entry.client_id = 0;
9828                                 config->config_table[i].
9829                                         target_table_entry.vlan_id = 0;
9830
9831                                 DP(NETIF_MSG_IFUP,
9832                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9833                                    config->config_table[i].
9834                                                 cam_entry.msb_mac_addr,
9835                                    config->config_table[i].
9836                                                 cam_entry.middle_mac_addr,
9837                                    config->config_table[i].
9838                                                 cam_entry.lsb_mac_addr);
9839                         }
9840                         old = config->hdr.length_6b;
9841                         if (old > i) {
9842                                 for (; i < old; i++) {
9843                                         if (CAM_IS_INVALID(config->
9844                                                            config_table[i])) {
9845                                                 i--; /* already invalidated */
9846                                                 break;
9847                                         }
9848                                         /* invalidate */
9849                                         CAM_INVALIDATE(config->
9850                                                        config_table[i]);
9851                                 }
9852                         }
9853
9854                         if (CHIP_REV_IS_SLOW(bp))
9855                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9856                         else
9857                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
9858
9859                         config->hdr.length_6b = i;
9860                         config->hdr.offset = offset;
9861                         config->hdr.client_id = BP_CL_ID(bp);
9862                         config->hdr.reserved1 = 0;
9863
9864                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9865                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9866                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9867                                       0);
9868                 } else { /* E1H */
9869                         /* Accept one or more multicasts */
9870                         struct dev_mc_list *mclist;
9871                         u32 mc_filter[MC_HASH_SIZE];
9872                         u32 crc, bit, regidx;
9873                         int i;
9874
9875                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9876
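                        /* 32 * MC_HASH_SIZE bit hash filter: the top byte
                         * of the little-endian CRC32c of the MAC address
                         * picks one filter bit - regidx selects the 32-bit
                         * word, the low five bits the bit within it */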
9877                         for (i = 0, mclist = dev->mc_list;
9878                              mclist && (i < dev->mc_count);
9879                              i++, mclist = mclist->next) {
9880
9881                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9882                                    mclist->dmi_addr);
9883
9884                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9885                                 bit = (crc >> 24) & 0xff;
9886                                 regidx = bit >> 5;
9887                                 bit &= 0x1f;
9888                                 mc_filter[regidx] |= (1 << bit);
9889                         }
9890
9891                         for (i = 0; i < MC_HASH_SIZE; i++)
9892                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9893                                        mc_filter[i]);
9894                 }
9895         }
9896
9897         bp->rx_mode = rx_mode;
9898         bnx2x_set_storm_rx_mode(bp);
9899 }
9900
9901 /* called with rtnl_lock */
9902 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9903 {
9904         struct sockaddr *addr = p;
9905         struct bnx2x *bp = netdev_priv(dev);
9906
9907         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9908                 return -EINVAL;
9909
9910         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9911         if (netif_running(dev)) {
9912                 if (CHIP_IS_E1(bp))
9913                         bnx2x_set_mac_addr_e1(bp, 1);
9914                 else
9915                         bnx2x_set_mac_addr_e1h(bp, 1);
9916         }
9917
9918         return 0;
9919 }
9920
9921 /* called with rtnl_lock */
9922 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9923 {
9924         struct mii_ioctl_data *data = if_mii(ifr);
9925         struct bnx2x *bp = netdev_priv(dev);
9926         int port = BP_PORT(bp);
9927         int err;
9928
9929         switch (cmd) {
9930         case SIOCGMIIPHY:
9931                 data->phy_id = bp->port.phy_addr;
9932
9933                 /* fallthrough */
9934
9935         case SIOCGMIIREG: {
9936                 u16 mii_regval;
9937
9938                 if (!netif_running(dev))
9939                         return -EAGAIN;
9940
9941                 mutex_lock(&bp->port.phy_mutex);
9942                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9943                                       DEFAULT_PHY_DEV_ADDR,
9944                                       (data->reg_num & 0x1f), &mii_regval);
9945                 data->val_out = mii_regval;
9946                 mutex_unlock(&bp->port.phy_mutex);
9947                 return err;
9948         }
9949
9950         case SIOCSMIIREG:
9951                 if (!capable(CAP_NET_ADMIN))
9952                         return -EPERM;
9953
9954                 if (!netif_running(dev))
9955                         return -EAGAIN;
9956
9957                 mutex_lock(&bp->port.phy_mutex);
9958                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9959                                        DEFAULT_PHY_DEV_ADDR,
9960                                        (data->reg_num & 0x1f), data->val_in);
9961                 mutex_unlock(&bp->port.phy_mutex);
9962                 return err;
9963
9964         default:
9965                 /* do nothing */
9966                 break;
9967         }
9968
9969         return -EOPNOTSUPP;
9970 }
9971
9972 /* called with rtnl_lock */
9973 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9974 {
9975         struct bnx2x *bp = netdev_priv(dev);
9976         int rc = 0;
9977
9978         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9979             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9980                 return -EINVAL;
9981
9982         /* This does not race with packet allocation
9983          * because the actual alloc size is
9984          * only updated as part of load
9985          */
9986         dev->mtu = new_mtu;
9987
9988         if (netif_running(dev)) {
9989                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9990                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9991         }
9992
9993         return rc;
9994 }
9995
9996 static void bnx2x_tx_timeout(struct net_device *dev)
9997 {
9998         struct bnx2x *bp = netdev_priv(dev);
9999
10000 #ifdef BNX2X_STOP_ON_ERROR
10001         if (!bp->panic)
10002                 bnx2x_panic();
10003 #endif
10004         /* This allows the netif to be shut down gracefully before resetting */
10005         schedule_work(&bp->reset_task);
10006 }
10007
10008 #ifdef BCM_VLAN
10009 /* called with rtnl_lock */
10010 static void bnx2x_vlan_rx_register(struct net_device *dev,
10011                                    struct vlan_group *vlgrp)
10012 {
10013         struct bnx2x *bp = netdev_priv(dev);
10014
10015         bp->vlgrp = vlgrp;
10016         if (netif_running(dev))
10017                 bnx2x_set_client_config(bp);
10018 }
10019
10020 #endif
10021
10022 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
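/* netpoll entry point: run the interrupt handler with the IRQ masked */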
10023 static void poll_bnx2x(struct net_device *dev)
10024 {
10025         struct bnx2x *bp = netdev_priv(dev);
10026
10027         disable_irq(bp->pdev->irq);
10028         bnx2x_interrupt(bp->pdev->irq, dev);
10029         enable_irq(bp->pdev->irq);
10030 }
10031 #endif
10032
10033 static const struct net_device_ops bnx2x_netdev_ops = {
10034         .ndo_open               = bnx2x_open,
10035         .ndo_stop               = bnx2x_close,
10036         .ndo_start_xmit         = bnx2x_start_xmit,
10037         .ndo_set_multicast_list = bnx2x_set_rx_mode,
10038         .ndo_set_mac_address    = bnx2x_change_mac_addr,
10039         .ndo_validate_addr      = eth_validate_addr,
10040         .ndo_do_ioctl           = bnx2x_ioctl,
10041         .ndo_change_mtu         = bnx2x_change_mtu,
10042         .ndo_tx_timeout         = bnx2x_tx_timeout,
10043 #ifdef BCM_VLAN
10044         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
10045 #endif
10046 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10047         .ndo_poll_controller    = poll_bnx2x,
10048 #endif
10049 };
10050
10051
10052 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10053                                     struct net_device *dev)
10054 {
10055         struct bnx2x *bp;
10056         int rc;
10057
10058         SET_NETDEV_DEV(dev, &pdev->dev);
10059         bp = netdev_priv(dev);
10060
10061         bp->dev = dev;
10062         bp->pdev = pdev;
10063         bp->flags = 0;
10064         bp->func = PCI_FUNC(pdev->devfn);
10065
10066         rc = pci_enable_device(pdev);
10067         if (rc) {
10068                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10069                 goto err_out;
10070         }
10071
10072         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10073                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10074                        " aborting\n");
10075                 rc = -ENODEV;
10076                 goto err_out_disable;
10077         }
10078
10079         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10080                 printk(KERN_ERR PFX "Cannot find second PCI device"
10081                        " base address, aborting\n");
10082                 rc = -ENODEV;
10083                 goto err_out_disable;
10084         }
10085
10086         if (atomic_read(&pdev->enable_cnt) == 1) {
10087                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10088                 if (rc) {
10089                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10090                                " aborting\n");
10091                         goto err_out_disable;
10092                 }
10093
10094                 pci_set_master(pdev);
10095                 pci_save_state(pdev);
10096         }
10097
10098         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10099         if (bp->pm_cap == 0) {
10100                 printk(KERN_ERR PFX "Cannot find power management"
10101                        " capability, aborting\n");
10102                 rc = -EIO;
10103                 goto err_out_release;
10104         }
10105
10106         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10107         if (bp->pcie_cap == 0) {
10108                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10109                        " aborting\n");
10110                 rc = -EIO;
10111                 goto err_out_release;
10112         }
10113
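        /* prefer a 64-bit DMA mask (DAC); fall back to 32-bit if unsupported */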
10114         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10115                 bp->flags |= USING_DAC_FLAG;
10116                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10117                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10118                                " failed, aborting\n");
10119                         rc = -EIO;
10120                         goto err_out_release;
10121                 }
10122
10123         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10124                 printk(KERN_ERR PFX "System does not support DMA,"
10125                        " aborting\n");
10126                 rc = -EIO;
10127                 goto err_out_release;
10128         }
10129
10130         dev->mem_start = pci_resource_start(pdev, 0);
10131         dev->base_addr = dev->mem_start;
10132         dev->mem_end = pci_resource_end(pdev, 0);
10133
10134         dev->irq = pdev->irq;
10135
10136         bp->regview = pci_ioremap_bar(pdev, 0);
10137         if (!bp->regview) {
10138                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10139                 rc = -ENOMEM;
10140                 goto err_out_release;
10141         }
10142
10143         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10144                                         min_t(u64, BNX2X_DB_SIZE,
10145                                               pci_resource_len(pdev, 2)));
10146         if (!bp->doorbells) {
10147                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10148                 rc = -ENOMEM;
10149                 goto err_out_unmap;
10150         }
10151
10152         bnx2x_set_power_state(bp, PCI_D0);
10153
10154         /* clean indirect addresses */
10155         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10156                                PCICFG_VENDOR_ID_OFFSET);
10157         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10158         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10159         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10160         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10161
10162         dev->watchdog_timeo = TX_TIMEOUT;
10163
10164         dev->netdev_ops = &bnx2x_netdev_ops;
10165         dev->ethtool_ops = &bnx2x_ethtool_ops;
10166         dev->features |= NETIF_F_SG;
10167         dev->features |= NETIF_F_HW_CSUM;
10168         if (bp->flags & USING_DAC_FLAG)
10169                 dev->features |= NETIF_F_HIGHDMA;
10170 #ifdef BCM_VLAN
10171         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10172 #endif
10173         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10174         dev->features |= NETIF_F_TSO6;
10175
10176         return 0;
10177
10178 err_out_unmap:
10179         if (bp->regview) {
10180                 iounmap(bp->regview);
10181                 bp->regview = NULL;
10182         }
10183         if (bp->doorbells) {
10184                 iounmap(bp->doorbells);
10185                 bp->doorbells = NULL;
10186         }
10187
10188 err_out_release:
10189         if (atomic_read(&pdev->enable_cnt) == 1)
10190                 pci_release_regions(pdev);
10191
10192 err_out_disable:
10193         pci_disable_device(pdev);
10194         pci_set_drvdata(pdev, NULL);
10195
10196 err_out:
10197         return rc;
10198 }
10199
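/* return the negotiated PCIe link width (number of lanes) */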
10200 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10201 {
10202         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10203
10204         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10205         return val;
10206 }
10207
10208 /* return value of 1=2.5GHz 2=5GHz */
10209 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10210 {
10211         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10212
10213         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10214         return val;
10215 }
10216
10217 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10218                                     const struct pci_device_id *ent)
10219 {
10220         static int version_printed;
10221         struct net_device *dev = NULL;
10222         struct bnx2x *bp;
10223         int rc;
10224
10225         if (version_printed++ == 0)
10226                 printk(KERN_INFO "%s", version);
10227
10228         /* dev zeroed in alloc_etherdev */
10229         dev = alloc_etherdev(sizeof(*bp));
10230         if (!dev) {
10231                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10232                 return -ENOMEM;
10233         }
10234
10235         bp = netdev_priv(dev);
10236         bp->msglevel = debug;
10237
10238         rc = bnx2x_init_dev(pdev, dev);
10239         if (rc < 0) {
10240                 free_netdev(dev);
10241                 return rc;
10242         }
10243
10244         rc = register_netdev(dev);
10245         if (rc) {
10246                 dev_err(&pdev->dev, "Cannot register net device\n");
10247                 goto init_one_exit;
10248         }
10249
10250         pci_set_drvdata(pdev, dev);
10251
10252         rc = bnx2x_init_bp(bp);
10253         if (rc) {
10254                 unregister_netdev(dev);
10255                 goto init_one_exit;
10256         }
10257
10258         netif_carrier_off(dev);
10259
10260         bp->common.name = board_info[ent->driver_data].name;
10261         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10262                " IRQ %d, ", dev->name, bp->common.name,
10263                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10264                bnx2x_get_pcie_width(bp),
10265                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10266                dev->base_addr, bp->pdev->irq);
10267         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10268         return 0;
10269
10270 init_one_exit:
10271         if (bp->regview)
10272                 iounmap(bp->regview);
10273
10274         if (bp->doorbells)
10275                 iounmap(bp->doorbells);
10276
10277         free_netdev(dev);
10278
10279         if (atomic_read(&pdev->enable_cnt) == 1)
10280                 pci_release_regions(pdev);
10281
10282         pci_disable_device(pdev);
10283         pci_set_drvdata(pdev, NULL);
10284
10285         return rc;
10286 }
10287
10288 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10289 {
10290         struct net_device *dev = pci_get_drvdata(pdev);
10291         struct bnx2x *bp;
10292
10293         if (!dev) {
10294                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10295                 return;
10296         }
10297         bp = netdev_priv(dev);
10298
10299         unregister_netdev(dev);
10300
10301         if (bp->regview)
10302                 iounmap(bp->regview);
10303
10304         if (bp->doorbells)
10305                 iounmap(bp->doorbells);
10306
10307         free_netdev(dev);
10308
10309         if (atomic_read(&pdev->enable_cnt) == 1)
10310                 pci_release_regions(pdev);
10311
10312         pci_disable_device(pdev);
10313         pci_set_drvdata(pdev, NULL);
10314 }
10315
10316 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10317 {
10318         struct net_device *dev = pci_get_drvdata(pdev);
10319         struct bnx2x *bp;
10320
10321         if (!dev) {
10322                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10323                 return -ENODEV;
10324         }
10325         bp = netdev_priv(dev);
10326
10327         rtnl_lock();
10328
10329         pci_save_state(pdev);
10330
10331         if (!netif_running(dev)) {
10332                 rtnl_unlock();
10333                 return 0;
10334         }
10335
10336         netif_device_detach(dev);
10337
10338         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10339
10340         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10341
10342         rtnl_unlock();
10343
10344         return 0;
10345 }
10346
10347 static int bnx2x_resume(struct pci_dev *pdev)
10348 {
10349         struct net_device *dev = pci_get_drvdata(pdev);
10350         struct bnx2x *bp;
10351         int rc;
10352
10353         if (!dev) {
10354                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10355                 return -ENODEV;
10356         }
10357         bp = netdev_priv(dev);
10358
10359         rtnl_lock();
10360
10361         pci_restore_state(pdev);
10362
10363         if (!netif_running(dev)) {
10364                 rtnl_unlock();
10365                 return 0;
10366         }
10367
10368         bnx2x_set_power_state(bp, PCI_D0);
10369         netif_device_attach(dev);
10370
10371         rc = bnx2x_nic_load(bp, LOAD_OPEN);
10372
10373         rtnl_unlock();
10374
10375         return rc;
10376 }
10377
10378 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10379 {
10380         int i;
10381
10382         bp->state = BNX2X_STATE_ERROR;
10383
10384         bp->rx_mode = BNX2X_RX_MODE_NONE;
10385
10386         bnx2x_netif_stop(bp, 0);
10387
10388         del_timer_sync(&bp->timer);
10389         bp->stats_state = STATS_STATE_DISABLED;
10390         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10391
10392         /* Release IRQs */
10393         bnx2x_free_irq(bp);
10394
10395         if (CHIP_IS_E1(bp)) {
10396                 struct mac_configuration_cmd *config =
10397                                                 bnx2x_sp(bp, mcast_config);
10398
10399                 for (i = 0; i < config->hdr.length_6b; i++)
10400                         CAM_INVALIDATE(config->config_table[i]);
10401         }
10402
10403         /* Free SKBs, SGEs, TPA pool and driver internals */
10404         bnx2x_free_skbs(bp);
10405         for_each_queue(bp, i)
10406                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10407         bnx2x_free_mem(bp);
10408
10409         bp->state = BNX2X_STATE_CLOSED;
10410
10411         netif_carrier_off(bp->dev);
10412
10413         return 0;
10414 }
10415
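/* re-read the shared-memory info needed after an EEH reset: shmem base,
 * MCP presence and the current FW sequence number */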
10416 static void bnx2x_eeh_recover(struct bnx2x *bp)
10417 {
10418         u32 val;
10419
10420         mutex_init(&bp->port.phy_mutex);
10421
10422         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10423         bp->link_params.shmem_base = bp->common.shmem_base;
10424         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10425
10426         if (!bp->common.shmem_base ||
10427             (bp->common.shmem_base < 0xA0000) ||
10428             (bp->common.shmem_base >= 0xC0000)) {
10429                 BNX2X_DEV_INFO("MCP not active\n");
10430                 bp->flags |= NO_MCP_FLAG;
10431                 return;
10432         }
10433
10434         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10435         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10436                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10437                 BNX2X_ERR("BAD MCP validity signature\n");
10438
10439         if (!BP_NOMCP(bp)) {
10440                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10441                               & DRV_MSG_SEQ_NUMBER_MASK);
10442                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10443         }
10444 }
10445
10446 /**
10447  * bnx2x_io_error_detected - called when PCI error is detected
10448  * @pdev: Pointer to PCI device
10449  * @state: The current pci connection state
10450  *
10451  * This function is called after a PCI bus error affecting
10452  * this device has been detected.
10453  */
10454 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10455                                                 pci_channel_state_t state)
10456 {
10457         struct net_device *dev = pci_get_drvdata(pdev);
10458         struct bnx2x *bp = netdev_priv(dev);
10459
10460         rtnl_lock();
10461
10462         netif_device_detach(dev);
10463
10464         if (netif_running(dev))
10465                 bnx2x_eeh_nic_unload(bp);
10466
10467         pci_disable_device(pdev);
10468
10469         rtnl_unlock();
10470
10471         /* Request a slot reset */
10472         return PCI_ERS_RESULT_NEED_RESET;
10473 }
10474
10475 /**
10476  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10477  * @pdev: Pointer to PCI device
10478  *
10479  * Restart the card from scratch, as if from a cold-boot.
10480  */
10481 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10482 {
10483         struct net_device *dev = pci_get_drvdata(pdev);
10484         struct bnx2x *bp = netdev_priv(dev);
10485
10486         rtnl_lock();
10487
10488         if (pci_enable_device(pdev)) {
10489                 dev_err(&pdev->dev,
10490                         "Cannot re-enable PCI device after reset\n");
10491                 rtnl_unlock();
10492                 return PCI_ERS_RESULT_DISCONNECT;
10493         }
10494
10495         pci_set_master(pdev);
10496         pci_restore_state(pdev);
10497
10498         if (netif_running(dev))
10499                 bnx2x_set_power_state(bp, PCI_D0);
10500
10501         rtnl_unlock();
10502
10503         return PCI_ERS_RESULT_RECOVERED;
10504 }
10505
10506 /**
10507  * bnx2x_io_resume - called when traffic can start flowing again
10508  * @pdev: Pointer to PCI device
10509  *
10510  * This callback is called when the error recovery driver tells us that
10511  * it's OK to resume normal operation.
10512  */
10513 static void bnx2x_io_resume(struct pci_dev *pdev)
10514 {
10515         struct net_device *dev = pci_get_drvdata(pdev);
10516         struct bnx2x *bp = netdev_priv(dev);
10517
10518         rtnl_lock();
10519
10520         bnx2x_eeh_recover(bp);
10521
10522         if (netif_running(dev))
10523                 bnx2x_nic_load(bp, LOAD_NORMAL);
10524
10525         netif_device_attach(dev);
10526
10527         rtnl_unlock();
10528 }
10529
10530 static struct pci_error_handlers bnx2x_err_handler = {
10531         .error_detected = bnx2x_io_error_detected,
10532         .slot_reset = bnx2x_io_slot_reset,
10533         .resume = bnx2x_io_resume,
10534 };
10535
10536 static struct pci_driver bnx2x_pci_driver = {
10537         .name        = DRV_MODULE_NAME,
10538         .id_table    = bnx2x_pci_tbl,
10539         .probe       = bnx2x_init_one,
10540         .remove      = __devexit_p(bnx2x_remove_one),
10541         .suspend     = bnx2x_suspend,
10542         .resume      = bnx2x_resume,
10543         .err_handler = &bnx2x_err_handler,
10544 };
10545
10546 static int __init bnx2x_init(void)
10547 {
10548         bnx2x_wq = create_singlethread_workqueue("bnx2x");
10549         if (bnx2x_wq == NULL) {
10550                 printk(KERN_ERR PFX "Cannot create workqueue\n");
10551                 return -ENOMEM;
10552         }
10553
10554         return pci_register_driver(&bnx2x_pci_driver);
10555 }
10556
10557 static void __exit bnx2x_cleanup(void)
10558 {
10559         pci_unregister_driver(&bnx2x_pci_driver);
10560
10561         destroy_workqueue(bnx2x_wq);
10562 }
10563
10564 module_init(bnx2x_init);
10565 module_exit(bnx2x_cleanup);
10566