/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.52.53-2"
#define DRV_MODULE_RELDATE      "2010/21/07"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
                                "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
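
/* Usage example (hypothetical values): all of the parameters above are
 * declared with sysfs permission 0, so they are load-time only, e.g.
 *
 *   modprobe bnx2x multi_mode=1 num_queues=4 disable_tpa=1 int_mode=2
 *
 * Changing any of them afterwards requires reloading the module.
 */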

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

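/* Companion to bnx2x_reg_wr_ind(): latch the target GRC address via
 * PCI config space, read back through the data window, then park the
 * address register at PCICFG_VENDOR_ID_OFFSET again - presumably so a
 * stale address cannot redirect a later config cycle.
 */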
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;
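
        /* The DMAE engine signals completion by writing comp_val
         * (DMAE_COMP_VAL) to comp_addr, i.e. the wb_comp word that the
         * loop further down polls for.
         */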

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;
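
        /* A single DMAE command can move at most dmae_wr_max dwords,
         * so larger writes are split below.  Note that len counts
         * dwords while offset advances in bytes, hence the "* 4".
         */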
        while (len > dmae_wr_max) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, dmae_wr_max);
                offset += dmae_wr_max * 4;
                len -= dmae_wr_max;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

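/* Walk the assert list of each STORM processor (X/T/C/U) and print
 * every valid entry; a row0 of COMMON_ASM_INVALID_ASSERT_OPCODE marks
 * the end of a list.  Returns the number of asserts found.
 */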
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

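/* Dump the MCP firmware trace from its scratchpad.  The word read at
 * shmem_base - 0x800 + 4 gives the current write mark into what
 * appears to be a cyclic buffer, so it is printed in two passes:
 * mark..shmem_base first, then the start of the buffer up to mark.
 */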
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 addr;
        u32 mark, offset;
        __be32 data[9];
        int word;

        if (BP_NOMCP(bp)) {
                BNX2X_ERR("NO MCP - can not dump\n");
                return;
        }

        addr = bp->common.shmem_base - 0x0800 + 4;
        mark = REG_RD(bp, addr);
        mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
        pr_err("begin fw dump (mark 0x%x)\n", mark);

        pr_err("");
        for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        pr_err("end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
                  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
                  "  spq_prod_idx(0x%x)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
                          "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
                          "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
                          "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
                          "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
                          "  *tx_cons_sb(0x%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
                          "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

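/* Program the HC (host coalescing) block for the interrupt mode in
 * use (MSI-X, MSI or INTx).  Note that the INTx branch below first
 * writes the config with the MSI/MSI-X enable bit still set and only
 * clears that bit for the final write after the if/else.
 */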
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        if (func <= 5)
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        else
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

        /* Try to acquire the lock */
        REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit)
                return true;

        DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
        return false;
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;

        barrier(); /* status block is written to by the chip */
        fp->fp_c_idx = fpsb->c_status_block.status_block_index;
        fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        /* prefetch skb end pointer to speedup dev_kfree_skb() */
        prefetch(&skb->end);

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        return hw_cons != fp->tx_pkt_cons;
}

static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -1;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_bd_cons update visible to start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that
         * start_xmit() will miss it and cause the queue to be stopped
         * forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {
                /* Taking tx_lock() is needed to prevent reenabling the queue
                 * while it's empty. This could have happened if rx_action() gets
                 * suspended in bnx2x_tx_int() after the condition before
                 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
                 *
                 * stops the queue->sees fresh tx_bd_cons->releases the queue->
                 * sends some packets consuming the whole queue again->
                 * stops the queue
                 */

                __netif_tx_lock(txq, smp_processor_id());

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
        return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp[%d] state is %x\n",
                                  command, fp->index, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

#ifdef BCM_CNIC
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
                bnx2x_cnic_cfc_comp(bp, cid);
                break;
#endif

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = dma_map_page(&bp->pdev->dev, page, 0,
                               SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        dma_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
                                 DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        dma_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        dma_sync_single_for_device(&bp->pdev->dev,
                                   dma_unmap_addr(cons_rx_buf, mapping),
                                   RX_COPY_THRESH, DMA_FROM_DEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        dma_unmap_addr_set(prod_rx_buf, mapping,
                           dma_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

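/* Advance rx_sge_prod past fully consumed SGE mask elements: the
 * pages referenced by this CQE are cleared in the mask, and the
 * producer then moves only over 64-bit mask words that have reached
 * zero, refilling each to all-ones as it passes.
 */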
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

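/* Open a TPA aggregation bin: the spare skb kept in tpa_pool[queue]
 * is mapped and posted at prod, while the buffer at cons (holding the
 * start of the aggregated packet) moves into the bin, still mapped,
 * until bnx2x_tpa_stop() closes the aggregation.
 */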
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, DMA_FROM_DEVICE);
        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

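/* Attach the SGE pages of a TPA aggregation to @skb as page frags.
 * A gso_size is also set so that, presumably, the stack can resegment
 * the LRO-merged packet when it has to be forwarded.
 */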
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
1406                 dma_unmap_page(&bp->pdev->dev,
1407                                dma_unmap_addr(&old_rx_pg, mapping),
1408                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1409
1410                 /* Add one frag and update the appropriate fields in the skb */
1411                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1412
1413                 skb->data_len += frag_len;
1414                 skb->truesize += frag_len;
1415                 skb->len += frag_len;
1416
1417                 frag_size -= frag_len;
1418         }
1419
1420         return 0;
1421 }
1422
1423 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1424                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1425                            u16 cqe_idx)
1426 {
1427         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1428         struct sk_buff *skb = rx_buf->skb;
1429         /* alloc new skb */
1430         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1431
1432         /* Unmap skb in the pool anyway, as we are going to change
1433            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1434            fails. */
1435         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
1436                          bp->rx_buf_size, DMA_FROM_DEVICE);
1437
1438         if (likely(new_skb)) {
1439                 /* fix ip xsum and give it to the stack */
1440                 /* (no need to map the new skb) */
1441 #ifdef BCM_VLAN
1442                 int is_vlan_cqe =
1443                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1444                          PARSING_FLAGS_VLAN);
1445                 int is_not_hwaccel_vlan_cqe =
1446                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1447 #endif
1448
1449                 prefetch(skb);
1450                 prefetch(((char *)(skb)) + 128);
1451
1452 #ifdef BNX2X_STOP_ON_ERROR
1453                 if (pad + len > bp->rx_buf_size) {
1454                         BNX2X_ERR("skb_put is about to fail...  "
1455                                   "pad %d  len %d  rx_buf_size %d\n",
1456                                   pad, len, bp->rx_buf_size);
1457                         bnx2x_panic();
1458                         return;
1459                 }
1460 #endif
1461
1462                 skb_reserve(skb, pad);
1463                 skb_put(skb, len);
1464
1465                 skb->protocol = eth_type_trans(skb, bp->dev);
1466                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1467
1468                 {
1469                         struct iphdr *iph;
1470
1471                         iph = (struct iphdr *)skb->data;
1472 #ifdef BCM_VLAN
1473                         /* If there is no Rx VLAN offloading -
1474                            take the VLAN tag into account */
1475                         if (unlikely(is_not_hwaccel_vlan_cqe))
1476                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1477 #endif
1478                         iph->check = 0;
1479                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1480                 }
1481
1482                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1483                                          &cqe->fast_path_cqe, cqe_idx)) {
1484 #ifdef BCM_VLAN
1485                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1486                             (!is_not_hwaccel_vlan_cqe))
1487                                 vlan_gro_receive(&fp->napi, bp->vlgrp,
1488                                                  le16_to_cpu(cqe->fast_path_cqe.
1489                                                              vlan_tag), skb);
1490                         else
1491 #endif
1492                                 napi_gro_receive(&fp->napi, skb);
1493                 } else {
1494                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1495                            " - dropping packet!\n");
1496                         dev_kfree_skb(skb);
1497                 }
1498
1500                 /* put new skb in bin */
1501                 fp->tpa_pool[queue].skb = new_skb;
1502
1503         } else {
1504                 /* else drop the packet and keep the buffer in the bin */
1505                 DP(NETIF_MSG_RX_STATUS,
1506                    "Failed to allocate new skb - dropping packet!\n");
1507                 fp->eth_q_stats.rx_skb_alloc_failed++;
1508         }
1509
1510         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1511 }
1512
1513 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1514                                         struct bnx2x_fastpath *fp,
1515                                         u16 bd_prod, u16 rx_comp_prod,
1516                                         u16 rx_sge_prod)
1517 {
1518         struct ustorm_eth_rx_producers rx_prods = {0};
1519         int i;
1520
1521         /* Update producers */
1522         rx_prods.bd_prod = bd_prod;
1523         rx_prods.cqe_prod = rx_comp_prod;
1524         rx_prods.sge_prod = rx_sge_prod;
1525
1526         /*
1527          * Make sure that the BD and SGE data is updated before updating the
1528          * producers since FW might read the BD/SGE right after the producer
1529          * is updated.
1530          * This is only applicable for weak-ordered memory model archs such
1531          * as IA-64. The following barrier is also mandatory since the FW
1532          * assumes that BDs always have buffers.
1533          */
1534         wmb();
1535
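        /* copy the producers struct into USTORM internal memory one
         * 32-bit word at a time */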
1536         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1537                 REG_WR(bp, BAR_USTRORM_INTMEM +
1538                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1539                        ((u32 *)&rx_prods)[i]);
1540
1541         mmiowb(); /* keep prod updates ordered */
1542
1543         DP(NETIF_MSG_RX_STATUS,
1544            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1545            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1546 }
1547
1548 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1549 {
1550         struct bnx2x *bp = fp->bp;
1551         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1552         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1553         int rx_pkt = 0;
1554
1555 #ifdef BNX2X_STOP_ON_ERROR
1556         if (unlikely(bp->panic))
1557                 return 0;
1558 #endif
1559
1560         /* The CQ "next" element is the same size as a regular element,
1561            which is why the plain increment below is OK */
1562         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1563         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1564                 hw_comp_cons++;
1565
1566         bd_cons = fp->rx_bd_cons;
1567         bd_prod = fp->rx_bd_prod;
1568         bd_prod_fw = bd_prod;
1569         sw_comp_cons = fp->rx_comp_cons;
1570         sw_comp_prod = fp->rx_comp_prod;
1571
1572         /* Memory barrier necessary as speculative reads of the rx
1573          * buffer can be ahead of the index in the status block
1574          */
1575         rmb();
1576
1577         DP(NETIF_MSG_RX_STATUS,
1578            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1579            fp->index, hw_comp_cons, sw_comp_cons);
1580
1581         while (sw_comp_cons != hw_comp_cons) {
1582                 struct sw_rx_bd *rx_buf = NULL;
1583                 struct sk_buff *skb;
1584                 union eth_rx_cqe *cqe;
1585                 u8 cqe_fp_flags, cqe_fp_status_flags;
1586                 u16 len, pad;
1587
1588                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1589                 bd_prod = RX_BD(bd_prod);
1590                 bd_cons = RX_BD(bd_cons);
1591
1592                 /* Prefetch the page containing the BD descriptor
1593                    at the producer's index. It will be needed when a new
1594                    skb is allocated */
1595                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1596                                              (&fp->rx_desc_ring[bd_prod])) -
1597                                   PAGE_SIZE + 1));
1598
1599                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1600                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1601                 cqe_fp_status_flags = cqe->fast_path_cqe.status_flags;
1602
1603                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1604                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1605                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1606                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1607                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1608                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1609
1610                 /* is this a slowpath msg? */
1611                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1612                         bnx2x_sp_event(fp, cqe);
1613                         goto next_cqe;
1614
1615                 /* this is an rx packet */
1616                 } else {
1617                         rx_buf = &fp->rx_buf_ring[bd_cons];
1618                         skb = rx_buf->skb;
1619                         prefetch(skb);
1620                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1621                         pad = cqe->fast_path_cqe.placement_offset;
1622
1623                         /* If the CQE is marked as both TPA_START and
1624                            TPA_END, it is a non-TPA CQE */
1625                         if ((!fp->disable_tpa) &&
1626                             (TPA_TYPE(cqe_fp_flags) !=
1627                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1628                                 u16 queue = cqe->fast_path_cqe.queue_index;
1629
1630                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1631                                         DP(NETIF_MSG_RX_STATUS,
1632                                            "calling tpa_start on queue %d\n",
1633                                            queue);
1634
1635                                         bnx2x_tpa_start(fp, queue, skb,
1636                                                         bd_cons, bd_prod);
1637                                         goto next_rx;
1638                                 }
1639
1640                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1641                                         DP(NETIF_MSG_RX_STATUS,
1642                                            "calling tpa_stop on queue %d\n",
1643                                            queue);
1644
1645                                         if (!BNX2X_RX_SUM_FIX(cqe))
1646                                                 BNX2X_ERR("STOP on non-TCP "
1647                                                           "data\n");
1648
1649                                         /* This is the size of the linear
1650                                            data on this skb */
1651                                         len = le16_to_cpu(cqe->fast_path_cqe.
1652                                                                 len_on_bd);
1653                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1654                                                     len, cqe, comp_ring_cons);
1655 #ifdef BNX2X_STOP_ON_ERROR
1656                                         if (bp->panic)
1657                                                 return 0;
1658 #endif
1659
1660                                         bnx2x_update_sge_prod(fp,
1661                                                         &cqe->fast_path_cqe);
1662                                         goto next_cqe;
1663                                 }
1664                         }
1665
1666                         dma_sync_single_for_device(&bp->pdev->dev,
1667                                         dma_unmap_addr(rx_buf, mapping),
1668                                                    pad + RX_COPY_THRESH,
1669                                                    DMA_FROM_DEVICE);
1670                         prefetch(((char *)(skb)) + 128);
1671
1672                         /* is this an error packet? */
1673                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1674                                 DP(NETIF_MSG_RX_ERR,
1675                                    "ERROR  flags %x  rx packet %u\n",
1676                                    cqe_fp_flags, sw_comp_cons);
1677                                 fp->eth_q_stats.rx_err_discard_pkt++;
1678                                 goto reuse_rx;
1679                         }
1680
1681                         /* Since we don't have a jumbo ring,
1682                          * copy small packets if the MTU exceeds 1500
1683                          */
1684                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1685                             (len <= RX_COPY_THRESH)) {
1686                                 struct sk_buff *new_skb;
1687
1688                                 new_skb = netdev_alloc_skb(bp->dev,
1689                                                            len + pad);
1690                                 if (new_skb == NULL) {
1691                                         DP(NETIF_MSG_RX_ERR,
1692                                            "ERROR  packet dropped "
1693                                            "because of alloc failure\n");
1694                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1695                                         goto reuse_rx;
1696                                 }
1697
1698                                 /* aligned copy */
1699                                 skb_copy_from_linear_data_offset(skb, pad,
1700                                                     new_skb->data + pad, len);
1701                                 skb_reserve(new_skb, pad);
1702                                 skb_put(new_skb, len);
1703
1704                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1705
1706                                 skb = new_skb;
1707
1708                         } else
1709                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1710                                 dma_unmap_single(&bp->pdev->dev,
1711                                         dma_unmap_addr(rx_buf, mapping),
1712                                                  bp->rx_buf_size,
1713                                                  DMA_FROM_DEVICE);
1714                                 skb_reserve(skb, pad);
1715                                 skb_put(skb, len);
1716
1717                         } else {
1718                                 DP(NETIF_MSG_RX_ERR,
1719                                    "ERROR  packet dropped because "
1720                                    "of alloc failure\n");
1721                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1722 reuse_rx:
1723                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1724                                 goto next_rx;
1725                         }
1726
1727                         skb->protocol = eth_type_trans(skb, bp->dev);
1728
1729                         if ((bp->dev->features & NETIF_F_RXHASH) &&
1730                             (cqe_fp_status_flags &
1731                              ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
1732                                 skb->rxhash = le32_to_cpu(
1733                                     cqe->fast_path_cqe.rss_hash_result);
1734
1735                         skb->ip_summed = CHECKSUM_NONE;
1736                         if (bp->rx_csum) {
1737                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1738                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1739                                 else
1740                                         fp->eth_q_stats.hw_csum_err++;
1741                         }
1742                 }
1743
1744                 skb_record_rx_queue(skb, fp->index);
1745
1746 #ifdef BCM_VLAN
1747                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1748                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1749                      PARSING_FLAGS_VLAN))
1750                         vlan_gro_receive(&fp->napi, bp->vlgrp,
1751                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
1752                 else
1753 #endif
1754                         napi_gro_receive(&fp->napi, skb);
1755
1757 next_rx:
1758                 rx_buf->skb = NULL;
1759
1760                 bd_cons = NEXT_RX_IDX(bd_cons);
1761                 bd_prod = NEXT_RX_IDX(bd_prod);
1762                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1763                 rx_pkt++;
1764 next_cqe:
1765                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1766                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1767
1768                 if (rx_pkt == budget)
1769                         break;
1770         } /* while */
1771
1772         fp->rx_bd_cons = bd_cons;
1773         fp->rx_bd_prod = bd_prod_fw;
1774         fp->rx_comp_cons = sw_comp_cons;
1775         fp->rx_comp_prod = sw_comp_prod;
1776
1777         /* Update producers */
1778         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1779                              fp->rx_sge_prod);
1780
1781         fp->rx_pkt += rx_pkt;
1782         fp->rx_calls++;
1783
1784         return rx_pkt;
1785 }
1786
1787 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1788 {
1789         struct bnx2x_fastpath *fp = fp_cookie;
1790         struct bnx2x *bp = fp->bp;
1791
1792         /* Return here if interrupt is disabled */
1793         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1794                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1795                 return IRQ_HANDLED;
1796         }
1797
1798         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1799            fp->index, fp->sb_id);
1800         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
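        /* the ack above also masks further interrupts from this SB;
         * the NAPI poll re-enables them when it completes */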
1801
1802 #ifdef BNX2X_STOP_ON_ERROR
1803         if (unlikely(bp->panic))
1804                 return IRQ_HANDLED;
1805 #endif
1806
1807         /* Handle Rx and Tx according to MSI-X vector */
1808         prefetch(fp->rx_cons_sb);
1809         prefetch(fp->tx_cons_sb);
1810         prefetch(&fp->status_blk->u_status_block.status_block_index);
1811         prefetch(&fp->status_blk->c_status_block.status_block_index);
1812         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1813
1814         return IRQ_HANDLED;
1815 }
1816
1817 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1818 {
1819         struct bnx2x *bp = netdev_priv(dev_instance);
1820         u16 status = bnx2x_ack_int(bp);
1821         u16 mask;
1822         int i;
1823
1824         /* Return here if interrupt is shared and it's not for us */
1825         if (unlikely(status == 0)) {
1826                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1827                 return IRQ_NONE;
1828         }
1829         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1830
1831         /* Return here if interrupt is disabled */
1832         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1833                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1834                 return IRQ_HANDLED;
1835         }
1836
1837 #ifdef BNX2X_STOP_ON_ERROR
1838         if (unlikely(bp->panic))
1839                 return IRQ_HANDLED;
1840 #endif
1841
1842         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1843                 struct bnx2x_fastpath *fp = &bp->fp[i];
1844
1845                 mask = 0x2 << fp->sb_id;
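                /* fastpath SB 'sb_id' owns bit (sb_id + 1) of the
                 * interrupt status; bit 0 belongs to the slow path */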
1846                 if (status & mask) {
1847                         /* Handle Rx and Tx according to SB id */
1848                         prefetch(fp->rx_cons_sb);
1849                         prefetch(&fp->status_blk->u_status_block.
1850                                                 status_block_index);
1851                         prefetch(fp->tx_cons_sb);
1852                         prefetch(&fp->status_blk->c_status_block.
1853                                                 status_block_index);
1854                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1855                         status &= ~mask;
1856                 }
1857         }
1858
1859 #ifdef BCM_CNIC
1860         mask = 0x2 << CNIC_SB_ID(bp);
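        /* the CNIC may be signalled via its own SB bit or via the
         * shared slow path bit 0 */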
1861         if (status & (mask | 0x1)) {
1862                 struct cnic_ops *c_ops = NULL;
1863
1864                 rcu_read_lock();
1865                 c_ops = rcu_dereference(bp->cnic_ops);
1866                 if (c_ops)
1867                         c_ops->cnic_handler(bp->cnic_data, NULL);
1868                 rcu_read_unlock();
1869
1870                 status &= ~mask;
1871         }
1872 #endif
1873
1874         if (unlikely(status & 0x1)) {
1875                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1876
1877                 status &= ~0x1;
1878                 if (!status)
1879                         return IRQ_HANDLED;
1880         }
1881
1882         if (unlikely(status))
1883                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1884                    status);
1885
1886         return IRQ_HANDLED;
1887 }
1888
1889 /* end of fast path */
1890
1891 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1892
1893 /* Link */
1894
1895 /*
1896  * General service functions
1897  */
1898
1899 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1900 {
1901         u32 lock_status;
1902         u32 resource_bit = (1 << resource);
1903         int func = BP_FUNC(bp);
1904         u32 hw_lock_control_reg;
1905         int cnt;
1906
1907         /* Validating that the resource is within range */
1908         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1909                 DP(NETIF_MSG_HW,
1910                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1911                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1912                 return -EINVAL;
1913         }
1914
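        /* each function owns an 8-byte lock control block: functions 0-5
         * map to DRIVER_CONTROL_1..6 and functions 6-7 to
         * DRIVER_CONTROL_7..8 */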
1915         if (func <= 5) {
1916                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1917         } else {
1918                 hw_lock_control_reg =
1919                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1920         }
1921
1922         /* Validating that the resource is not already taken */
1923         lock_status = REG_RD(bp, hw_lock_control_reg);
1924         if (lock_status & resource_bit) {
1925                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1926                    lock_status, resource_bit);
1927                 return -EEXIST;
1928         }
1929
1930         /* Try for 5 seconds, polling every 5ms */
1931         for (cnt = 0; cnt < 1000; cnt++) {
1932                 /* Try to acquire the lock */
1933                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1934                 lock_status = REG_RD(bp, hw_lock_control_reg);
1935                 if (lock_status & resource_bit)
1936                         return 0;
1937
1938                 msleep(5);
1939         }
1940         DP(NETIF_MSG_HW, "Timeout\n");
1941         return -EAGAIN;
1942 }
1943
1944 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1945 {
1946         u32 lock_status;
1947         u32 resource_bit = (1 << resource);
1948         int func = BP_FUNC(bp);
1949         u32 hw_lock_control_reg;
1950
1951         DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1952
1953         /* Validating that the resource is within range */
1954         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1955                 DP(NETIF_MSG_HW,
1956                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1957                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1958                 return -EINVAL;
1959         }
1960
1961         if (func <= 5) {
1962                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1963         } else {
1964                 hw_lock_control_reg =
1965                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1966         }
1967
1968         /* Validating that the resource is currently taken */
1969         lock_status = REG_RD(bp, hw_lock_control_reg);
1970         if (!(lock_status & resource_bit)) {
1971                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1972                    lock_status, resource_bit);
1973                 return -EFAULT;
1974         }
1975
1976         REG_WR(bp, hw_lock_control_reg, resource_bit);
1977         return 0;
1978 }
1979
1980 /* HW Lock for shared dual port PHYs */
1981 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1982 {
1983         mutex_lock(&bp->port.phy_mutex);
1984
1985         if (bp->port.need_hw_lock)
1986                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1987 }
1988
1989 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1990 {
1991         if (bp->port.need_hw_lock)
1992                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1993
1994         mutex_unlock(&bp->port.phy_mutex);
1995 }
1996
1997 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1998 {
1999         /* The GPIO should be swapped if swap register is set and active */
2000         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2001                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2002         int gpio_shift = gpio_num +
2003                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2004         u32 gpio_mask = (1 << gpio_shift);
2005         u32 gpio_reg;
2006         int value;
2007
2008         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2009                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2010                 return -EINVAL;
2011         }
2012
2013         /* read GPIO value */
2014         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2015
2016         /* get the requested pin value */
2017         if ((gpio_reg & gpio_mask) == gpio_mask)
2018                 value = 1;
2019         else
2020                 value = 0;
2021
2022         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
2023
2024         return value;
2025 }
2026
2027 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2028 {
2029         /* The GPIO should be swapped if swap register is set and active */
2030         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2031                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2032         int gpio_shift = gpio_num +
2033                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2034         u32 gpio_mask = (1 << gpio_shift);
2035         u32 gpio_reg;
2036
2037         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2038                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2039                 return -EINVAL;
2040         }
2041
2042         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2043         /* read GPIO and keep only the FLOAT bits */
2044         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2045
2046         switch (mode) {
2047         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2048                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2049                    gpio_num, gpio_shift);
2050                 /* clear FLOAT and set CLR */
2051                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2052                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2053                 break;
2054
2055         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2056                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2057                    gpio_num, gpio_shift);
2058                 /* clear FLOAT and set SET */
2059                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2060                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2061                 break;
2062
2063         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2064                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2065                    gpio_num, gpio_shift);
2066                 /* set FLOAT */
2067                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2068                 break;
2069
2070         default:
2071                 break;
2072         }
2073
2074         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2075         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2076
2077         return 0;
2078 }
2079
2080 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2081 {
2082         /* The GPIO should be swapped if swap register is set and active */
2083         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2084                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2085         int gpio_shift = gpio_num +
2086                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2087         u32 gpio_mask = (1 << gpio_shift);
2088         u32 gpio_reg;
2089
2090         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2091                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2092                 return -EINVAL;
2093         }
2094
2095         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2096         /* read GPIO int */
2097         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2098
2099         switch (mode) {
2100         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2101                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2102                                    "output low\n", gpio_num, gpio_shift);
2103                 /* clear SET and set CLR */
2104                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2105                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2106                 break;
2107
2108         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2109                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2110                                    "output high\n", gpio_num, gpio_shift);
2111                 /* clear CLR and set SET */
2112                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2113                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2114                 break;
2115
2116         default:
2117                 break;
2118         }
2119
2120         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2121         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2122
2123         return 0;
2124 }
2125
2126 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2127 {
2128         u32 spio_mask = (1 << spio_num);
2129         u32 spio_reg;
2130
2131         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2132             (spio_num > MISC_REGISTERS_SPIO_7)) {
2133                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2134                 return -EINVAL;
2135         }
2136
2137         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2138         /* read SPIO and keep only the FLOAT bits */
2139         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2140
2141         switch (mode) {
2142         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2143                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2144                 /* clear FLOAT and set CLR */
2145                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2146                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2147                 break;
2148
2149         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2150                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2151                 /* clear FLOAT and set SET */
2152                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2153                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2154                 break;
2155
2156         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2157                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2158                 /* set FLOAT */
2159                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2160                 break;
2161
2162         default:
2163                 break;
2164         }
2165
2166         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2167         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2168
2169         return 0;
2170 }
2171
2172 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2173 {
2174         switch (bp->link_vars.ieee_fc &
2175                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2176         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2177                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2178                                           ADVERTISED_Pause);
2179                 break;
2180
2181         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2182                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2183                                          ADVERTISED_Pause);
2184                 break;
2185
2186         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2187                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2188                 break;
2189
2190         default:
2191                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2192                                           ADVERTISED_Pause);
2193                 break;
2194         }
2195 }
2196
2197 static void bnx2x_link_report(struct bnx2x *bp)
2198 {
2199         if (bp->flags & MF_FUNC_DIS) {
2200                 netif_carrier_off(bp->dev);
2201                 netdev_err(bp->dev, "NIC Link is Down\n");
2202                 return;
2203         }
2204
2205         if (bp->link_vars.link_up) {
2206                 u16 line_speed;
2207
2208                 if (bp->state == BNX2X_STATE_OPEN)
2209                         netif_carrier_on(bp->dev);
2210                 netdev_info(bp->dev, "NIC Link is Up, ");
2211
2212                 line_speed = bp->link_vars.line_speed;
2213                 if (IS_E1HMF(bp)) {
2214                         u16 vn_max_rate;
2215
2216                         vn_max_rate =
2217                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2218                                  FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2219                         if (vn_max_rate < line_speed)
2220                                 line_speed = vn_max_rate;
2221                 }
2222                 pr_cont("%d Mbps ", line_speed);
2223
2224                 if (bp->link_vars.duplex == DUPLEX_FULL)
2225                         pr_cont("full duplex");
2226                 else
2227                         pr_cont("half duplex");
2228
2229                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2230                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2231                                 pr_cont(", receive ");
2232                                 if (bp->link_vars.flow_ctrl &
2233                                     BNX2X_FLOW_CTRL_TX)
2234                                         pr_cont("& transmit ");
2235                         } else {
2236                                 pr_cont(", transmit ");
2237                         }
2238                         pr_cont("flow control ON");
2239                 }
2240                 pr_cont("\n");
2241
2242         } else { /* link_down */
2243                 netif_carrier_off(bp->dev);
2244                 netdev_err(bp->dev, "NIC Link is Down\n");
2245         }
2246 }
2247
2248 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2249 {
2250         if (!BP_NOMCP(bp)) {
2251                 u8 rc;
2252
2253                 /* Initialize link parameters structure variables */
2254                 /* It is recommended to turn off RX FC for jumbo frames
2255                    for better performance */
2256                 if (bp->dev->mtu > 5000)
2257                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2258                 else
2259                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2260
2261                 bnx2x_acquire_phy_lock(bp);
2262
2263                 if (load_mode == LOAD_DIAG)
2264                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2265
2266                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2267
2268                 bnx2x_release_phy_lock(bp);
2269
2270                 bnx2x_calc_fc_adv(bp);
2271
2272                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2273                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2274                         bnx2x_link_report(bp);
2275                 }
2276
2277                 return rc;
2278         }
2279         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2280         return -EINVAL;
2281 }
2282
2283 static void bnx2x_link_set(struct bnx2x *bp)
2284 {
2285         if (!BP_NOMCP(bp)) {
2286                 bnx2x_acquire_phy_lock(bp);
2287                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2288                 bnx2x_release_phy_lock(bp);
2289
2290                 bnx2x_calc_fc_adv(bp);
2291         } else
2292                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2293 }
2294
2295 static void bnx2x__link_reset(struct bnx2x *bp)
2296 {
2297         if (!BP_NOMCP(bp)) {
2298                 bnx2x_acquire_phy_lock(bp);
2299                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2300                 bnx2x_release_phy_lock(bp);
2301         } else
2302                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2303 }
2304
2305 static u8 bnx2x_link_test(struct bnx2x *bp)
2306 {
2307         u8 rc = 0;
2308
2309         if (!BP_NOMCP(bp)) {
2310                 bnx2x_acquire_phy_lock(bp);
2311                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2312                 bnx2x_release_phy_lock(bp);
2313         } else
2314                 BNX2X_ERR("Bootcode is missing - can not test link\n");
2315
2316         return rc;
2317 }
2318
2319 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2320 {
2321         u32 r_param = bp->link_vars.line_speed / 8;
2322         u32 fair_periodic_timeout_usec;
2323         u32 t_fair;
2324
2325         memset(&(bp->cmng.rs_vars), 0,
2326                sizeof(struct rate_shaping_vars_per_port));
2327         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2328
2329         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2330         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2331
2332         /* this is the threshold below which no timer arming will occur.
2333            The 1.25 coefficient makes the threshold a little bigger than
2334            the real time, to compensate for timer inaccuracy */
2335         bp->cmng.rs_vars.rs_threshold =
2336                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2337
2338         /* resolution of fairness timer */
2339         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2340         /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2341         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2342
2343         /* this is the threshold below which we won't arm the timer anymore */
2344         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2345
2346         /* we multiply by 1e3/8 to get bytes/msec.
2347            We don't want the credits to exceed t_fair*FAIR_MEM
2348            (the algorithm's resolution) */
2349         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2350         /* since each tick is 4 usec */
2351         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2352 }
2353
2354 /* Calculates the sum of vn_min_rates.
2355    It's needed for further normalizing of the min_rates.
2356    Returns:
2357      sum of vn_min_rates.
2358        or
2359      0 - if all the min_rates are 0.
2360      In the latter case the fairness algorithm should be deactivated.
2361      If not all min_rates are zero then those that are zeroes will be set to 1.
2362  */
2363 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2364 {
2365         int all_zero = 1;
2366         int port = BP_PORT(bp);
2367         int vn;
2368
2369         bp->vn_weight_sum = 0;
2370         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2371                 int func = 2*vn + port;
2372                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2373                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2374                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2375
2376                 /* Skip hidden vns */
2377                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2378                         continue;
2379
2380                 /* If min rate is zero - set it to 1 */
2381                 if (!vn_min_rate)
2382                         vn_min_rate = DEF_MIN_RATE;
2383                 else
2384                         all_zero = 0;
2385
2386                 bp->vn_weight_sum += vn_min_rate;
2387         }
2388
2389         /* ... only if all min rates are zeros - disable fairness */
2390         if (all_zero) {
2391                 bp->cmng.flags.cmng_enables &=
2392                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2393                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes - "
2394                    "fairness will be disabled\n");
2395         } else
2396                 bp->cmng.flags.cmng_enables |=
2397                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2398 }
2399
2400 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2401 {
2402         struct rate_shaping_vars_per_vn m_rs_vn;
2403         struct fairness_vars_per_vn m_fair_vn;
2404         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2405         u16 vn_min_rate, vn_max_rate;
2406         int i;
2407
2408         /* If function is hidden - set min and max to zeroes */
2409         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2410                 vn_min_rate = 0;
2411                 vn_max_rate = 0;
2412
2413         } else {
2414                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2415                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2416                 /* If min rate is zero - set it to 1 */
2417                 if (!vn_min_rate)
2418                         vn_min_rate = DEF_MIN_RATE;
2419                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2420                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2421         }
2422         DP(NETIF_MSG_IFUP,
2423            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2424            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2425
2426         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2427         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2428
2429         /* global vn counter - maximal Mbps for this vn */
2430         m_rs_vn.vn_counter.rate = vn_max_rate;
2431
2432         /* quota - number of bytes transmitted in this period */
2433         m_rs_vn.vn_counter.quota =
2434                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
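        /* e.g. (illustrative): a vn_max_rate of 10000 Mbps with a 100 usec
         * period gives a quota of 10000 * 100 / 8 = 125000 bytes */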
2435
2436         if (bp->vn_weight_sum) {
2437                 /* credit for each period of the fairness algorithm:
2438                    number of bytes in T_FAIR (the vn's share of the port
2439                    rate). vn_weight_sum should not be larger than 10000,
2440                    thus T_FAIR_COEF / (8 * vn_weight_sum) will always be
2441                    greater than zero */
2442                 m_fair_vn.vn_credit_delta =
2443                         max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2444                                                    (8 * bp->vn_weight_sum))),
2445                               (bp->cmng.fair_vars.fair_threshold * 2));
2446                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2447                    m_fair_vn.vn_credit_delta);
2448         }
2449
2450         /* Store it to internal memory */
2451         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2452                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2453                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2454                        ((u32 *)(&m_rs_vn))[i]);
2455
2456         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2457                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2458                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2459                        ((u32 *)(&m_fair_vn))[i]);
2460 }
2461
2463 /* This function is called upon link interrupt */
2464 static void bnx2x_link_attn(struct bnx2x *bp)
2465 {
2466         u32 prev_link_status = bp->link_vars.link_status;
2467         /* Make sure that we are synced with the current statistics */
2468         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2469
2470         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2471
2472         if (bp->link_vars.link_up) {
2473
2474                 /* dropless flow control */
2475                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2476                         int port = BP_PORT(bp);
2477                         u32 pause_enabled = 0;
2478
2479                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2480                                 pause_enabled = 1;
2481
2482                         REG_WR(bp, BAR_USTRORM_INTMEM +
2483                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2484                                pause_enabled);
2485                 }
2486
2487                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2488                         struct host_port_stats *pstats;
2489
2490                         pstats = bnx2x_sp(bp, port_stats);
2491                         /* reset old bmac stats */
2492                         memset(&(pstats->mac_stx[0]), 0,
2493                                sizeof(struct mac_stx));
2494                 }
2495                 if (bp->state == BNX2X_STATE_OPEN)
2496                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2497         }
2498
2499         /* indicate link status only if link status actually changed */
2500         if (prev_link_status != bp->link_vars.link_status)
2501                 bnx2x_link_report(bp);
2502
2503         if (IS_E1HMF(bp)) {
2504                 int port = BP_PORT(bp);
2505                 int func;
2506                 int vn;
2507
2508                 /* Set the attention towards other drivers on the same port */
2509                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2510                         if (vn == BP_E1HVN(bp))
2511                                 continue;
2512
2513                         func = ((vn << 1) | port);
2514                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2515                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2516                 }
2517
2518                 if (bp->link_vars.link_up) {
2519                         int i;
2520
2521                         /* Init rate shaping and fairness contexts */
2522                         bnx2x_init_port_minmax(bp);
2523
2524                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2525                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2526
2527                         /* Store it to internal memory */
2528                         for (i = 0;
2529                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2530                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2531                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2532                                        ((u32 *)(&bp->cmng))[i]);
2533                 }
2534         }
2535 }
2536
2537 static void bnx2x__link_status_update(struct bnx2x *bp)
2538 {
2539         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2540                 return;
2541
2542         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2543
2544         if (bp->link_vars.link_up)
2545                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2546         else
2547                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2548
2549         bnx2x_calc_vn_weight_sum(bp);
2550
2551         /* indicate link status */
2552         bnx2x_link_report(bp);
2553 }
2554
2555 static void bnx2x_pmf_update(struct bnx2x *bp)
2556 {
2557         int port = BP_PORT(bp);
2558         u32 val;
2559
2560         bp->port.pmf = 1;
2561         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2562
2563         /* enable nig attention */
2564         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2565         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2566         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2567
2568         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2569 }
2570
2571 /* end of Link */
2572
2573 /* slow path */
2574
2575 /*
2576  * General service functions
2577  */
2578
2579 /* send the MCP a request, block until there is a reply */
2580 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2581 {
2582         int func = BP_FUNC(bp);
2583         u32 seq = ++bp->fw_seq;
2584         u32 rc = 0;
2585         u32 cnt = 1;
2586         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
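        /* slow chip revisions (presumably emulation/FPGA) get a 100ms
         * polling interval, real silicon gets 10ms */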
2587
2588         mutex_lock(&bp->fw_mb_mutex);
2589         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2590         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2591
2592         do {
2593                 /* let the FW do its magic ... */
2594                 msleep(delay);
2595
2596                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2597
2598                 /* Give the FW up to 5 seconds (500*10ms) */
2599         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2600
2601         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2602            cnt*delay, rc, seq);
2603
2604         /* is this a reply to our command? */
2605         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2606                 rc &= FW_MSG_CODE_MASK;
2607         else {
2608                 /* FW BUG! */
2609                 BNX2X_ERR("FW failed to respond!\n");
2610                 bnx2x_fw_dump(bp);
2611                 rc = 0;
2612         }
2613         mutex_unlock(&bp->fw_mb_mutex);
2614
2615         return rc;
2616 }
2617
2618 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2619 static void bnx2x_set_rx_mode(struct net_device *dev);
2620
2621 static void bnx2x_e1h_disable(struct bnx2x *bp)
2622 {
2623         int port = BP_PORT(bp);
2624
2625         netif_tx_disable(bp->dev);
2626
2627         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2628
2629         netif_carrier_off(bp->dev);
2630 }
2631
2632 static void bnx2x_e1h_enable(struct bnx2x *bp)
2633 {
2634         int port = BP_PORT(bp);
2635
2636         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2637
2638         /* Only the Tx queues need to be re-enabled */
2639         netif_tx_wake_all_queues(bp->dev);
2640
2641         /*
2642          * Do not call netif_carrier_on here - the link state check will
2643          * call it if the link is actually up
2644          */
2645 }
2646
2647 static void bnx2x_update_min_max(struct bnx2x *bp)
2648 {
2649         int port = BP_PORT(bp);
2650         int vn, i;
2651
2652         /* Init rate shaping and fairness contexts */
2653         bnx2x_init_port_minmax(bp);
2654
2655         bnx2x_calc_vn_weight_sum(bp);
2656
2657         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2658                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2659
2660         if (bp->port.pmf) {
2661                 int func;
2662
2663                 /* Set the attention towards other drivers on the same port */
2664                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2665                         if (vn == BP_E1HVN(bp))
2666                                 continue;
2667
2668                         func = ((vn << 1) | port);
2669                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2670                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2671                 }
2672
2673                 /* Store it to internal memory */
2674                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2675                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2676                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2677                                ((u32 *)(&bp->cmng))[i]);
2678         }
2679 }
2680
2681 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2682 {
2683         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2684
2685         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2686
2687                 /*
2688                  * This is the only place besides the function initialization
2689                  * where the bp->flags can change so it is done without any
2690                  * locks
2691                  */
2692                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2693                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2694                         bp->flags |= MF_FUNC_DIS;
2695
2696                         bnx2x_e1h_disable(bp);
2697                 } else {
2698                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2699                         bp->flags &= ~MF_FUNC_DIS;
2700
2701                         bnx2x_e1h_enable(bp);
2702                 }
2703                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2704         }
2705         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2706
2707                 bnx2x_update_min_max(bp);
2708                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2709         }
2710
2711         /* Report results to MCP */
2712         if (dcc_event)
2713                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2714         else
2715                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2716 }
2717
2718 /* must be called under the spq lock */
2719 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2720 {
2721         struct eth_spe *next_spe = bp->spq_prod_bd;
2722
2723         if (bp->spq_prod_bd == bp->spq_last_bd) {
2724                 bp->spq_prod_bd = bp->spq;
2725                 bp->spq_prod_idx = 0;
2726                 DP(NETIF_MSG_TIMER, "end of spq\n");
2727         } else {
2728                 bp->spq_prod_bd++;
2729                 bp->spq_prod_idx++;
2730         }
2731         return next_spe;
2732 }
2733
2734 /* must be called under the spq lock */
2735 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2736 {
2737         int func = BP_FUNC(bp);
2738
2739         /* Make sure that BD data is updated before writing the producer */
2740         wmb();
2741
2742         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2743                bp->spq_prod_idx);
2744         mmiowb();
2745 }
2746
2747 /* the slow path queue is odd since completions arrive on the fastpath ring */
2748 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2749                          u32 data_hi, u32 data_lo, int common)
2750 {
2751         struct eth_spe *spe;
2752
2753 #ifdef BNX2X_STOP_ON_ERROR
2754         if (unlikely(bp->panic))
2755                 return -EIO;
2756 #endif
2757
2758         spin_lock_bh(&bp->spq_lock);
2759
2760         if (!bp->spq_left) {
2761                 BNX2X_ERR("BUG! SPQ ring full!\n");
2762                 spin_unlock_bh(&bp->spq_lock);
2763                 bnx2x_panic();
2764                 return -EBUSY;
2765         }
2766
2767         spe = bnx2x_sp_get_next(bp);
2768
2769         /* CID needs the port number to be encoded in it */
2770         spe->hdr.conn_and_cmd_data =
2771                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2772                                     HW_CID(bp, cid));
2773         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2774         if (common)
2775                 spe->hdr.type |=
2776                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2777
2778         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2779         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2780
2781         bp->spq_left--;
2782
2783         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2784            "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2785            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2786            (u32)(U64_LO(bp->spq_mapping) +
2787            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2788            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2789
2790         bnx2x_sp_prod_update(bp);
2791         spin_unlock_bh(&bp->spq_lock);
2792         return 0;
2793 }
2794
2795 /* acquire split MCP access lock register */
2796 static int bnx2x_acquire_alr(struct bnx2x *bp)
2797 {
2798         u32 j, val;
2799         int rc = 0;
2800
2801         might_sleep();
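        /* acquire by writing bit 31 to the MCP scratch register and
         * reading it back: if the bit reads back as set, the lock is ours */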
2802         for (j = 0; j < 1000; j++) {
2803                 val = (1UL << 31);
2804                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2805                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2806                 if (val & (1UL << 31))
2807                         break;
2808
2809                 msleep(5);
2810         }
2811         if (!(val & (1UL << 31))) {
2812                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2813                 rc = -EBUSY;
2814         }
2815
2816         return rc;
2817 }
2818
2819 /* release split MCP access lock register */
2820 static void bnx2x_release_alr(struct bnx2x *bp)
2821 {
2822         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2823 }
2824
2825 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2826 {
2827         struct host_def_status_block *def_sb = bp->def_status_blk;
2828         u16 rc = 0;
2829
2830         barrier(); /* status block is written to by the chip */
2831         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2832                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2833                 rc |= 1;
2834         }
2835         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2836                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2837                 rc |= 2;
2838         }
2839         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2840                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2841                 rc |= 4;
2842         }
2843         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2844                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2845                 rc |= 8;
2846         }
2847         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2848                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2849                 rc |= 16;
2850         }
2851         return rc;
2852 }
2853
2854 /*
2855  * slow path service functions
2856  */
2857
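/*
 * Handle newly asserted attention bits: mask them in the AEU so they are
 * not re-signalled, record them in attn_state, service the hard-wired
 * sources (NIG link events with the NIG interrupt temporarily masked,
 * software timers, GPIOs, general attentions) and finally write the
 * asserted bits to the HC to acknowledge them.
 */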
2858 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2859 {
2860         int port = BP_PORT(bp);
2861         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2862                        COMMAND_REG_ATTN_BITS_SET);
2863         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2864                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2865         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2866                                        NIG_REG_MASK_INTERRUPT_PORT0;
2867         u32 aeu_mask;
2868         u32 nig_mask = 0;
2869
2870         if (bp->attn_state & asserted)
2871                 BNX2X_ERR("IGU ERROR\n");
2872
2873         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2874         aeu_mask = REG_RD(bp, aeu_addr);
2875
2876         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2877            aeu_mask, asserted);
2878         aeu_mask &= ~(asserted & 0x3ff);
2879         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2880
2881         REG_WR(bp, aeu_addr, aeu_mask);
2882         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2883
2884         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2885         bp->attn_state |= asserted;
2886         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2887
2888         if (asserted & ATTN_HARD_WIRED_MASK) {
2889                 if (asserted & ATTN_NIG_FOR_FUNC) {
2890
2891                         bnx2x_acquire_phy_lock(bp);
2892
2893                         /* save nig interrupt mask */
2894                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2895                         REG_WR(bp, nig_int_mask_addr, 0);
2896
2897                         bnx2x_link_attn(bp);
2898
2899                         /* handle unicore attn? */
2900                 }
2901                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2902                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2903
2904                 if (asserted & GPIO_2_FUNC)
2905                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2906
2907                 if (asserted & GPIO_3_FUNC)
2908                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2909
2910                 if (asserted & GPIO_4_FUNC)
2911                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2912
2913                 if (port == 0) {
2914                         if (asserted & ATTN_GENERAL_ATTN_1) {
2915                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2916                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2917                         }
2918                         if (asserted & ATTN_GENERAL_ATTN_2) {
2919                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2920                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2921                         }
2922                         if (asserted & ATTN_GENERAL_ATTN_3) {
2923                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2924                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2925                         }
2926                 } else {
2927                         if (asserted & ATTN_GENERAL_ATTN_4) {
2928                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2929                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2930                         }
2931                         if (asserted & ATTN_GENERAL_ATTN_5) {
2932                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2933                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2934                         }
2935                         if (asserted & ATTN_GENERAL_ATTN_6) {
2936                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2937                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2938                         }
2939                 }
2940
2941         } /* if hardwired */
2942
2943         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2944            asserted, hc_addr);
2945         REG_WR(bp, hc_addr, asserted);
2946
2947         /* now set back the mask */
2948         if (asserted & ATTN_NIG_FOR_FUNC) {
2949                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2950                 bnx2x_release_phy_lock(bp);
2951         }
2952 }
2953
2954 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2955 {
2956         int port = BP_PORT(bp);
2957
2958         /* mark the failure */
2959         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2960         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2961         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2962                  bp->link_params.ext_phy_config);
2963
2964         /* log the failure */
2965         netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2966                " the driver to shut down the card to prevent permanent"
2967                " damage.  Please contact OEM Support for assistance\n");
2968 }
2969
2970 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2971 {
2972         int port = BP_PORT(bp);
2973         int reg_offset;
2974         u32 val, swap_val, swap_override;
2975
2976         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2977                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2978
2979         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2980
2981                 val = REG_RD(bp, reg_offset);
2982                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2983                 REG_WR(bp, reg_offset, val);
2984
2985                 BNX2X_ERR("SPIO5 hw attention\n");
2986
2987                 /* Fan failure attention */
2988                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2989                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2990                         /* Low power mode is controlled by GPIO 2 */
2991                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2992                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2993                         /* The PHY reset is controlled by GPIO 1 */
2994                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2995                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2996                         break;
2997
2998                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2999                         /* The PHY reset is controlled by GPIO 1 */
3000                         /* fake the port number to cancel the swap done in
3001                            set_gpio() */
3002                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
3003                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
3004                         port = (swap_val && swap_override) ^ 1;
3005                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3006                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3007                         break;
3008
3009                 default:
3010                         break;
3011                 }
3012                 bnx2x_fan_failure(bp);
3013         }
3014
3015         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3016                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3017                 bnx2x_acquire_phy_lock(bp);
3018                 bnx2x_handle_module_detect_int(&bp->link_params);
3019                 bnx2x_release_phy_lock(bp);
3020         }
3021
3022         if (attn & HW_INTERRUT_ASSERT_SET_0) {
3023
3024                 val = REG_RD(bp, reg_offset);
3025                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3026                 REG_WR(bp, reg_offset, val);
3027
3028                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3029                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3030                 bnx2x_panic();
3031         }
3032 }
3033
3034 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3035 {
3036         u32 val;
3037
3038         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3039
3040                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3041                 BNX2X_ERR("DB hw attention 0x%x\n", val);
3042                 /* DORQ discard attention */
3043                 if (val & 0x2)
3044                         BNX2X_ERR("FATAL error from DORQ\n");
3045         }
3046
3047         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3048
3049                 int port = BP_PORT(bp);
3050                 int reg_offset;
3051
3052                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3053                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3054
3055                 val = REG_RD(bp, reg_offset);
3056                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3057                 REG_WR(bp, reg_offset, val);
3058
3059                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3060                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3061                 bnx2x_panic();
3062         }
3063 }
3064
3065 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3066 {
3067         u32 val;
3068
3069         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3070
3071                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3072                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3073                 /* CFC error attention */
3074                 if (val & 0x2)
3075                         BNX2X_ERR("FATAL error from CFC\n");
3076         }
3077
3078         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3079
3080                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3081                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3082                 /* RQ_USDMDP_FIFO_OVERFLOW */
3083                 if (val & 0x18000)
3084                         BNX2X_ERR("FATAL error from PXP\n");
3085         }
3086
3087         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3088
3089                 int port = BP_PORT(bp);
3090                 int reg_offset;
3091
3092                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3093                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3094
3095                 val = REG_RD(bp, reg_offset);
3096                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3097                 REG_WR(bp, reg_offset, val);
3098
3099                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3100                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3101                 bnx2x_panic();
3102         }
3103 }
3104
3105 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3106 {
3107         u32 val;
3108
3109         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3110
3111                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3112                         int func = BP_FUNC(bp);
3113
3114                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3115                         bp->mf_config = SHMEM_RD(bp,
3116                                            mf_cfg.func_mf_config[func].config);
3117                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3118                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3119                                 bnx2x_dcc_event(bp,
3120                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3121                         bnx2x__link_status_update(bp);
3122                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3123                                 bnx2x_pmf_update(bp);
3124
3125                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3126
3127                         BNX2X_ERR("MC assert!\n");
3128                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3129                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3130                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3131                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3132                         bnx2x_panic();
3133
3134                 } else if (attn & BNX2X_MCP_ASSERT) {
3135
3136                         BNX2X_ERR("MCP assert!\n");
3137                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3138                         bnx2x_fw_dump(bp);
3139
3140                 } else
3141                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3142         }
3143
3144         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3145                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3146                 if (attn & BNX2X_GRC_TIMEOUT) {
3147                         val = CHIP_IS_E1H(bp) ?
3148                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3149                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3150                 }
3151                 if (attn & BNX2X_GRC_RSV) {
3152                         val = CHIP_IS_E1H(bp) ?
3153                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3154                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3155                 }
3156                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3157         }
3158 }
3159
3160 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3161 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3162
3163
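/*
 * The generic MISC register below is used for driver recovery
 * bookkeeping: the low 16 bits hold a load counter (how many functions
 * currently have the driver loaded) and bit 16 is a "reset in progress"
 * flag, as encoded by the masks that follow.
 */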
3164 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
3165 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
3166 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3167 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
3168 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
3169 #define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3170 /*
3171  * should be run under rtnl lock
3172  */
3173 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3174 {
3175         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3176         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3177         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3178         barrier();
3179         mmiowb();
3180 }
3181
3182 /*
3183  * should be run under rtnl lock
3184  */
3185 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3186 {
3187         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3188         val |= (1 << RESET_DONE_FLAG_SHIFT);
3189         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3190         barrier();
3191         mmiowb();
3192 }
3193
3194 /*
3195  * should be run under rtnl lock
3196  */
3197 static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3198 {
3199         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3200         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3201         return (val & RESET_DONE_FLAG_MASK) ? false : true;
3202 }
3203
3204 /*
3205  * should be run under rtnl lock
3206  */
3207 static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3208 {
3209         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3210
3211         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3212
3213         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3214         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3215         barrier();
3216         mmiowb();
3217 }
3218
3219 /*
3220  * should be run under rtnl lock
3221  */
3222 static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3223 {
3224         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3225
3226         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3227
3228         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3229         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3230         barrier();
3231         mmiowb();
3232
3233         return val1;
3234 }
3235
3236 /*
3237  * should be run under rtnl lock
3238  */
3239 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3240 {
3241         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3242 }
3243
3244 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3245 {
3246         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3247         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3248 }
3249
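/*
 * Helpers for reporting parity errors: each bnx2x_print_blocks_with_
 * parityN() function walks one AEU signal word bit by bit and appends
 * the name of every block whose parity bit is set, with par_num
 * threading the running count so the names come out comma-separated.
 */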
3250 static inline void _print_next_block(int idx, const char *blk)
3251 {
3252         if (idx)
3253                 pr_cont(", ");
3254         pr_cont("%s", blk);
3255 }
3256
3257 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3258 {
3259         int i = 0;
3260         u32 cur_bit = 0;
3261         for (i = 0; sig; i++) {
3262                 cur_bit = ((u32)0x1 << i);
3263                 if (sig & cur_bit) {
3264                         switch (cur_bit) {
3265                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3266                                 _print_next_block(par_num++, "BRB");
3267                                 break;
3268                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3269                                 _print_next_block(par_num++, "PARSER");
3270                                 break;
3271                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3272                                 _print_next_block(par_num++, "TSDM");
3273                                 break;
3274                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3275                                 _print_next_block(par_num++, "SEARCHER");
3276                                 break;
3277                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3278                                 _print_next_block(par_num++, "TSEMI");
3279                                 break;
3280                         }
3281
3282                         /* Clear the bit */
3283                         sig &= ~cur_bit;
3284                 }
3285         }
3286
3287         return par_num;
3288 }
3289
3290 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3291 {
3292         int i = 0;
3293         u32 cur_bit = 0;
3294         for (i = 0; sig; i++) {
3295                 cur_bit = ((u32)0x1 << i);
3296                 if (sig & cur_bit) {
3297                         switch (cur_bit) {
3298                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3299                                 _print_next_block(par_num++, "PBCLIENT");
3300                                 break;
3301                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3302                                 _print_next_block(par_num++, "QM");
3303                                 break;
3304                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3305                                 _print_next_block(par_num++, "XSDM");
3306                                 break;
3307                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3308                                 _print_next_block(par_num++, "XSEMI");
3309                                 break;
3310                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3311                                 _print_next_block(par_num++, "DOORBELLQ");
3312                                 break;
3313                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3314                                 _print_next_block(par_num++, "VAUX PCI CORE");
3315                                 break;
3316                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3317                                 _print_next_block(par_num++, "DEBUG");
3318                                 break;
3319                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3320                                 _print_next_block(par_num++, "USDM");
3321                                 break;
3322                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3323                                 _print_next_block(par_num++, "USEMI");
3324                                 break;
3325                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3326                                 _print_next_block(par_num++, "UPB");
3327                                 break;
3328                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3329                                 _print_next_block(par_num++, "CSDM");
3330                                 break;
3331                         }
3332
3333                         /* Clear the bit */
3334                         sig &= ~cur_bit;
3335                 }
3336         }
3337
3338         return par_num;
3339 }
3340
3341 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3342 {
3343         int i = 0;
3344         u32 cur_bit = 0;
3345         for (i = 0; sig; i++) {
3346                 cur_bit = ((u32)0x1 << i);
3347                 if (sig & cur_bit) {
3348                         switch (cur_bit) {
3349                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3350                                 _print_next_block(par_num++, "CSEMI");
3351                                 break;
3352                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3353                                 _print_next_block(par_num++, "PXP");
3354                                 break;
3355                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3356                                 _print_next_block(par_num++,
3357                                         "PXPPCICLOCKCLIENT");
3358                                 break;
3359                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3360                                 _print_next_block(par_num++, "CFC");
3361                                 break;
3362                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3363                                 _print_next_block(par_num++, "CDU");
3364                                 break;
3365                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3366                                 _print_next_block(par_num++, "IGU");
3367                                 break;
3368                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3369                                 _print_next_block(par_num++, "MISC");
3370                                 break;
3371                         }
3372
3373                         /* Clear the bit */
3374                         sig &= ~cur_bit;
3375                 }
3376         }
3377
3378         return par_num;
3379 }
3380
3381 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3382 {
3383         int i = 0;
3384         u32 cur_bit = 0;
3385         for (i = 0; sig; i++) {
3386                 cur_bit = ((u32)0x1 << i);
3387                 if (sig & cur_bit) {
3388                         switch (cur_bit) {
3389                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3390                                 _print_next_block(par_num++, "MCP ROM");
3391                                 break;
3392                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3393                                 _print_next_block(par_num++, "MCP UMP RX");
3394                                 break;
3395                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3396                                 _print_next_block(par_num++, "MCP UMP TX");
3397                                 break;
3398                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3399                                 _print_next_block(par_num++, "MCP SCPAD");
3400                                 break;
3401                         }
3402
3403                         /* Clear the bit */
3404                         sig &= ~cur_bit;
3405                 }
3406         }
3407
3408         return par_num;
3409 }
3410
3411 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3412                                      u32 sig2, u32 sig3)
3413 {
3414         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3415             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3416                 int par_num = 0;
3417                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
3418                         "[0]:0x%08x [1]:0x%08x "
3419                         "[2]:0x%08x [3]:0x%08x\n",
3420                           sig0 & HW_PRTY_ASSERT_SET_0,
3421                           sig1 & HW_PRTY_ASSERT_SET_1,
3422                           sig2 & HW_PRTY_ASSERT_SET_2,
3423                           sig3 & HW_PRTY_ASSERT_SET_3);
3424                 printk(KERN_ERR "%s: Parity errors detected in blocks: ",
3425                        bp->dev->name);
3426                 par_num = bnx2x_print_blocks_with_parity0(
3427                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3428                 par_num = bnx2x_print_blocks_with_parity1(
3429                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3430                 par_num = bnx2x_print_blocks_with_parity2(
3431                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3432                 par_num = bnx2x_print_blocks_with_parity3(
3433                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3434                 pr_cont("\n");
3435                 return true;
3436         } else
3437                 return false;
3438 }
3439
3440 static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3441 {
3442         struct attn_route attn;
3443         int port = BP_PORT(bp);
3444
3445         attn.sig[0] = REG_RD(bp,
3446                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3447                              port*4);
3448         attn.sig[1] = REG_RD(bp,
3449                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3450                              port*4);
3451         attn.sig[2] = REG_RD(bp,
3452                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3453                              port*4);
3454         attn.sig[3] = REG_RD(bp,
3455                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3456                              port*4);
3457
3458         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3459                                         attn.sig[3]);
3460 }
3461
3462 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3463 {
3464         struct attn_route attn, *group_mask;
3465         int port = BP_PORT(bp);
3466         int index;
3467         u32 reg_addr;
3468         u32 val;
3469         u32 aeu_mask;
3470
3471         /* need to take HW lock because MCP or other port might also
3472            try to handle this event */
3473         bnx2x_acquire_alr(bp);
3474
3475         if (bnx2x_chk_parity_attn(bp)) {
3476                 bp->recovery_state = BNX2X_RECOVERY_INIT;
3477                 bnx2x_set_reset_in_progress(bp);
3478                 schedule_delayed_work(&bp->reset_task, 0);
3479                 /* Disable HW interrupts */
3480                 bnx2x_int_disable(bp);
3481                 bnx2x_release_alr(bp);
3482                 /* In case of parity errors don't handle attentions so that
3483                  * the other function can also "see" the parity errors.
3484                  */
3485                 return;
3486         }
3487
3488         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3489         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3490         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3491         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3492         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3493            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3494
3495         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3496                 if (deasserted & (1 << index)) {
3497                         group_mask = &bp->attn_group[index];
3498
3499                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3500                            index, group_mask->sig[0], group_mask->sig[1],
3501                            group_mask->sig[2], group_mask->sig[3]);
3502
3503                         bnx2x_attn_int_deasserted3(bp,
3504                                         attn.sig[3] & group_mask->sig[3]);
3505                         bnx2x_attn_int_deasserted1(bp,
3506                                         attn.sig[1] & group_mask->sig[1]);
3507                         bnx2x_attn_int_deasserted2(bp,
3508                                         attn.sig[2] & group_mask->sig[2]);
3509                         bnx2x_attn_int_deasserted0(bp,
3510                                         attn.sig[0] & group_mask->sig[0]);
3511                 }
3512         }
3513
3514         bnx2x_release_alr(bp);
3515
3516         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3517
3518         val = ~deasserted;
3519         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3520            val, reg_addr);
3521         REG_WR(bp, reg_addr, val);
3522
3523         if (~bp->attn_state & deasserted)
3524                 BNX2X_ERR("IGU ERROR\n");
3525
3526         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3527                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3528
3529         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3530         aeu_mask = REG_RD(bp, reg_addr);
3531
3532         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3533            aeu_mask, deasserted);
3534         aeu_mask |= (deasserted & 0x3ff);
3535         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3536
3537         REG_WR(bp, reg_addr, aeu_mask);
3538         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3539
3540         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3541         bp->attn_state &= ~deasserted;
3542         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3543 }
3544
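/*
 * Derive the attention state transitions from the attention status
 * block.  A bit is newly asserted when it is set in attn_bits but not
 * yet acknowledged or recorded in attn_state; it is deasserted when it
 * is clear in attn_bits while still acknowledged and recorded:
 *
 *   asserted   =  attn_bits & ~attn_ack & ~attn_state
 *   deasserted = ~attn_bits &  attn_ack &  attn_state
 */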
3545 static void bnx2x_attn_int(struct bnx2x *bp)
3546 {
3547         /* read local copy of bits */
3548         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3549                                                                 attn_bits);
3550         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3551                                                                 attn_bits_ack);
3552         u32 attn_state = bp->attn_state;
3553
3554         /* look for changed bits */
3555         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3556         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3557
3558         DP(NETIF_MSG_HW,
3559            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3560            attn_bits, attn_ack, asserted, deasserted);
3561
3562         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3563                 BNX2X_ERR("BAD attention state\n");
3564
3565         /* handle bits that were raised */
3566         if (asserted)
3567                 bnx2x_attn_int_asserted(bp, asserted);
3568
3569         if (deasserted)
3570                 bnx2x_attn_int_deasserted(bp, deasserted);
3571 }
3572
3573 static void bnx2x_sp_task(struct work_struct *work)
3574 {
3575         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3576         u16 status;
3577
3578         /* Return here if interrupt is disabled */
3579         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3580                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3581                 return;
3582         }
3583
3584         status = bnx2x_update_dsb_idx(bp);
3585 /*      if (status == 0)                                     */
3586 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3587
3588         DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3589
3590         /* HW attentions */
3591         if (status & 0x1) {
3592                 bnx2x_attn_int(bp);
3593                 status &= ~0x1;
3594         }
3595
3596         /* CStorm events: STAT_QUERY */
3597         if (status & 0x2) {
3598                 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
3599                 status &= ~0x2;
3600         }
3601
3602         if (unlikely(status))
3603                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3604                    status);
3605
3606         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3607                      IGU_INT_NOP, 1);
3608         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3609                      IGU_INT_NOP, 1);
3610         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3611                      IGU_INT_NOP, 1);
3612         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3613                      IGU_INT_NOP, 1);
3614         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3615                      IGU_INT_ENABLE, 1);
3616 }
3617
3618 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3619 {
3620         struct net_device *dev = dev_instance;
3621         struct bnx2x *bp = netdev_priv(dev);
3622
3623         /* Return here if interrupt is disabled */
3624         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3625                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3626                 return IRQ_HANDLED;
3627         }
3628
3629         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3630
3631 #ifdef BNX2X_STOP_ON_ERROR
3632         if (unlikely(bp->panic))
3633                 return IRQ_HANDLED;
3634 #endif
3635
3636 #ifdef BCM_CNIC
3637         {
3638                 struct cnic_ops *c_ops;
3639
3640                 rcu_read_lock();
3641                 c_ops = rcu_dereference(bp->cnic_ops);
3642                 if (c_ops)
3643                         c_ops->cnic_handler(bp->cnic_data, NULL);
3644                 rcu_read_unlock();
3645         }
3646 #endif
3647         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3648
3649         return IRQ_HANDLED;
3650 }
3651
3652 /* end of slow path */
3653
3654 /* Statistics */
3655
3656 /****************************************************************************
3657 * Macros
3658 ****************************************************************************/
3659
3660 /* sum[hi:lo] += add[hi:lo] */
3661 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3662         do { \
3663                 s_lo += a_lo; \
3664                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3665         } while (0)
3666
3667 /* difference = minuend - subtrahend */
3668 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3669         do { \
3670                 if (m_lo < s_lo) { \
3671                         /* underflow */ \
3672                         d_hi = m_hi - s_hi; \
3673                                 /* we can borrow 1 from the high word */ \
3674                                 /* we can 'loan' 1 */ \
3675                                 d_hi--; \
3676                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3677                         } else { \
3678                                 /* m_hi <= s_hi */ \
3679                                 d_hi = 0; \
3680                                 d_lo = 0; \
3681                         } \
3682                 } else { \
3683                         /* m_lo >= s_lo */ \
3684                         if (m_hi < s_hi) { \
3685                                 d_hi = 0; \
3686                                 d_lo = 0; \
3687                         } else { \
3688                                 /* m_hi >= s_hi */ \
3689                                 d_hi = m_hi - s_hi; \
3690                                 d_lo = m_lo - s_lo; \
3691                         } \
3692                 } \
3693         } while (0)
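/*
 * Note that DIFF_64 clamps the result to zero whenever the subtrahend
 * is larger than the minuend (the d_hi = d_lo = 0 branches above): a
 * negative delta, which should not happen for the monotonically
 * increasing HW counters this is applied to, is treated as no change
 * rather than as a huge unsigned wrap-around.
 */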
3694
3695 #define UPDATE_STAT64(s, t) \
3696         do { \
3697                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3698                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3699                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3700                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3701                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3702                        pstats->mac_stx[1].t##_lo, diff.lo); \
3703         } while (0)
3704
3705 #define UPDATE_STAT64_NIG(s, t) \
3706         do { \
3707                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3708                         diff.lo, new->s##_lo, old->s##_lo); \
3709                 ADD_64(estats->t##_hi, diff.hi, \
3710                        estats->t##_lo, diff.lo); \
3711         } while (0)
3712
3713 /* sum[hi:lo] += add */
3714 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3715         do { \
3716                 s_lo += a; \
3717                 s_hi += (s_lo < a) ? 1 : 0; \
3718         } while (0)
3719
3720 #define UPDATE_EXTEND_STAT(s) \
3721         do { \
3722                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3723                               pstats->mac_stx[1].s##_lo, \
3724                               new->s); \
3725         } while (0)
3726
3727 #define UPDATE_EXTEND_TSTAT(s, t) \
3728         do { \
3729                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3730                 old_tclient->s = tclient->s; \
3731                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3732         } while (0)
3733
3734 #define UPDATE_EXTEND_USTAT(s, t) \
3735         do { \
3736                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3737                 old_uclient->s = uclient->s; \
3738                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3739         } while (0)
3740
3741 #define UPDATE_EXTEND_XSTAT(s, t) \
3742         do { \
3743                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3744                 old_xclient->s = xclient->s; \
3745                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3746         } while (0)
3747
3748 /* minuend -= subtrahend */
3749 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3750         do { \
3751                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3752         } while (0)
3753
3754 /* minuend[hi:lo] -= subtrahend */
3755 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3756         do { \
3757                 SUB_64(m_hi, 0, m_lo, s); \
3758         } while (0)
3759
3760 #define SUB_EXTEND_USTAT(s, t) \
3761         do { \
3762                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3763                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3764         } while (0)
3765
3766 /*
3767  * General service functions
3768  */
3769
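/*
 * Fold a {hi, lo} pair of u32 counters (stored hi first) into a long.
 * On 64-bit builds this returns the full 64-bit value; on 32-bit builds
 * only the low 32 bits fit, so the high word is dropped.
 */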
3770 static inline long bnx2x_hilo(u32 *hiref)
3771 {
3772         u32 lo = *(hiref + 1);
3773 #if (BITS_PER_LONG == 64)
3774         u32 hi = *hiref;
3775
3776         return HILO_U64(hi, lo);
3777 #else
3778         return lo;
3779 #endif
3780 }
3781
3782 /*
3783  * Init service functions
3784  */
3785
3786 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3787 {
3788         if (!bp->stats_pending) {
3789                 struct eth_query_ramrod_data ramrod_data = {0};
3790                 int i, rc;
3791
3792                 spin_lock_bh(&bp->stats_lock);
3793
3794                 ramrod_data.drv_counter = bp->stats_counter++;
3795                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3796                 for_each_queue(bp, i)
3797                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3798
3799                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3800                                    ((u32 *)&ramrod_data)[1],
3801                                    ((u32 *)&ramrod_data)[0], 0);
3802                 if (rc == 0) {
3803                         /* stats ramrod has its own slot on the spq */
3804                         bp->spq_left++;
3805                         bp->stats_pending = 1;
3806                 }
3807
3808                 spin_unlock_bh(&bp->stats_lock);
3809         }
3810 }
3811
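/*
 * Kick off the queued DMAE statistics commands.  When more than one
 * command was prepared (executer_idx != 0) a "loader" command is built
 * here: it copies a prepared command from host memory into the DMAE
 * command memory, and its completion value is written to one of the
 * dmae_reg_go_c[] "go" registers, which is what actually starts the
 * copied command.  Otherwise the single function-statistics command is
 * posted directly.
 */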
3812 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3813 {
3814         struct dmae_command *dmae = &bp->stats_dmae;
3815         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3816
3817         *stats_comp = DMAE_COMP_VAL;
3818         if (CHIP_REV_IS_SLOW(bp))
3819                 return;
3820
3821         /* loader */
3822         if (bp->executer_idx) {
3823                 int loader_idx = PMF_DMAE_C(bp);
3824
3825                 memset(dmae, 0, sizeof(struct dmae_command));
3826
3827                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3828                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3829                                 DMAE_CMD_DST_RESET |
3830 #ifdef __BIG_ENDIAN
3831                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3832 #else
3833                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3834 #endif
3835                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3836                                                DMAE_CMD_PORT_0) |
3837                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3838                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3839                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3840                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3841                                      sizeof(struct dmae_command) *
3842                                      (loader_idx + 1)) >> 2;
3843                 dmae->dst_addr_hi = 0;
3844                 dmae->len = sizeof(struct dmae_command) >> 2;
3845                 if (CHIP_IS_E1(bp))
3846                         dmae->len--;
3847                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3848                 dmae->comp_addr_hi = 0;
3849                 dmae->comp_val = 1;
3850
3851                 *stats_comp = 0;
3852                 bnx2x_post_dmae(bp, dmae, loader_idx);
3853
3854         } else if (bp->func_stx) {
3855                 *stats_comp = 0;
3856                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3857         }
3858 }
3859
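/*
 * Poll (in 1 ms naps, 10 ms total) for the DMAE completion value.  Note
 * that a timeout is only logged: the function still returns 1, so
 * callers cannot distinguish success from timeout.
 */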
3860 static int bnx2x_stats_comp(struct bnx2x *bp)
3861 {
3862         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3863         int cnt = 10;
3864
3865         might_sleep();
3866         while (*stats_comp != DMAE_COMP_VAL) {
3867                 if (!cnt) {
3868                         BNX2X_ERR("timeout waiting for stats to finish\n");
3869                         break;
3870                 }
3871                 cnt--;
3872                 msleep(1);
3873         }
3874         return 1;
3875 }
3876
3877 /*
3878  * Statistics service functions
3879  */
3880
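/*
 * When this function becomes the port management function (PMF) it
 * inherits the port statistics accumulated so far, reading them back
 * from the port_stx area in shared memory.  The copy is split into two
 * DMAE reads because a single read is limited to DMAE_LEN32_RD_MAX
 * dwords.
 */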
3881 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3882 {
3883         struct dmae_command *dmae;
3884         u32 opcode;
3885         int loader_idx = PMF_DMAE_C(bp);
3886         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3887
3888         /* sanity */
3889         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3890                 BNX2X_ERR("BUG!\n");
3891                 return;
3892         }
3893
3894         bp->executer_idx = 0;
3895
3896         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3897                   DMAE_CMD_C_ENABLE |
3898                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3899 #ifdef __BIG_ENDIAN
3900                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3901 #else
3902                   DMAE_CMD_ENDIANITY_DW_SWAP |
3903 #endif
3904                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3905                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3906
3907         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3908         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3909         dmae->src_addr_lo = bp->port.port_stx >> 2;
3910         dmae->src_addr_hi = 0;
3911         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3912         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3913         dmae->len = DMAE_LEN32_RD_MAX;
3914         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3915         dmae->comp_addr_hi = 0;
3916         dmae->comp_val = 1;
3917
3918         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3919         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3920         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3921         dmae->src_addr_hi = 0;
3922         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3923                                    DMAE_LEN32_RD_MAX * 4);
3924         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3925                                    DMAE_LEN32_RD_MAX * 4);
3926         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3927         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3928         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3929         dmae->comp_val = DMAE_COMP_VAL;
3930
3931         *stats_comp = 0;
3932         bnx2x_hw_stats_post(bp);
3933         bnx2x_stats_comp(bp);
3934 }
3935
3936 static void bnx2x_port_stats_init(struct bnx2x *bp)
3937 {
3938         struct dmae_command *dmae;
3939         int port = BP_PORT(bp);
3940         int vn = BP_E1HVN(bp);
3941         u32 opcode;
3942         int loader_idx = PMF_DMAE_C(bp);
3943         u32 mac_addr;
3944         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3945
3946         /* sanity */
3947         if (!bp->link_vars.link_up || !bp->port.pmf) {
3948                 BNX2X_ERR("BUG!\n");
3949                 return;
3950         }
3951
3952         bp->executer_idx = 0;
3953
3954         /* MCP */
3955         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3956                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3957                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3958 #ifdef __BIG_ENDIAN
3959                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3960 #else
3961                   DMAE_CMD_ENDIANITY_DW_SWAP |
3962 #endif
3963                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3964                   (vn << DMAE_CMD_E1HVN_SHIFT));
3965
3966         if (bp->port.port_stx) {
3967
3968                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3969                 dmae->opcode = opcode;
3970                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3971                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3972                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3973                 dmae->dst_addr_hi = 0;
3974                 dmae->len = sizeof(struct host_port_stats) >> 2;
3975                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3976                 dmae->comp_addr_hi = 0;
3977                 dmae->comp_val = 1;
3978         }
3979
3980         if (bp->func_stx) {
3981
3982                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3983                 dmae->opcode = opcode;
3984                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3985                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3986                 dmae->dst_addr_lo = bp->func_stx >> 2;
3987                 dmae->dst_addr_hi = 0;
3988                 dmae->len = sizeof(struct host_func_stats) >> 2;
3989                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3990                 dmae->comp_addr_hi = 0;
3991                 dmae->comp_val = 1;
3992         }
3993
3994         /* MAC */
3995         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3996                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3997                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3998 #ifdef __BIG_ENDIAN
3999                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4000 #else
4001                   DMAE_CMD_ENDIANITY_DW_SWAP |
4002 #endif
4003                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4004                   (vn << DMAE_CMD_E1HVN_SHIFT));
4005
4006         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
4007
4008                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4009                                    NIG_REG_INGRESS_BMAC0_MEM);
4010
4011                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4012                    BIGMAC_REGISTER_TX_STAT_GTBYT */
4013                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4014                 dmae->opcode = opcode;
4015                 dmae->src_addr_lo = (mac_addr +
4016                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4017                 dmae->src_addr_hi = 0;
4018                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4019                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4020                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4021                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4022                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4023                 dmae->comp_addr_hi = 0;
4024                 dmae->comp_val = 1;
4025
4026                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4027                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
4028                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4029                 dmae->opcode = opcode;
4030                 dmae->src_addr_lo = (mac_addr +
4031                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4032                 dmae->src_addr_hi = 0;
4033                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4034                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4035                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4036                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4037                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4038                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4039                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4040                 dmae->comp_addr_hi = 0;
4041                 dmae->comp_val = 1;
4042
4043         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4044
4045                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4046
4047                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4048                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4049                 dmae->opcode = opcode;
4050                 dmae->src_addr_lo = (mac_addr +
4051                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4052                 dmae->src_addr_hi = 0;
4053                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4054                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4055                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4056                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4057                 dmae->comp_addr_hi = 0;
4058                 dmae->comp_val = 1;
4059
4060                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4061                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4062                 dmae->opcode = opcode;
4063                 dmae->src_addr_lo = (mac_addr +
4064                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4065                 dmae->src_addr_hi = 0;
4066                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4067                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4068                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4069                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4070                 dmae->len = 1;
4071                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4072                 dmae->comp_addr_hi = 0;
4073                 dmae->comp_val = 1;
4074
4075                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4076                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4077                 dmae->opcode = opcode;
4078                 dmae->src_addr_lo = (mac_addr +
4079                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4080                 dmae->src_addr_hi = 0;
4081                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4082                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4083                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4084                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4085                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4086                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4087                 dmae->comp_addr_hi = 0;
4088                 dmae->comp_val = 1;
4089         }
4090
4091         /* NIG */
4092         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4093         dmae->opcode = opcode;
4094         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4095                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
4096         dmae->src_addr_hi = 0;
4097         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4098         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4099         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4100         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4101         dmae->comp_addr_hi = 0;
4102         dmae->comp_val = 1;
4103
4104         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4105         dmae->opcode = opcode;
4106         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4107                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4108         dmae->src_addr_hi = 0;
4109         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4110                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
4111         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4112                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
4113         dmae->len = (2*sizeof(u32)) >> 2;
4114         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4115         dmae->comp_addr_hi = 0;
4116         dmae->comp_val = 1;
4117
4118         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4119         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4120                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4121                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4122 #ifdef __BIG_ENDIAN
4123                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4124 #else
4125                         DMAE_CMD_ENDIANITY_DW_SWAP |
4126 #endif
4127                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4128                         (vn << DMAE_CMD_E1HVN_SHIFT));
4129         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4130                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
4131         dmae->src_addr_hi = 0;
4132         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4133                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
4134         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4135                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
4136         dmae->len = (2*sizeof(u32)) >> 2;
4137         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4138         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4139         dmae->comp_val = DMAE_COMP_VAL;
4140
4141         *stats_comp = 0;
4142 }
4143
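/* Set up a single DMAE command that copies the driver's host_func_stats
 * block from host (PCI) memory into this function's stats mailbox
 * (bp->func_stx) in device GRC space.  The DMAE engine signals completion
 * by writing DMAE_COMP_VAL to the stats_comp word, which is cleared here
 * and polled later by bnx2x_stats_comp().
 */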
4144 static void bnx2x_func_stats_init(struct bnx2x *bp)
4145 {
4146         struct dmae_command *dmae = &bp->stats_dmae;
4147         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4148
4149         /* sanity */
4150         if (!bp->func_stx) {
4151                 BNX2X_ERR("BUG!\n");
4152                 return;
4153         }
4154
4155         bp->executer_idx = 0;
4156         memset(dmae, 0, sizeof(struct dmae_command));
4157
4158         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4159                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4160                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4161 #ifdef __BIG_ENDIAN
4162                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4163 #else
4164                         DMAE_CMD_ENDIANITY_DW_SWAP |
4165 #endif
4166                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4167                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4168         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4169         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4170         dmae->dst_addr_lo = bp->func_stx >> 2;
4171         dmae->dst_addr_hi = 0;
4172         dmae->len = sizeof(struct host_func_stats) >> 2;
4173         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4174         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4175         dmae->comp_val = DMAE_COMP_VAL;
4176
4177         *stats_comp = 0;
4178 }
4179
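/* Only the PMF (port management function) collects the port-wide MAC/NIG
 * statistics; a non-PMF function just refreshes its per-function block.
 */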
4180 static void bnx2x_stats_start(struct bnx2x *bp)
4181 {
4182         if (bp->port.pmf)
4183                 bnx2x_port_stats_init(bp);
4184
4185         else if (bp->func_stx)
4186                 bnx2x_func_stats_init(bp);
4187
4188         bnx2x_hw_stats_post(bp);
4189         bnx2x_storm_stats_post(bp);
4190 }
4191
4192 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4193 {
4194         bnx2x_stats_comp(bp);
4195         bnx2x_stats_pmf_update(bp);
4196         bnx2x_stats_start(bp);
4197 }
4198
4199 static void bnx2x_stats_restart(struct bnx2x *bp)
4200 {
4201         bnx2x_stats_comp(bp);
4202         bnx2x_stats_start(bp);
4203 }
4204
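/* The UPDATE_STAT64()/ADD_64() helpers used throughout the update routines
 * keep 64-bit counters as {hi, lo} dword pairs.  As a sketch (the real
 * macros are defined earlier in the driver), ADD_64 is essentially:
 *
 *	s_lo += a_lo;
 *	s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0);	 (carry out of the low dword)
 *
 * and UPDATE_STAT64 folds the delta between a freshly DMAEd MAC counter
 * and its previous snapshot into the port stats block.
 */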
4205 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4206 {
4207         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4208         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4209         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4210         struct {
4211                 u32 lo;
4212                 u32 hi;
4213         } diff;
4214
4215         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4216         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4217         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4218         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4219         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4220         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
4221         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
4222         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
4223         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
4224         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4225         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4226         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4227         UPDATE_STAT64(tx_stat_gt127,
4228                                 tx_stat_etherstatspkts65octetsto127octets);
4229         UPDATE_STAT64(tx_stat_gt255,
4230                                 tx_stat_etherstatspkts128octetsto255octets);
4231         UPDATE_STAT64(tx_stat_gt511,
4232                                 tx_stat_etherstatspkts256octetsto511octets);
4233         UPDATE_STAT64(tx_stat_gt1023,
4234                                 tx_stat_etherstatspkts512octetsto1023octets);
4235         UPDATE_STAT64(tx_stat_gt1518,
4236                                 tx_stat_etherstatspkts1024octetsto1522octets);
4237         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4238         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4239         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4240         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4241         UPDATE_STAT64(tx_stat_gterr,
4242                                 tx_stat_dot3statsinternalmactransmiterrors);
4243         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
4244
4245         estats->pause_frames_received_hi =
4246                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4247         estats->pause_frames_received_lo =
4248                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4249
4250         estats->pause_frames_sent_hi =
4251                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4252         estats->pause_frames_sent_lo =
4253                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
4254 }
4255
4256 static void bnx2x_emac_stats_update(struct bnx2x *bp)
4257 {
4258         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4259         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4260         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4261
4262         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4263         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4264         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4265         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4266         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4267         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4268         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4269         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4270         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4271         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4272         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4273         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4274         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4275         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4276         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4277         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4278         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4279         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4280         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4281         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4282         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4283         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4284         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4285         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4286         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4287         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4288         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4289         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4290         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4291         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4292         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
4293
4294         estats->pause_frames_received_hi =
4295                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4296         estats->pause_frames_received_lo =
4297                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4298         ADD_64(estats->pause_frames_received_hi,
4299                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4300                estats->pause_frames_received_lo,
4301                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4302
4303         estats->pause_frames_sent_hi =
4304                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
4305         estats->pause_frames_sent_lo =
4306                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
4307         ADD_64(estats->pause_frames_sent_hi,
4308                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4309                estats->pause_frames_sent_lo,
4310                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
4311 }
4312
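/* Fold the freshly DMAEd hardware counters into the port stats: pick the
 * MAC-specific updater based on the active MAC, extend the 32-bit NIG
 * discard/truncate counters into their 64-bit mirrors, then snapshot the
 * NIG block so the next run can compute deltas.
 */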
4313 static int bnx2x_hw_stats_update(struct bnx2x *bp)
4314 {
4315         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4316         struct nig_stats *old = &(bp->port.old_nig_stats);
4317         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4318         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4319         struct {
4320                 u32 lo;
4321                 u32 hi;
4322         } diff;
4323
4324         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4325                 bnx2x_bmac_stats_update(bp);
4326
4327         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4328                 bnx2x_emac_stats_update(bp);
4329
4330         else { /* unreached */
4331                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
4332                 return -1;
4333         }
4334
4335         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4336                       new->brb_discard - old->brb_discard);
4337         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4338                       new->brb_truncate - old->brb_truncate);
4339
4340         UPDATE_STAT64_NIG(egress_mac_pkt0,
4341                                         etherstatspkts1024octetsto1522octets);
4342         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
4343
4344         memcpy(old, new, sizeof(struct nig_stats));
4345
4346         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4347                sizeof(struct mac_stx));
4348         estats->brb_drop_hi = pstats->brb_drop_hi;
4349         estats->brb_drop_lo = pstats->brb_drop_lo;
4350
4351         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
4352
4353         if (!BP_NOMCP(bp)) {
4354                 u32 nig_timer_max =
4355                         SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4356                 if (nig_timer_max != estats->nig_timer_max) {
4357                         estats->nig_timer_max = nig_timer_max;
4358                         BNX2X_ERR("NIG timer max (%u)\n",
4359                                   estats->nig_timer_max);
4360                 }
4361         }
4362
4363         return 0;
4364 }
4365
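/* Pull the per-client statistics posted by the firmware "storms" (t/u/x)
 * out of the fw_stats buffer.  Each storm stamps its block with the
 * stats_counter of the ramrod it answered; if any queue's stamp does not
 * match the last ramrod sent, the whole update is abandoned (non-zero
 * return) and retried on a later poll.
 */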
4366 static int bnx2x_storm_stats_update(struct bnx2x *bp)
4367 {
4368         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4369         struct tstorm_per_port_stats *tport =
4370                                         &stats->tstorm_common.port_statistics;
4371         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4372         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4373         int i;
4374         u16 cur_stats_counter;
4375
4376         /* Make sure we use the value of the counter
4377          * used for sending the last stats ramrod.
4378          */
4379         spin_lock_bh(&bp->stats_lock);
4380         cur_stats_counter = bp->stats_counter - 1;
4381         spin_unlock_bh(&bp->stats_lock);
4382
4383         memcpy(&(fstats->total_bytes_received_hi),
4384                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4385                sizeof(struct host_func_stats) - 2*sizeof(u32));
4386         estats->error_bytes_received_hi = 0;
4387         estats->error_bytes_received_lo = 0;
4388         estats->etherstatsoverrsizepkts_hi = 0;
4389         estats->etherstatsoverrsizepkts_lo = 0;
4390         estats->no_buff_discard_hi = 0;
4391         estats->no_buff_discard_lo = 0;
4392
4393         for_each_queue(bp, i) {
4394                 struct bnx2x_fastpath *fp = &bp->fp[i];
4395                 int cl_id = fp->cl_id;
4396                 struct tstorm_per_client_stats *tclient =
4397                                 &stats->tstorm_common.client_statistics[cl_id];
4398                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4399                 struct ustorm_per_client_stats *uclient =
4400                                 &stats->ustorm_common.client_statistics[cl_id];
4401                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4402                 struct xstorm_per_client_stats *xclient =
4403                                 &stats->xstorm_common.client_statistics[cl_id];
4404                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4405                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4406                 u32 diff;
4407
4408                 /* are storm stats valid? */
4409                 if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) {
4410                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4411                            "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
4412                            i, xclient->stats_counter, cur_stats_counter + 1);
4413                         return -1;
4414                 }
4415                 if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) {
4416                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4417                            "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
4418                            i, tclient->stats_counter, cur_stats_counter + 1);
4419                         return -2;
4420                 }
4421                 if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) {
4422                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4423                            "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
4424                            i, uclient->stats_counter, cur_stats_counter + 1);
4425                         return -4;
4426                 }
4427
4428                 qstats->total_bytes_received_hi =
4429                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4430                 qstats->total_bytes_received_lo =
4431                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4432
4433                 ADD_64(qstats->total_bytes_received_hi,
4434                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4435                        qstats->total_bytes_received_lo,
4436                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4437
4438                 ADD_64(qstats->total_bytes_received_hi,
4439                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4440                        qstats->total_bytes_received_lo,
4441                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4442
4443                 SUB_64(qstats->total_bytes_received_hi,
4444                        le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
4445                        qstats->total_bytes_received_lo,
4446                        le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
4447
4448                 SUB_64(qstats->total_bytes_received_hi,
4449                        le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
4450                        qstats->total_bytes_received_lo,
4451                        le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
4452
4453                 SUB_64(qstats->total_bytes_received_hi,
4454                        le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
4455                        qstats->total_bytes_received_lo,
4456                        le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
4457
4458                 qstats->valid_bytes_received_hi =
4459                                         qstats->total_bytes_received_hi;
4460                 qstats->valid_bytes_received_lo =
4461                                         qstats->total_bytes_received_lo;
4462
4463                 qstats->error_bytes_received_hi =
4464                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4465                 qstats->error_bytes_received_lo =
4466                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4467
4468                 ADD_64(qstats->total_bytes_received_hi,
4469                        qstats->error_bytes_received_hi,
4470                        qstats->total_bytes_received_lo,
4471                        qstats->error_bytes_received_lo);
4472
4473                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4474                                         total_unicast_packets_received);
4475                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4476                                         total_multicast_packets_received);
4477                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4478                                         total_broadcast_packets_received);
4479                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4480                                         etherstatsoverrsizepkts);
4481                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4482
4483                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4484                                         total_unicast_packets_received);
4485                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4486                                         total_multicast_packets_received);
4487                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4488                                         total_broadcast_packets_received);
4489                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4490                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4491                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4492
4493                 qstats->total_bytes_transmitted_hi =
4494                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4495                 qstats->total_bytes_transmitted_lo =
4496                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4497
4498                 ADD_64(qstats->total_bytes_transmitted_hi,
4499                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4500                        qstats->total_bytes_transmitted_lo,
4501                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4502
4503                 ADD_64(qstats->total_bytes_transmitted_hi,
4504                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4505                        qstats->total_bytes_transmitted_lo,
4506                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4507
4508                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4509                                         total_unicast_packets_transmitted);
4510                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4511                                         total_multicast_packets_transmitted);
4512                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4513                                         total_broadcast_packets_transmitted);
4514
4515                 old_tclient->checksum_discard = tclient->checksum_discard;
4516                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4517
4518                 ADD_64(fstats->total_bytes_received_hi,
4519                        qstats->total_bytes_received_hi,
4520                        fstats->total_bytes_received_lo,
4521                        qstats->total_bytes_received_lo);
4522                 ADD_64(fstats->total_bytes_transmitted_hi,
4523                        qstats->total_bytes_transmitted_hi,
4524                        fstats->total_bytes_transmitted_lo,
4525                        qstats->total_bytes_transmitted_lo);
4526                 ADD_64(fstats->total_unicast_packets_received_hi,
4527                        qstats->total_unicast_packets_received_hi,
4528                        fstats->total_unicast_packets_received_lo,
4529                        qstats->total_unicast_packets_received_lo);
4530                 ADD_64(fstats->total_multicast_packets_received_hi,
4531                        qstats->total_multicast_packets_received_hi,
4532                        fstats->total_multicast_packets_received_lo,
4533                        qstats->total_multicast_packets_received_lo);
4534                 ADD_64(fstats->total_broadcast_packets_received_hi,
4535                        qstats->total_broadcast_packets_received_hi,
4536                        fstats->total_broadcast_packets_received_lo,
4537                        qstats->total_broadcast_packets_received_lo);
4538                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4539                        qstats->total_unicast_packets_transmitted_hi,
4540                        fstats->total_unicast_packets_transmitted_lo,
4541                        qstats->total_unicast_packets_transmitted_lo);
4542                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4543                        qstats->total_multicast_packets_transmitted_hi,
4544                        fstats->total_multicast_packets_transmitted_lo,
4545                        qstats->total_multicast_packets_transmitted_lo);
4546                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4547                        qstats->total_broadcast_packets_transmitted_hi,
4548                        fstats->total_broadcast_packets_transmitted_lo,
4549                        qstats->total_broadcast_packets_transmitted_lo);
4550                 ADD_64(fstats->valid_bytes_received_hi,
4551                        qstats->valid_bytes_received_hi,
4552                        fstats->valid_bytes_received_lo,
4553                        qstats->valid_bytes_received_lo);
4554
4555                 ADD_64(estats->error_bytes_received_hi,
4556                        qstats->error_bytes_received_hi,
4557                        estats->error_bytes_received_lo,
4558                        qstats->error_bytes_received_lo);
4559                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4560                        qstats->etherstatsoverrsizepkts_hi,
4561                        estats->etherstatsoverrsizepkts_lo,
4562                        qstats->etherstatsoverrsizepkts_lo);
4563                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4564                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4565         }
4566
4567         ADD_64(fstats->total_bytes_received_hi,
4568                estats->rx_stat_ifhcinbadoctets_hi,
4569                fstats->total_bytes_received_lo,
4570                estats->rx_stat_ifhcinbadoctets_lo);
4571
4572         memcpy(estats, &(fstats->total_bytes_received_hi),
4573                sizeof(struct host_func_stats) - 2*sizeof(u32));
4574
4575         ADD_64(estats->etherstatsoverrsizepkts_hi,
4576                estats->rx_stat_dot3statsframestoolong_hi,
4577                estats->etherstatsoverrsizepkts_lo,
4578                estats->rx_stat_dot3statsframestoolong_lo);
4579         ADD_64(estats->error_bytes_received_hi,
4580                estats->rx_stat_ifhcinbadoctets_hi,
4581                estats->error_bytes_received_lo,
4582                estats->rx_stat_ifhcinbadoctets_lo);
4583
4584         if (bp->port.pmf) {
4585                 estats->mac_filter_discard =
4586                                 le32_to_cpu(tport->mac_filter_discard);
4587                 estats->xxoverflow_discard =
4588                                 le32_to_cpu(tport->xxoverflow_discard);
4589                 estats->brb_truncate_discard =
4590                                 le32_to_cpu(tport->brb_truncate_discard);
4591                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4592         }
4593
4594         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4595
4596         bp->stats_pending = 0;
4597
4598         return 0;
4599 }
4600
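/* Translate the accumulated driver statistics into the generic
 * struct net_device_stats counters reported to the network stack;
 * bnx2x_hilo() collapses a {hi, lo} dword pair into a single value.
 */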
4601 static void bnx2x_net_stats_update(struct bnx2x *bp)
4602 {
4603         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4604         struct net_device_stats *nstats = &bp->dev->stats;
4605         int i;
4606
4607         nstats->rx_packets =
4608                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4609                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4610                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4611
4612         nstats->tx_packets =
4613                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4614                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4615                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4616
4617         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4618
4619         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4620
4621         nstats->rx_dropped = estats->mac_discard;
4622         for_each_queue(bp, i)
4623                 nstats->rx_dropped +=
4624                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4625
4626         nstats->tx_dropped = 0;
4627
4628         nstats->multicast =
4629                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4630
4631         nstats->collisions =
4632                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4633
4634         nstats->rx_length_errors =
4635                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4636                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4637         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4638                                  bnx2x_hilo(&estats->brb_truncate_hi);
4639         nstats->rx_crc_errors =
4640                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4641         nstats->rx_frame_errors =
4642                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4643         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4644         nstats->rx_missed_errors = estats->xxoverflow_discard;
4645
4646         nstats->rx_errors = nstats->rx_length_errors +
4647                             nstats->rx_over_errors +
4648                             nstats->rx_crc_errors +
4649                             nstats->rx_frame_errors +
4650                             nstats->rx_fifo_errors +
4651                             nstats->rx_missed_errors;
4652
4653         nstats->tx_aborted_errors =
4654                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4655                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4656         nstats->tx_carrier_errors =
4657                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4658         nstats->tx_fifo_errors = 0;
4659         nstats->tx_heartbeat_errors = 0;
4660         nstats->tx_window_errors = 0;
4661
4662         nstats->tx_errors = nstats->tx_aborted_errors +
4663                             nstats->tx_carrier_errors +
4664             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4665 }
4666
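/* Sum the purely software-maintained per-queue counters (Tx queue stops,
 * Rx error discards, skb allocation and checksum failures) into the
 * device-wide totals.
 */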
4667 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4668 {
4669         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4670         int i;
4671
4672         estats->driver_xoff = 0;
4673         estats->rx_err_discard_pkt = 0;
4674         estats->rx_skb_alloc_failed = 0;
4675         estats->hw_csum_err = 0;
4676         for_each_queue(bp, i) {
4677                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4678
4679                 estats->driver_xoff += qstats->driver_xoff;
4680                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4681                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4682                 estats->hw_csum_err += qstats->hw_csum_err;
4683         }
4684 }
4685
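/* Periodic update entry point: runs only once the previous stats DMAE has
 * completed, refreshes the hardware (PMF only), storm, netdev and driver
 * counters, then posts the next DMAE/ramrod pair.
 */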
4686 static void bnx2x_stats_update(struct bnx2x *bp)
4687 {
4688         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4689
4690         if (*stats_comp != DMAE_COMP_VAL)
4691                 return;
4692
4693         if (bp->port.pmf)
4694                 bnx2x_hw_stats_update(bp);
4695
4696         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4697                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4698                 bnx2x_panic();
4699                 return;
4700         }
4701
4702         bnx2x_net_stats_update(bp);
4703         bnx2x_drv_stats_update(bp);
4704
4705         if (netif_msg_timer(bp)) {
4706                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4707                 int i;
4708
4709                 printk(KERN_DEBUG "%s: brb drops %u  brb truncate %u\n",
4710                        bp->dev->name,
4711                        estats->brb_drop_lo, estats->brb_truncate_lo);
4712
4713                 for_each_queue(bp, i) {
4714                         struct bnx2x_fastpath *fp = &bp->fp[i];
4715                         struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4716
4717                         printk(KERN_DEBUG "%s: rx usage(%4u)  *rx_cons_sb(%u)"
4718                                           "  rx pkt(%lu)  rx calls(%lu %lu)\n",
4719                                fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
4720                                fp->rx_comp_cons),
4721                                le16_to_cpu(*fp->rx_cons_sb),
4722                                bnx2x_hilo(&qstats->
4723                                           total_unicast_packets_received_hi),
4724                                fp->rx_calls, fp->rx_pkt);
4725                 }
4726
4727                 for_each_queue(bp, i) {
4728                         struct bnx2x_fastpath *fp = &bp->fp[i];
4729                         struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4730                         struct netdev_queue *txq =
4731                                 netdev_get_tx_queue(bp->dev, i);
4732
4733                         printk(KERN_DEBUG "%s: tx avail(%4u)  *tx_cons_sb(%u)"
4734                                           "  tx pkt(%lu) tx calls (%lu)"
4735                                           "  %s (Xoff events %u)\n",
4736                                fp->name, bnx2x_tx_avail(fp),
4737                                le16_to_cpu(*fp->tx_cons_sb),
4738                                bnx2x_hilo(&qstats->
4739                                           total_unicast_packets_transmitted_hi),
4740                                fp->tx_pkt,
4741                                (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
4742                                qstats->driver_xoff);
4743                 }
4744         }
4745
4746         bnx2x_hw_stats_post(bp);
4747         bnx2x_storm_stats_post(bp);
4748 }
4749
4750 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4751 {
4752         struct dmae_command *dmae;
4753         u32 opcode;
4754         int loader_idx = PMF_DMAE_C(bp);
4755         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4756
4757         bp->executer_idx = 0;
4758
4759         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4760                   DMAE_CMD_C_ENABLE |
4761                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4762 #ifdef __BIG_ENDIAN
4763                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4764 #else
4765                   DMAE_CMD_ENDIANITY_DW_SWAP |
4766 #endif
4767                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4768                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4769
4770         if (bp->port.port_stx) {
4771
4772                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4773                 if (bp->func_stx)
4774                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4775                 else
4776                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4777                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4778                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4779                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4780                 dmae->dst_addr_hi = 0;
4781                 dmae->len = sizeof(struct host_port_stats) >> 2;
4782                 if (bp->func_stx) {
4783                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4784                         dmae->comp_addr_hi = 0;
4785                         dmae->comp_val = 1;
4786                 } else {
4787                         dmae->comp_addr_lo =
4788                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4789                         dmae->comp_addr_hi =
4790                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4791                         dmae->comp_val = DMAE_COMP_VAL;
4792
4793                         *stats_comp = 0;
4794                 }
4795         }
4796
4797         if (bp->func_stx) {
4798
4799                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4800                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4801                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4802                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4803                 dmae->dst_addr_lo = bp->func_stx >> 2;
4804                 dmae->dst_addr_hi = 0;
4805                 dmae->len = sizeof(struct host_func_stats) >> 2;
4806                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4807                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4808                 dmae->comp_val = DMAE_COMP_VAL;
4809
4810                 *stats_comp = 0;
4811         }
4812 }
4813
4814 static void bnx2x_stats_stop(struct bnx2x *bp)
4815 {
4816         int update = 0;
4817
4818         bnx2x_stats_comp(bp);
4819
4820         if (bp->port.pmf)
4821                 update = (bnx2x_hw_stats_update(bp) == 0);
4822
4823         update |= (bnx2x_storm_stats_update(bp) == 0);
4824
4825         if (update) {
4826                 bnx2x_net_stats_update(bp);
4827
4828                 if (bp->port.pmf)
4829                         bnx2x_port_stats_stop(bp);
4830
4831                 bnx2x_hw_stats_post(bp);
4832                 bnx2x_stats_comp(bp);
4833         }
4834 }
4835
4836 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4837 {
4838 }
4839
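/* Statistics state machine: indexed by [current state][event], each entry
 * names the action to run and the state to move to.
 */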
4840 static const struct {
4841         void (*action)(struct bnx2x *bp);
4842         enum bnx2x_stats_state next_state;
4843 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4844 /* state        event   */
4845 {
4846 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4847 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4848 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4849 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4850 },
4851 {
4852 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4853 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4854 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4855 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4856 }
4857 };
4858
4859 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4860 {
4861         enum bnx2x_stats_state state;
4862
4863         if (unlikely(bp->panic))
4864                 return;
4865
4866         /* Protect the state transition (read state, install next state) */
4867         spin_lock_bh(&bp->stats_lock);
4868         state = bp->stats_state;
4869         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4870         spin_unlock_bh(&bp->stats_lock);
4871
4872         bnx2x_stats_stm[state][event].action(bp);
4873
4874         if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4875                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4876                    state, event, bp->stats_state);
4877 }
4878
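/* Push the driver's port stats block out to the management firmware's
 * port_stx area once, so both sides start from the same baseline; the
 * DMAE is posted and then waited on synchronously.
 */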
4879 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4880 {
4881         struct dmae_command *dmae;
4882         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4883
4884         /* sanity */
4885         if (!bp->port.pmf || !bp->port.port_stx) {
4886                 BNX2X_ERR("BUG!\n");
4887                 return;
4888         }
4889
4890         bp->executer_idx = 0;
4891
4892         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4893         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4894                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4895                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4896 #ifdef __BIG_ENDIAN
4897                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4898 #else
4899                         DMAE_CMD_ENDIANITY_DW_SWAP |
4900 #endif
4901                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4902                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4903         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4904         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4905         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4906         dmae->dst_addr_hi = 0;
4907         dmae->len = sizeof(struct host_port_stats) >> 2;
4908         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4909         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4910         dmae->comp_val = DMAE_COMP_VAL;
4911
4912         *stats_comp = 0;
4913         bnx2x_hw_stats_post(bp);
4914         bnx2x_stats_comp(bp);
4915 }
4916
4917 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4918 {
4919         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4920         int port = BP_PORT(bp);
4921         int func;
4922         u32 func_stx;
4923
4924         /* sanity */
4925         if (!bp->port.pmf || !bp->func_stx) {
4926                 BNX2X_ERR("BUG!\n");
4927                 return;
4928         }
4929
4930         /* save our func_stx */
4931         func_stx = bp->func_stx;
4932
4933         for (vn = VN_0; vn < vn_max; vn++) {
4934                 func = 2*vn + port;
4935
4936                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4937                 bnx2x_func_stats_init(bp);
4938                 bnx2x_hw_stats_post(bp);
4939                 bnx2x_stats_comp(bp);
4940         }
4941
4942         /* restore our func_stx */
4943         bp->func_stx = func_stx;
4944 }
4945
4946 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4947 {
4948         struct dmae_command *dmae = &bp->stats_dmae;
4949         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4950
4951         /* sanity */
4952         if (!bp->func_stx) {
4953                 BNX2X_ERR("BUG!\n");
4954                 return;
4955         }
4956
4957         bp->executer_idx = 0;
4958         memset(dmae, 0, sizeof(struct dmae_command));
4959
4960         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4961                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4962                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4963 #ifdef __BIG_ENDIAN
4964                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4965 #else
4966                         DMAE_CMD_ENDIANITY_DW_SWAP |
4967 #endif
4968                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4969                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4970         dmae->src_addr_lo = bp->func_stx >> 2;
4971         dmae->src_addr_hi = 0;
4972         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4973         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4974         dmae->len = sizeof(struct host_func_stats) >> 2;
4975         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4976         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4977         dmae->comp_val = DMAE_COMP_VAL;
4978
4979         *stats_comp = 0;
4980         bnx2x_hw_stats_post(bp);
4981         bnx2x_stats_comp(bp);
4982 }
4983
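/* One-time statistics setup: fetch the port/function stats mailbox
 * addresses from shared memory, snapshot the NIG counters, clear all
 * per-queue and aggregate blocks, and seed the firmware-visible base
 * blocks according to whether this function is the PMF.
 */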
4984 static void bnx2x_stats_init(struct bnx2x *bp)
4985 {
4986         int port = BP_PORT(bp);
4987         int func = BP_FUNC(bp);
4988         int i;
4989
4990         bp->stats_pending = 0;
4991         bp->executer_idx = 0;
4992         bp->stats_counter = 0;
4993
4994         /* port and func stats for management */
4995         if (!BP_NOMCP(bp)) {
4996                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4997                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4998
4999         } else {
5000                 bp->port.port_stx = 0;
5001                 bp->func_stx = 0;
5002         }
5003         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
5004            bp->port.port_stx, bp->func_stx);
5005
5006         /* port stats */
5007         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
5008         bp->port.old_nig_stats.brb_discard =
5009                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
5010         bp->port.old_nig_stats.brb_truncate =
5011                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
5012         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
5013                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
5014         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
5015                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
5016
5017         /* function stats */
5018         for_each_queue(bp, i) {
5019                 struct bnx2x_fastpath *fp = &bp->fp[i];
5020
5021                 memset(&fp->old_tclient, 0,
5022                        sizeof(struct tstorm_per_client_stats));
5023                 memset(&fp->old_uclient, 0,
5024                        sizeof(struct ustorm_per_client_stats));
5025                 memset(&fp->old_xclient, 0,
5026                        sizeof(struct xstorm_per_client_stats));
5027                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
5028         }
5029
5030         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
5031         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
5032
5033         bp->stats_state = STATS_STATE_DISABLED;
5034
5035         if (bp->port.pmf) {
5036                 if (bp->port.port_stx)
5037                         bnx2x_port_stats_base_init(bp);
5038
5039                 if (bp->func_stx)
5040                         bnx2x_func_stats_base_init(bp);
5041
5042         } else if (bp->func_stx)
5043                 bnx2x_func_stats_base_update(bp);
5044 }
5045
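/* Periodic driver timer (re-armed every bp->current_interval): services
 * the optional poll mode, exchanges the heartbeat pulse with the
 * management CPU (MCP) and kicks a statistics update while the device
 * is open.
 */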
5046 static void bnx2x_timer(unsigned long data)
5047 {
5048         struct bnx2x *bp = (struct bnx2x *) data;
5049
5050         if (!netif_running(bp->dev))
5051                 return;
5052
5053         if (atomic_read(&bp->intr_sem) != 0)
5054                 goto timer_restart;
5055
5056         if (poll) {
5057                 struct bnx2x_fastpath *fp = &bp->fp[0];
5058                 int rc;
5059
5060                 bnx2x_tx_int(fp);
5061                 rc = bnx2x_rx_int(fp, 1000);
5062         }
5063
5064         if (!BP_NOMCP(bp)) {
5065                 int func = BP_FUNC(bp);
5066                 u32 drv_pulse;
5067                 u32 mcp_pulse;
5068
5069                 ++bp->fw_drv_pulse_wr_seq;
5070                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5071                 /* TBD - add SYSTEM_TIME */
5072                 drv_pulse = bp->fw_drv_pulse_wr_seq;
5073                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
5074
5075                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
5076                              MCP_PULSE_SEQ_MASK);
5077                 /* The delta between driver pulse and mcp response
5078                  * should be 1 (before mcp response) or 0 (after mcp response)
5079                  */
5080                 if ((drv_pulse != mcp_pulse) &&
5081                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5082                         /* someone lost a heartbeat... */
5083                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5084                                   drv_pulse, mcp_pulse);
5085                 }
5086         }
5087
5088         if (bp->state == BNX2X_STATE_OPEN)
5089                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5090
5091 timer_restart:
5092         mod_timer(&bp->timer, jiffies + bp->current_interval);
5093 }
5094
5095 /* end of Statistics */
5096
5097 /* nic init */
5098
5099 /*
5100  * nic init service functions
5101  */
5102
5103 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
5104 {
5105         int port = BP_PORT(bp);
5106
5107         /* "CSTORM" */
5108         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5109                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
5110                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
5111         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5112                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
5113                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
5114 }
5115
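/* Program a fastpath status block: hand its DMA address to the USTORM and
 * CSTORM sections, record the owning function, start with host coalescing
 * disabled on every index, and enable the IGU interrupt line.
 */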
5116 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5117                           dma_addr_t mapping, int sb_id)
5118 {
5119         int port = BP_PORT(bp);
5120         int func = BP_FUNC(bp);
5121         int index;
5122         u64 section;
5123
5124         /* USTORM */
5125         section = ((u64)mapping) + offsetof(struct host_status_block,
5126                                             u_status_block);
5127         sb->u_status_block.status_block_id = sb_id;
5128
5129         REG_WR(bp, BAR_CSTRORM_INTMEM +
5130                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
5131         REG_WR(bp, BAR_CSTRORM_INTMEM +
5132                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
5133                U64_HI(section));
5134         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
5135                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
5136
5137         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5138                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5139                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
5140
5141         /* CSTORM */
5142         section = ((u64)mapping) + offsetof(struct host_status_block,
5143                                             c_status_block);
5144         sb->c_status_block.status_block_id = sb_id;
5145
5146         REG_WR(bp, BAR_CSTRORM_INTMEM +
5147                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
5148         REG_WR(bp, BAR_CSTRORM_INTMEM +
5149                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
5150                U64_HI(section));
5151         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
5152                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
5153
5154         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5155                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5156                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
5157
5158         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5159 }
5160
5161 static void bnx2x_zero_def_sb(struct bnx2x *bp)
5162 {
5163         int func = BP_FUNC(bp);
5164
5165         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
5166                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5167                         sizeof(struct tstorm_def_status_block)/4);
5168         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5169                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
5170                         sizeof(struct cstorm_def_status_block_u)/4);
5171         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5172                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
5173                         sizeof(struct cstorm_def_status_block_c)/4);
5174         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
5175                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5176                         sizeof(struct xstorm_def_status_block)/4);
5177 }
5178
5179 static void bnx2x_init_def_sb(struct bnx2x *bp,
5180                               struct host_def_status_block *def_sb,
5181                               dma_addr_t mapping, int sb_id)
5182 {
5183         int port = BP_PORT(bp);
5184         int func = BP_FUNC(bp);
5185         int index, val, reg_offset;
5186         u64 section;
5187
5188         /* ATTN */
5189         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5190                                             atten_status_block);
5191         def_sb->atten_status_block.status_block_id = sb_id;
5192
5193         bp->attn_state = 0;
5194
5195         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5196                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5197
5198         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5199                 bp->attn_group[index].sig[0] = REG_RD(bp,
5200                                                      reg_offset + 0x10*index);
5201                 bp->attn_group[index].sig[1] = REG_RD(bp,
5202                                                reg_offset + 0x4 + 0x10*index);
5203                 bp->attn_group[index].sig[2] = REG_RD(bp,
5204                                                reg_offset + 0x8 + 0x10*index);
5205                 bp->attn_group[index].sig[3] = REG_RD(bp,
5206                                                reg_offset + 0xc + 0x10*index);
5207         }
5208
5209         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5210                              HC_REG_ATTN_MSG0_ADDR_L);
5211
5212         REG_WR(bp, reg_offset, U64_LO(section));
5213         REG_WR(bp, reg_offset + 4, U64_HI(section));
5214
5215         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5216
5217         val = REG_RD(bp, reg_offset);
5218         val |= sb_id;
5219         REG_WR(bp, reg_offset, val);
5220
5221         /* USTORM */
5222         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5223                                             u_def_status_block);
5224         def_sb->u_def_status_block.status_block_id = sb_id;
5225
5226         REG_WR(bp, BAR_CSTRORM_INTMEM +
5227                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
5228         REG_WR(bp, BAR_CSTRORM_INTMEM +
5229                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
5230                U64_HI(section));
5231         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
5232                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
5233
5234         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5235                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5236                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
5237
5238         /* CSTORM */
5239         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5240                                             c_def_status_block);
5241         def_sb->c_def_status_block.status_block_id = sb_id;
5242
5243         REG_WR(bp, BAR_CSTRORM_INTMEM +
5244                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
5245         REG_WR(bp, BAR_CSTRORM_INTMEM +
5246                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
5247                U64_HI(section));
5248         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
5249                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
5250
5251         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5252                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5253                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
5254
5255         /* TSTORM */
5256         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5257                                             t_def_status_block);
5258         def_sb->t_def_status_block.status_block_id = sb_id;
5259
5260         REG_WR(bp, BAR_TSTRORM_INTMEM +
5261                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5262         REG_WR(bp, BAR_TSTRORM_INTMEM +
5263                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5264                U64_HI(section));
5265         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
5266                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5267
5268         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5269                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5270                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5271
5272         /* XSTORM */
5273         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5274                                             x_def_status_block);
5275         def_sb->x_def_status_block.status_block_id = sb_id;
5276
5277         REG_WR(bp, BAR_XSTRORM_INTMEM +
5278                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5279         REG_WR(bp, BAR_XSTRORM_INTMEM +
5280                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5281                U64_HI(section));
5282         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
5283                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5284
5285         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5286                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5287                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5288
5289         bp->stats_pending = 0;
5290         bp->set_mac_pending = 0;
5291
5292         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5293 }
5294
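/* Program each queue's Rx/Tx interrupt coalescing timeout from the
 * user-visible rx_ticks/tx_ticks values (scaled down by 4 * BNX2X_BTR);
 * when the scaled value is zero, coalescing is disabled on that index
 * instead.
 */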
5295 static void bnx2x_update_coalesce(struct bnx2x *bp)
5296 {
5297         int port = BP_PORT(bp);
5298         int i;
5299
5300         for_each_queue(bp, i) {
5301                 int sb_id = bp->fp[i].sb_id;
5302
5303                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5304                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5305                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
5306                                                       U_SB_ETH_RX_CQ_INDEX),
5307                         bp->rx_ticks/(4 * BNX2X_BTR));
5308                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5309                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
5310                                                        U_SB_ETH_RX_CQ_INDEX),
5311                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5312
5313                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5314                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5315                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
5316                                                       C_SB_ETH_TX_CQ_INDEX),
5317                         bp->tx_ticks/(4 * BNX2X_BTR));
5318                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5319                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
5320                                                        C_SB_ETH_TX_CQ_INDEX),
5321                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5322         }
5323 }
5324
5325 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5326                                        struct bnx2x_fastpath *fp, int last)
5327 {
5328         int i;
5329
5330         for (i = 0; i < last; i++) {
5331                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5332                 struct sk_buff *skb = rx_buf->skb;
5333
5334                 if (skb == NULL) {
5335                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5336                         continue;
5337                 }
5338
5339                 if (fp->tpa_state[i] == BNX2X_TPA_START)
5340                         dma_unmap_single(&bp->pdev->dev,
5341                                          dma_unmap_addr(rx_buf, mapping),
5342                                          bp->rx_buf_size, DMA_FROM_DEVICE);
5343
5344                 dev_kfree_skb(skb);
5345                 rx_buf->skb = NULL;
5346         }
5347 }
5348
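/* Build the Rx rings: size the receive buffers from the MTU, pre-allocate
 * one skb per TPA aggregation bin when TPA is enabled (falling back to
 * non-TPA on allocation failure), and chain the SGE/BD/CQE ring pages
 * together by pointing each page's trailing "next page" element at its
 * successor.
 */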
5349 static void bnx2x_init_rx_rings(struct bnx2x *bp)
5350 {
5351         int func = BP_FUNC(bp);
5352         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5353                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
5354         u16 ring_prod, cqe_ring_prod;
5355         int i, j;
5356
5357         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
5358         DP(NETIF_MSG_IFUP,
5359            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
5360
5361         if (bp->flags & TPA_ENABLE_FLAG) {
5362
5363                 for_each_queue(bp, j) {
5364                         struct bnx2x_fastpath *fp = &bp->fp[j];
5365
5366                         for (i = 0; i < max_agg_queues; i++) {
5367                                 fp->tpa_pool[i].skb =
5368                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5369                                 if (!fp->tpa_pool[i].skb) {
5370                                         BNX2X_ERR("Failed to allocate TPA "
5371                                                   "skb pool for queue[%d] - "
5372                                                   "disabling TPA on this "
5373                                                   "queue!\n", j);
5374                                         bnx2x_free_tpa_pool(bp, fp, i);
5375                                         fp->disable_tpa = 1;
5376                                         break;
5377                                 }
5378                                 dma_unmap_addr_set(&fp->tpa_pool[i],
5379                                                    mapping, 0);
5381                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
5382                         }
5383                 }
5384         }
5385
5386         for_each_queue(bp, j) {
5387                 struct bnx2x_fastpath *fp = &bp->fp[j];
5388
5389                 fp->rx_bd_cons = 0;
5390                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5391                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5392
5393                 /* "next page" elements initialization */
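                /* The tail of each ring page is reserved for a pointer to
                 * the next page; the (i % N) arithmetic wraps the last page
                 * back onto the first, closing the ring.
                 */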
5394                 /* SGE ring */
5395                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5396                         struct eth_rx_sge *sge;
5397
5398                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5399                         sge->addr_hi =
5400                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5401                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5402                         sge->addr_lo =
5403                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5404                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5405                 }
5406
5407                 bnx2x_init_sge_ring_bit_mask(fp);
5408
5409                 /* RX BD ring */
5410                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5411                         struct eth_rx_bd *rx_bd;
5412
5413                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5414                         rx_bd->addr_hi =
5415                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5416                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5417                         rx_bd->addr_lo =
5418                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5419                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5420                 }
5421
5422                 /* CQ ring */
5423                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5424                         struct eth_rx_cqe_next_page *nextpg;
5425
5426                         nextpg = (struct eth_rx_cqe_next_page *)
5427                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5428                         nextpg->addr_hi =
5429                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5430                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5431                         nextpg->addr_lo =
5432                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5433                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5434                 }
5435
5436                 /* Allocate SGEs and initialize the ring elements */
5437                 for (i = 0, ring_prod = 0;
5438                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5439
5440                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5441                                 BNX2X_ERR("was only able to allocate "
5442                                           "%d rx sges\n", i);
5443                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5444                                 /* Cleanup already allocated elements */
5445                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5446                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5447                                 fp->disable_tpa = 1;
5448                                 ring_prod = 0;
5449                                 break;
5450                         }
5451                         ring_prod = NEXT_SGE_IDX(ring_prod);
5452                 }
5453                 fp->rx_sge_prod = ring_prod;
5454
5455                 /* Allocate BDs and initialize BD ring */
5456                 fp->rx_comp_cons = 0;
5457                 cqe_ring_prod = ring_prod = 0;
5458                 for (i = 0; i < bp->rx_ring_size; i++) {
5459                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5460                                 BNX2X_ERR("was only able to allocate "
5461                                           "%d rx skbs on queue[%d]\n", i, j);
5462                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5463                                 break;
5464                         }
5465                         ring_prod = NEXT_RX_IDX(ring_prod);
5466                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5467                         WARN_ON(ring_prod <= i);
5468                 }
5469
5470                 fp->rx_bd_prod = ring_prod;
5471                 /* must not have more available CQEs than BDs */
5472                 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5473                                          cqe_ring_prod);
5474                 fp->rx_pkt = fp->rx_calls = 0;
5475
5476                 /* Warning!
5477                  * This will generate an interrupt (to the TSTORM);
5478                  * it must only be done after the chip is initialized.
5479                  */
5480                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5481                                      fp->rx_sge_prod);
5482                 if (j != 0)
5483                         continue;
5484
5485                 REG_WR(bp, BAR_USTRORM_INTMEM +
5486                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5487                        U64_LO(fp->rx_comp_mapping));
5488                 REG_WR(bp, BAR_USTRORM_INTMEM +
5489                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5490                        U64_HI(fp->rx_comp_mapping));
5491         }
5492 }
5493
5494 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5495 {
5496         int i, j;
5497
5498         for_each_queue(bp, j) {
5499                 struct bnx2x_fastpath *fp = &bp->fp[j];
5500
5501                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5502                         struct eth_tx_next_bd *tx_next_bd =
5503                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5504
5505                         tx_next_bd->addr_hi =
5506                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5507                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5508                         tx_next_bd->addr_lo =
5509                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5510                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5511                 }
5512
5513                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5514                 fp->tx_db.data.zero_fill1 = 0;
5515                 fp->tx_db.data.prod = 0;
5516
5517                 fp->tx_pkt_prod = 0;
5518                 fp->tx_pkt_cons = 0;
5519                 fp->tx_bd_prod = 0;
5520                 fp->tx_bd_cons = 0;
5521                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5522                 fp->tx_pkt = 0;
5523         }
5524 }
5525
5526 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5527 {
5528         int func = BP_FUNC(bp);
5529
5530         spin_lock_init(&bp->spq_lock);
5531
5532         bp->spq_left = MAX_SPQ_PENDING;
5533         bp->spq_prod_idx = 0;
5534         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5535         bp->spq_prod_bd = bp->spq;
5536         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5537
5538         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5539                U64_LO(bp->spq_mapping));
5540         REG_WR(bp,
5541                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5542                U64_HI(bp->spq_mapping));
5543
5544         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5545                bp->spq_prod_idx);
5546 }
5547
5548 static void bnx2x_init_context(struct bnx2x *bp)
5549 {
5550         int i;
5551
5552         /* Rx */
5553         for_each_queue(bp, i) {
5554                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5555                 struct bnx2x_fastpath *fp = &bp->fp[i];
5556                 u8 cl_id = fp->cl_id;
5557
5558                 context->ustorm_st_context.common.sb_index_numbers =
5559                                                 BNX2X_RX_SB_INDEX_NUM;
5560                 context->ustorm_st_context.common.clientId = cl_id;
5561                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5562                 context->ustorm_st_context.common.flags =
5563                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5564                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5565                 context->ustorm_st_context.common.statistics_counter_id =
5566                                                 cl_id;
5567                 context->ustorm_st_context.common.mc_alignment_log_size =
5568                                                 BNX2X_RX_ALIGN_SHIFT;
5569                 context->ustorm_st_context.common.bd_buff_size =
5570                                                 bp->rx_buf_size;
5571                 context->ustorm_st_context.common.bd_page_base_hi =
5572                                                 U64_HI(fp->rx_desc_mapping);
5573                 context->ustorm_st_context.common.bd_page_base_lo =
5574                                                 U64_LO(fp->rx_desc_mapping);
5575                 if (!fp->disable_tpa) {
5576                         context->ustorm_st_context.common.flags |=
5577                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5578                         context->ustorm_st_context.common.sge_buff_size =
5579                                 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
5580                                            0xffff);
5581                         context->ustorm_st_context.common.sge_page_base_hi =
5582                                                 U64_HI(fp->rx_sge_mapping);
5583                         context->ustorm_st_context.common.sge_page_base_lo =
5584                                                 U64_LO(fp->rx_sge_mapping);
5585
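                        /* Each SGE spans PAGES_PER_SGE pages: take the page
                         * count needed for an MTU-sized packet and round it
                         * up to a whole number of SGEs.
                         */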
5586                         context->ustorm_st_context.common.max_sges_for_packet =
5587                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5588                         context->ustorm_st_context.common.max_sges_for_packet =
5589                                 ((context->ustorm_st_context.common.
5590                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5591                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5592                 }
5593
5594                 context->ustorm_ag_context.cdu_usage =
5595                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5596                                                CDU_REGION_NUMBER_UCM_AG,
5597                                                ETH_CONNECTION_TYPE);
5598
5599                 context->xstorm_ag_context.cdu_reserved =
5600                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5601                                                CDU_REGION_NUMBER_XCM_AG,
5602                                                ETH_CONNECTION_TYPE);
5603         }
5604
5605         /* Tx */
5606         for_each_queue(bp, i) {
5607                 struct bnx2x_fastpath *fp = &bp->fp[i];
5608                 struct eth_context *context =
5609                         bnx2x_sp(bp, context[i].eth);
5610
5611                 context->cstorm_st_context.sb_index_number =
5612                                                 C_SB_ETH_TX_CQ_INDEX;
5613                 context->cstorm_st_context.status_block_id = fp->sb_id;
5614
5615                 context->xstorm_st_context.tx_bd_page_base_hi =
5616                                                 U64_HI(fp->tx_desc_mapping);
5617                 context->xstorm_st_context.tx_bd_page_base_lo =
5618                                                 U64_LO(fp->tx_desc_mapping);
5619                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5620                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5621         }
5622 }
5623
5624 static void bnx2x_init_ind_table(struct bnx2x *bp)
5625 {
5626         int func = BP_FUNC(bp);
5627         int i;
5628
5629         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5630                 return;
5631
5632         DP(NETIF_MSG_IFUP,
5633            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
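        /* fill the table round-robin with the client IDs of the active
         * queues so RSS hash buckets are spread evenly across them */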
5634         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5635                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5636                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5637                         bp->fp->cl_id + (i % bp->num_queues));
5638 }
5639
5640 static void bnx2x_set_client_config(struct bnx2x *bp)
5641 {
5642         struct tstorm_eth_client_config tstorm_client = {0};
5643         int port = BP_PORT(bp);
5644         int i;
5645
5646         tstorm_client.mtu = bp->dev->mtu;
5647         tstorm_client.config_flags =
5648                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5649                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5650 #ifdef BCM_VLAN
5651         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5652                 tstorm_client.config_flags |=
5653                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5654                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5655         }
5656 #endif
5657
5658         for_each_queue(bp, i) {
5659                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5660
5661                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5662                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5663                        ((u32 *)&tstorm_client)[0]);
5664                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5665                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5666                        ((u32 *)&tstorm_client)[1]);
5667         }
5668
5669         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5670            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5671 }
5672
5673 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5674 {
5675         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5676         int mode = bp->rx_mode;
5677         int mask = bp->rx_mode_cl_mask;
5678         int func = BP_FUNC(bp);
5679         int port = BP_PORT(bp);
5680         int i;
5681         /* All but management unicast packets should pass to the host as well */
5682         u32 llh_mask =
5683                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5684                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5685                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5686                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5687
5688         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5689
5690         switch (mode) {
5691         case BNX2X_RX_MODE_NONE: /* no Rx */
5692                 tstorm_mac_filter.ucast_drop_all = mask;
5693                 tstorm_mac_filter.mcast_drop_all = mask;
5694                 tstorm_mac_filter.bcast_drop_all = mask;
5695                 break;
5696
5697         case BNX2X_RX_MODE_NORMAL:
5698                 tstorm_mac_filter.bcast_accept_all = mask;
5699                 break;
5700
5701         case BNX2X_RX_MODE_ALLMULTI:
5702                 tstorm_mac_filter.mcast_accept_all = mask;
5703                 tstorm_mac_filter.bcast_accept_all = mask;
5704                 break;
5705
5706         case BNX2X_RX_MODE_PROMISC:
5707                 tstorm_mac_filter.ucast_accept_all = mask;
5708                 tstorm_mac_filter.mcast_accept_all = mask;
5709                 tstorm_mac_filter.bcast_accept_all = mask;
5710                 /* pass management unicast packets as well */
5711                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5712                 break;
5713
5714         default:
5715                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5716                 break;
5717         }
5718
5719         REG_WR(bp,
5720                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5721                llh_mask);
5722
5723         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5724                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5725                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5726                        ((u32 *)&tstorm_mac_filter)[i]);
5727
5728 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5729                    ((u32 *)&tstorm_mac_filter)[i]); */
5730         }
5731
5732         if (mode != BNX2X_RX_MODE_NONE)
5733                 bnx2x_set_client_config(bp);
5734 }
5735
5736 static void bnx2x_init_internal_common(struct bnx2x *bp)
5737 {
5738         int i;
5739
5740         /* Zero this manually as its initialization is
5741            currently missing in the initTool */
5742         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5743                 REG_WR(bp, BAR_USTRORM_INTMEM +
5744                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5745 }
5746
5747 static void bnx2x_init_internal_port(struct bnx2x *bp)
5748 {
5749         int port = BP_PORT(bp);
5750
5751         REG_WR(bp,
5752                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5753         REG_WR(bp,
5754                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5755         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5756         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5757 }
5758
5759 static void bnx2x_init_internal_func(struct bnx2x *bp)
5760 {
5761         struct tstorm_eth_function_common_config tstorm_config = {0};
5762         struct stats_indication_flags stats_flags = {0};
5763         int port = BP_PORT(bp);
5764         int func = BP_FUNC(bp);
5765         int i, j;
5766         u32 offset;
5767         u16 max_agg_size;
5768
5769         tstorm_config.config_flags = RSS_FLAGS(bp);
5770
5771         if (is_multi(bp))
5772                 tstorm_config.rss_result_mask = MULTI_MASK;
5773
5774         /* Enable TPA if needed */
5775         if (bp->flags & TPA_ENABLE_FLAG)
5776                 tstorm_config.config_flags |=
5777                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5778
5779         if (IS_E1HMF(bp))
5780                 tstorm_config.config_flags |=
5781                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5782
5783         tstorm_config.leading_client_id = BP_L_ID(bp);
5784
5785         REG_WR(bp, BAR_TSTRORM_INTMEM +
5786                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5787                (*(u32 *)&tstorm_config));
5788
5789         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5790         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5791         bnx2x_set_storm_rx_mode(bp);
5792
5793         for_each_queue(bp, i) {
5794                 u8 cl_id = bp->fp[i].cl_id;
5795
5796                 /* reset xstorm per client statistics */
5797                 offset = BAR_XSTRORM_INTMEM +
5798                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5799                 for (j = 0;
5800                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5801                         REG_WR(bp, offset + j*4, 0);
5802
5803                 /* reset tstorm per client statistics */
5804                 offset = BAR_TSTRORM_INTMEM +
5805                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5806                 for (j = 0;
5807                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5808                         REG_WR(bp, offset + j*4, 0);
5809
5810                 /* reset ustorm per client statistics */
5811                 offset = BAR_USTRORM_INTMEM +
5812                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5813                 for (j = 0;
5814                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5815                         REG_WR(bp, offset + j*4, 0);
5816         }
5817
5818         /* Init statistics related context */
5819         stats_flags.collect_eth = 1;
5820
5821         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5822                ((u32 *)&stats_flags)[0]);
5823         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5824                ((u32 *)&stats_flags)[1]);
5825
5826         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5827                ((u32 *)&stats_flags)[0]);
5828         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5829                ((u32 *)&stats_flags)[1]);
5830
5831         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5832                ((u32 *)&stats_flags)[0]);
5833         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5834                ((u32 *)&stats_flags)[1]);
5835
5836         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5837                ((u32 *)&stats_flags)[0]);
5838         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5839                ((u32 *)&stats_flags)[1]);
5840
5841         REG_WR(bp, BAR_XSTRORM_INTMEM +
5842                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5843                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5844         REG_WR(bp, BAR_XSTRORM_INTMEM +
5845                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5846                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5847
5848         REG_WR(bp, BAR_TSTRORM_INTMEM +
5849                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5850                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5851         REG_WR(bp, BAR_TSTRORM_INTMEM +
5852                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5853                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5854
5855         REG_WR(bp, BAR_USTRORM_INTMEM +
5856                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5857                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5858         REG_WR(bp, BAR_USTRORM_INTMEM +
5859                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5860                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5861
5862         if (CHIP_IS_E1H(bp)) {
5863                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5864                         IS_E1HMF(bp));
5865                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5866                         IS_E1HMF(bp));
5867                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5868                         IS_E1HMF(bp));
5869                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5870                         IS_E1HMF(bp));
5871
5872                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5873                          bp->e1hov);
5874         }
5875
5876         /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5877         max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
5878                                    SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
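        /* (the 0xffff cap keeps the result within the u16 register
         *  written per client below) */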
5879         for_each_queue(bp, i) {
5880                 struct bnx2x_fastpath *fp = &bp->fp[i];
5881
5882                 REG_WR(bp, BAR_USTRORM_INTMEM +
5883                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5884                        U64_LO(fp->rx_comp_mapping));
5885                 REG_WR(bp, BAR_USTRORM_INTMEM +
5886                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5887                        U64_HI(fp->rx_comp_mapping));
5888
5889                 /* Next page */
5890                 REG_WR(bp, BAR_USTRORM_INTMEM +
5891                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5892                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5893                 REG_WR(bp, BAR_USTRORM_INTMEM +
5894                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5895                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5896
5897                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5898                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5899                          max_agg_size);
5900         }
5901
5902         /* dropless flow control */
5903         if (CHIP_IS_E1H(bp)) {
5904                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5905
5906                 rx_pause.bd_thr_low = 250;
5907                 rx_pause.cqe_thr_low = 250;
5908                 rx_pause.cos = 1;
5909                 rx_pause.sge_thr_low = 0;
5910                 rx_pause.bd_thr_high = 350;
5911                 rx_pause.cqe_thr_high = 350;
5912                 rx_pause.sge_thr_high = 0;
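                /* the SGE thresholds stay 0 and are raised only for queues
                 * that have TPA enabled (per-queue loop below) */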
5913
5914                 for_each_queue(bp, i) {
5915                         struct bnx2x_fastpath *fp = &bp->fp[i];
5916
5917                         if (!fp->disable_tpa) {
5918                                 rx_pause.sge_thr_low = 150;
5919                                 rx_pause.sge_thr_high = 250;
5920                         }
5921
5923                         offset = BAR_USTRORM_INTMEM +
5924                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5925                                                                    fp->cl_id);
5926                         for (j = 0;
5927                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5928                              j++)
5929                                 REG_WR(bp, offset + j*4,
5930                                        ((u32 *)&rx_pause)[j]);
5931                 }
5932         }
5933
5934         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5935
5936         /* Init rate shaping and fairness contexts */
5937         if (IS_E1HMF(bp)) {
5938                 int vn;
5939
5940                 /* During init there is no active link.
5941                    Until link is up, set the link rate to 10Gbps */
5942                 bp->link_vars.line_speed = SPEED_10000;
5943                 bnx2x_init_port_minmax(bp);
5944
5945                 if (!BP_NOMCP(bp))
5946                         bp->mf_config =
5947                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5948                 bnx2x_calc_vn_weight_sum(bp);
5949
5950                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5951                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5952
5953                 /* Enable rate shaping and fairness */
5954                 bp->cmng.flags.cmng_enables |=
5955                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5956
5957         } else {
5958                 /* rate shaping and fairness are disabled */
5959                 DP(NETIF_MSG_IFUP,
5960                    "single function mode  minmax will be disabled\n");
5961         }
5962
5964         /* Store cmng structures to internal memory */
5965         if (bp->port.pmf)
5966                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5967                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5968                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5969                                ((u32 *)(&bp->cmng))[i]);
5970 }
5971
5972 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5973 {
5974         switch (load_code) {
5975         case FW_MSG_CODE_DRV_LOAD_COMMON:
5976                 bnx2x_init_internal_common(bp);
5977                 /* no break */
5978
5979         case FW_MSG_CODE_DRV_LOAD_PORT:
5980                 bnx2x_init_internal_port(bp);
5981                 /* no break */
5982
5983         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5984                 bnx2x_init_internal_func(bp);
5985                 break;
5986
5987         default:
5988                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5989                 break;
5990         }
5991 }
5992
5993 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5994 {
5995         int i;
5996
5997         for_each_queue(bp, i) {
5998                 struct bnx2x_fastpath *fp = &bp->fp[i];
5999
6000                 fp->bp = bp;
6001                 fp->state = BNX2X_FP_STATE_CLOSED;
6002                 fp->index = i;
6003                 fp->cl_id = BP_L_ID(bp) + i;
6004 #ifdef BCM_CNIC
6005                 fp->sb_id = fp->cl_id + 1;
6006 #else
6007                 fp->sb_id = fp->cl_id;
6008 #endif
6009                 DP(NETIF_MSG_IFUP,
6010                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
6011                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
6012                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
6013                               fp->sb_id);
6014                 bnx2x_update_fpsb_idx(fp);
6015         }
6016
6017         /* ensure status block indices were read */
6018         rmb();
6019
6021         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
6022                           DEF_SB_ID);
6023         bnx2x_update_dsb_idx(bp);
6024         bnx2x_update_coalesce(bp);
6025         bnx2x_init_rx_rings(bp);
6026         bnx2x_init_tx_ring(bp);
6027         bnx2x_init_sp_ring(bp);
6028         bnx2x_init_context(bp);
6029         bnx2x_init_internal(bp, load_code);
6030         bnx2x_init_ind_table(bp);
6031         bnx2x_stats_init(bp);
6032
6033         /* At this point, we are ready for interrupts */
6034         atomic_set(&bp->intr_sem, 0);
6035
6036         /* flush all before enabling interrupts */
6037         mb();
6038         mmiowb();
6039
6040         bnx2x_int_enable(bp);
6041
6042         /* Check for SPIO5 */
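        /* (SPIO5 carries the fan failure attention set up in
         *  bnx2x_setup_fan_failure_detection(); handle it now in case it
         *  was already asserted at load time) */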
6043         bnx2x_attn_int_deasserted0(bp,
6044                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6045                                    AEU_INPUTS_ATTN_BITS_SPIO5);
6046 }
6047
6048 /* end of nic init */
6049
6050 /*
6051  * gzip service functions
6052  */
6053
6054 static int bnx2x_gunzip_init(struct bnx2x *bp)
6055 {
6056         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6057                                             &bp->gunzip_mapping, GFP_KERNEL);
6058         if (bp->gunzip_buf  == NULL)
6059                 goto gunzip_nomem1;
6060
6061         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6062         if (bp->strm  == NULL)
6063                 goto gunzip_nomem2;
6064
6065         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6066                                       GFP_KERNEL);
6067         if (bp->strm->workspace == NULL)
6068                 goto gunzip_nomem3;
6069
6070         return 0;
6071
6072 gunzip_nomem3:
6073         kfree(bp->strm);
6074         bp->strm = NULL;
6075
6076 gunzip_nomem2:
6077         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6078                           bp->gunzip_mapping);
6079         bp->gunzip_buf = NULL;
6080
6081 gunzip_nomem1:
6082         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
6083                " decompression\n");
6084         return -ENOMEM;
6085 }
6086
6087 static void bnx2x_gunzip_end(struct bnx2x *bp)
6088 {
6089         kfree(bp->strm->workspace);
6090
6091         kfree(bp->strm);
6092         bp->strm = NULL;
6093
6094         if (bp->gunzip_buf) {
6095                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6096                                   bp->gunzip_mapping);
6097                 bp->gunzip_buf = NULL;
6098         }
6099 }
6100
6101 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6102 {
6103         int n, rc;
6104
6105         /* check gzip header */
6106         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6107                 BNX2X_ERR("Bad gzip header\n");
6108                 return -EINVAL;
6109         }
6110
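        /* a gzip member has a 10-byte fixed header; if the FNAME flag
         * (bit 3 of the FLG byte) is set, a NUL-terminated original file
         * name follows and must be skipped to reach the deflate data */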
6111         n = 10;
6112
6113 #define FNAME                           0x8
6114
6115         if (zbuf[3] & FNAME)
6116                 while ((zbuf[n++] != 0) && (n < len));
6117
6118         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6119         bp->strm->avail_in = len - n;
6120         bp->strm->next_out = bp->gunzip_buf;
6121         bp->strm->avail_out = FW_BUF_SIZE;
6122
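        /* negative windowBits: the payload is a raw deflate stream with
         * no zlib header or checksum */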
6123         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6124         if (rc != Z_OK)
6125                 return rc;
6126
6127         rc = zlib_inflate(bp->strm, Z_FINISH);
6128         if ((rc != Z_OK) && (rc != Z_STREAM_END))
6129                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6130                            bp->strm->msg);
6131
6132         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6133         if (bp->gunzip_outlen & 0x3)
6134                 netdev_err(bp->dev, "Firmware decompression error:"
6135                                     " gunzip_outlen (%d) not aligned\n",
6136                                 bp->gunzip_outlen);
6137         bp->gunzip_outlen >>= 2;
6138
6139         zlib_inflateEnd(bp->strm);
6140
6141         if (rc == Z_STREAM_END)
6142                 return 0;
6143
6144         return rc;
6145 }
6146
6147 /* nic load/unload */
6148
6149 /*
6150  * General service functions
6151  */
6152
6153 /* send a NIG loopback debug packet */
6154 static void bnx2x_lb_pckt(struct bnx2x *bp)
6155 {
6156         u32 wb_write[3];
6157
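        /* each DMAE write below pushes two data words plus a control word
         * into the NIG debug-packet interface (0x20 marks SOP, 0x10 EOP) */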
6158         /* Ethernet source and destination addresses */
6159         wb_write[0] = 0x55555555;
6160         wb_write[1] = 0x55555555;
6161         wb_write[2] = 0x20;             /* SOP */
6162         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6163
6164         /* NON-IP protocol */
6165         wb_write[0] = 0x09000000;
6166         wb_write[1] = 0x55555555;
6167         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
6168         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6169 }
6170
6171 /* Some of the internal memories are not directly readable
6172  * from the driver, so to test them we send debug packets
6173  * over the NIG loopback path.
6174  */
6175 static int bnx2x_int_mem_test(struct bnx2x *bp)
6176 {
6177         int factor;
6178         int count, i;
6179         u32 val = 0;
6180
6181         if (CHIP_REV_IS_FPGA(bp))
6182                 factor = 120;
6183         else if (CHIP_REV_IS_EMUL(bp))
6184                 factor = 200;
6185         else
6186                 factor = 1;
6187
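        /* FPGA and emulation platforms run far slower than silicon, so
         * the poll loops below are scaled by 'factor' */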
6188         DP(NETIF_MSG_HW, "start part1\n");
6189
6190         /* Disable inputs of parser neighbor blocks */
6191         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6192         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6193         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6194         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6195
6196         /*  Write 0 to parser credits for CFC search request */
6197         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6198
6199         /* send Ethernet packet */
6200         bnx2x_lb_pckt(bp);
6201
6202         /* TODO: should the NIG statistics be reset here? */
6203         /* Wait until NIG register shows 1 packet of size 0x10 */
6204         count = 1000 * factor;
6205         while (count) {
6206
6207                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6208                 val = *bnx2x_sp(bp, wb_data[0]);
6209                 if (val == 0x10)
6210                         break;
6211
6212                 msleep(10);
6213                 count--;
6214         }
6215         if (val != 0x10) {
6216                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6217                 return -1;
6218         }
6219
6220         /* Wait until PRS register shows 1 packet */
6221         count = 1000 * factor;
6222         while (count) {
6223                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6224                 if (val == 1)
6225                         break;
6226
6227                 msleep(10);
6228                 count--;
6229         }
6230         if (val != 0x1) {
6231                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6232                 return -2;
6233         }
6234
6235         /* Reset and init BRB, PRS */
6236         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6237         msleep(50);
6238         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6239         msleep(50);
6240         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6241         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6242
6243         DP(NETIF_MSG_HW, "part2\n");
6244
6245         /* Disable inputs of parser neighbor blocks */
6246         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6247         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6248         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6249         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6250
6251         /* Write 0 to parser credits for CFC search request */
6252         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6253
6254         /* send 10 Ethernet packets */
6255         for (i = 0; i < 10; i++)
6256                 bnx2x_lb_pckt(bp);
6257
6258         /* Wait until NIG register shows 10 + 1
6259            packets of size 11*0x10 = 0xb0 */
6260         count = 1000 * factor;
6261         while (count) {
6262
6263                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6264                 val = *bnx2x_sp(bp, wb_data[0]);
6265                 if (val == 0xb0)
6266                         break;
6267
6268                 msleep(10);
6269                 count--;
6270         }
6271         if (val != 0xb0) {
6272                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6273                 return -3;
6274         }
6275
6276         /* Wait until PRS register shows 2 packets */
6277         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6278         if (val != 2)
6279                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6280
6281         /* Write 1 to parser credits for CFC search request */
6282         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6283
6284         /* Wait until PRS register shows 3 packets */
6285         msleep(10 * factor);
6286         /* and re-read the PRS packet counter */
6287         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6288         if (val != 3)
6289                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6290
6291         /* clear NIG EOP FIFO */
6292         for (i = 0; i < 11; i++)
6293                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6294         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6295         if (val != 1) {
6296                 BNX2X_ERR("clear of NIG failed\n");
6297                 return -4;
6298         }
6299
6300         /* Reset and init BRB, PRS, NIG */
6301         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6302         msleep(50);
6303         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6304         msleep(50);
6305         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6306         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6307 #ifndef BCM_CNIC
6308         /* set NIC mode */
6309         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6310 #endif
6311
6312         /* Enable inputs of parser neighbor blocks */
6313         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6314         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6315         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6316         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6317
6318         DP(NETIF_MSG_HW, "done\n");
6319
6320         return 0; /* OK */
6321 }
6322
6323 static void enable_blocks_attention(struct bnx2x *bp)
6324 {
6325         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6326         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6327         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6328         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6329         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6330         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6331         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6332         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6333         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6334 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6335 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6336         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6337         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6338         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6339 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6340 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6341         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6342         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6343         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6344         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6345 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6346 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6347         if (CHIP_REV_IS_FPGA(bp))
6348                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
6349         else
6350                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
6351         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6352         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6353         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6354 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6355 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6356         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6357         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6358 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6359         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bit 3,4 masked */
6360 }
6361
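/* Per-block parity masks written by enable_blocks_parity(): a set bit
 * leaves that parity source masked, so 0x0 unmasks every source in the
 * block.
 */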
6362 static const struct {
6363         u32 addr;
6364         u32 mask;
6365 } bnx2x_parity_mask[] = {
6366         {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6367         {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6368         {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6369         {HC_REG_HC_PRTY_MASK, 0xffffffff},
6370         {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6371         {QM_REG_QM_PRTY_MASK, 0x0},
6372         {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6373         {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6374         {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6375         {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6376         {CDU_REG_CDU_PRTY_MASK, 0x0},
6377         {CFC_REG_CFC_PRTY_MASK, 0x0},
6378         {DBG_REG_DBG_PRTY_MASK, 0x0},
6379         {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6380         {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6381         {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6382         {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6383         {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6384         {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6385         {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6386         {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6387         {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6388         {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6389         {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6390         {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6391         {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6392         {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6393         {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6394 };
6395
6396 static void enable_blocks_parity(struct bnx2x *bp)
6397 {
6398         int i, mask_arr_len =
6399                 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
6400
6401         for (i = 0; i < mask_arr_len; i++)
6402                 REG_WR(bp, bnx2x_parity_mask[i].addr,
6403                         bnx2x_parity_mask[i].mask);
6404 }
6405
6406
6407 static void bnx2x_reset_common(struct bnx2x *bp)
6408 {
6409         /* reset_common */
6410         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6411                0xd3ffff7f);
6412         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6413 }
6414
6415 static void bnx2x_init_pxp(struct bnx2x *bp)
6416 {
6417         u16 devctl;
6418         int r_order, w_order;
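        /* derive the PXP arbiter ordering from PCIe Device Control:
         * Max_Payload_Size gives the write order and Max_Read_Request_Size
         * the read order, unless the latter is forced via the 'mrrs'
         * module parameter */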
6419
6420         pci_read_config_word(bp->pdev,
6421                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6422         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6423         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6424         if (bp->mrrs == -1)
6425                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6426         else {
6427                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6428                 r_order = bp->mrrs;
6429         }
6430
6431         bnx2x_init_pxp_arb(bp, r_order, w_order);
6432 }
6433
6434 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6435 {
6436         int is_required;
6437         u32 val;
6438         int port;
6439
6440         if (BP_NOMCP(bp))
6441                 return;
6442
6443         is_required = 0;
6444         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6445               SHARED_HW_CFG_FAN_FAILURE_MASK;
6446
6447         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6448                 is_required = 1;
6449
6450         /*
6451          * The fan failure mechanism is usually related to the PHY type since
6452          * the power consumption of the board is affected by the PHY. Currently,
6453          * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6454          */
6455         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6456                 for (port = PORT_0; port < PORT_MAX; port++) {
6457                         u32 phy_type =
6458                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
6459                                          external_phy_config) &
6460                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6461                         is_required |=
6462                                 ((phy_type ==
6463                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6464                                  (phy_type ==
6465                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6466                                  (phy_type ==
6467                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6468                 }
6469
6470         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6471
6472         if (is_required == 0)
6473                 return;
6474
6475         /* Fan failure is indicated by SPIO 5 */
6476         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6477                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
6478
6479         /* set to active low mode */
6480         val = REG_RD(bp, MISC_REG_SPIO_INT);
6481         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6482                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6483         REG_WR(bp, MISC_REG_SPIO_INT, val);
6484
6485         /* enable interrupt to signal the IGU */
6486         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6487         val |= (1 << MISC_REGISTERS_SPIO_5);
6488         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6489 }
6490
6491 static int bnx2x_init_common(struct bnx2x *bp)
6492 {
6493         u32 val, i;
6494 #ifdef BCM_CNIC
6495         u32 wb_write[2];
6496 #endif
6497
6498         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
6499
6500         bnx2x_reset_common(bp);
6501         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6502         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6503
6504         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6505         if (CHIP_IS_E1H(bp))
6506                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6507
6508         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6509         msleep(30);
6510         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6511
6512         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6513         if (CHIP_IS_E1(bp)) {
6514                 /* enable HW interrupt from PXP on USDM overflow
6515                    bit 16 on INT_MASK_0 */
6516                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6517         }
6518
6519         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6520         bnx2x_init_pxp(bp);
6521
6522 #ifdef __BIG_ENDIAN
6523         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6524         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6525         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6526         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6527         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6528         /* make sure this value is 0 */
6529         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6530
6531 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6532         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6533         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6534         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6535         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6536 #endif
6537
6538         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6539 #ifdef BCM_CNIC
6540         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6541         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6542         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6543 #endif
6544
6545         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6546                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6547
6548         /* let the HW do its magic ... */
6549         msleep(100);
6550         /* finish PXP init */
6551         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6552         if (val != 1) {
6553                 BNX2X_ERR("PXP2 CFG failed\n");
6554                 return -EBUSY;
6555         }
6556         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6557         if (val != 1) {
6558                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6559                 return -EBUSY;
6560         }
6561
6562         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6563         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6564
6565         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6566
6567         /* clean the DMAE memory */
6568         bp->dmae_ready = 1;
6569         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6570
6571         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6572         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6573         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6574         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6575
6576         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6577         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6578         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6579         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6580
6581         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6582
6583 #ifdef BCM_CNIC
6584         wb_write[0] = 0;
6585         wb_write[1] = 0;
6586         for (i = 0; i < 64; i++) {
6587                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6588                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6589
6590                 if (CHIP_IS_E1H(bp)) {
6591                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6592                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6593                                           wb_write, 2);
6594                 }
6595         }
6596 #endif
6597         /* soft reset pulse */
6598         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6599         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6600
6601 #ifdef BCM_CNIC
6602         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6603 #endif
6604
6605         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6606         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6607         if (!CHIP_REV_IS_SLOW(bp)) {
6608                 /* enable hw interrupt from doorbell Q */
6609                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6610         }
6611
6612         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6613         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6614         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6615 #ifndef BCM_CNIC
6616         /* set NIC mode */
6617         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6618 #endif
6619         if (CHIP_IS_E1H(bp))
6620                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6621
6622         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6623         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6624         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6625         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6626
6627         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6628         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6629         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6630         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6631
6632         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6633         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6634         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6635         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6636
6637         /* sync semi rtc */
6638         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6639                0x80000000);
6640         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6641                0x80000000);
6642
6643         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6644         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6645         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6646
6647         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6648         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
6649                 REG_WR(bp, i, random32());
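        /* the searcher KEYRSS registers are seeded with random values */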
6650         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6651 #ifdef BCM_CNIC
6652         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6653         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6654         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6655         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6656         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6657         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6658         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6659         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6660         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6661         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6662 #endif
6663         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6664
6665         if (sizeof(union cdu_context) != 1024)
6666                 /* we currently assume that a context is 1024 bytes */
6667                 dev_alert(&bp->pdev->dev, "please adjust the size "
6668                                           "of cdu_context(%ld)\n",
6669                          (long)sizeof(union cdu_context));
6670
6671         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
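        /* the 1024 in the global params must match sizeof(union
         * cdu_context), verified above */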
6672         val = (4 << 24) + (0 << 12) + 1024;
6673         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6674
6675         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6676         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6677         /* enable context validation interrupt from CFC */
6678         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6679
6680         /* set the thresholds to prevent CFC/CDU race */
6681         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6682
6683         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6684         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6685
6686         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6687         /* Reset PCIE errors for debug */
6688         REG_WR(bp, 0x2814, 0xffffffff);
6689         REG_WR(bp, 0x3820, 0xffffffff);
6690
6691         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6692         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6693         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6694         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6695
6696         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6697         if (CHIP_IS_E1H(bp)) {
6698                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6699                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6700         }
6701
6702         if (CHIP_REV_IS_SLOW(bp))
6703                 msleep(200);
6704
6705         /* finish CFC init */
6706         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6707         if (val != 1) {
6708                 BNX2X_ERR("CFC LL_INIT failed\n");
6709                 return -EBUSY;
6710         }
6711         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6712         if (val != 1) {
6713                 BNX2X_ERR("CFC AC_INIT failed\n");
6714                 return -EBUSY;
6715         }
6716         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6717         if (val != 1) {
6718                 BNX2X_ERR("CFC CAM_INIT failed\n");
6719                 return -EBUSY;
6720         }
6721         REG_WR(bp, CFC_REG_DEBUG0, 0);
6722
6723         /* read the NIG statistic
6724            to see if this is the first load since power-up */
6725         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6726         val = *bnx2x_sp(bp, wb_data[0]);
6727
6728         /* do internal memory self test */
6729         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6730                 BNX2X_ERR("internal mem self test failed\n");
6731                 return -EBUSY;
6732         }
6733
6734         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6735         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6736         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6737         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6738         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6739                 bp->port.need_hw_lock = 1;
6740                 break;
6741
6742         default:
6743                 break;
6744         }
6745
6746         bnx2x_setup_fan_failure_detection(bp);
6747
6748         /* clear PXP2 attentions */
6749         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6750
6751         enable_blocks_attention(bp);
6752         if (CHIP_PARITY_SUPPORTED(bp))
6753                 enable_blocks_parity(bp);
6754
6755         if (!BP_NOMCP(bp)) {
6756                 bnx2x_acquire_phy_lock(bp);
6757                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6758                 bnx2x_release_phy_lock(bp);
6759         } else
6760                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6761
6762         return 0;
6763 }
6764
6765 static int bnx2x_init_port(struct bnx2x *bp)
6766 {
6767         int port = BP_PORT(bp);
6768         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6769         u32 low, high;
6770         u32 val;
6771
6772         DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
6773
6774         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6775
6776         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6777         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6778
6779         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6780         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6781         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6782         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6783
6784 #ifdef BCM_CNIC
6785         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6786
6787         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6788         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6789         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6790 #endif
6791
6792         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6793
6794         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6795         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6796                 /* no pause for emulation and FPGA */
6797                 low = 0;
6798                 high = 513;
6799         } else {
6800                 if (IS_E1HMF(bp))
6801                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6802                 else if (bp->dev->mtu > 4096) {
6803                         if (bp->flags & ONE_PORT_FLAG)
6804                                 low = 160;
6805                         else {
6806                                 val = bp->dev->mtu;
6807                                 /* (24*1024 + val*4)/256 */
6808                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6809                         }
6810                 } else
6811                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6812                 high = low + 56;        /* 14*1024/256 */
6813         }
6814         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6815         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
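        /*
         * A worked example (assuming a standard two-port board, so
         * ONE_PORT_FLAG is clear): with MTU 9000 the branch above gives
         * low = 96 + 9000/64 + 1 = 237 BRB blocks of 256 bytes, i.e.
         * (24k + 4*MTU)/256, and high = low + 56 (14k/256).
         */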
6816
6817
6818         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6819
6820         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6821         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6822         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6823         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6824
6825         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6826         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6827         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6828         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6829
6830         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6831         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6832
6833         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6834
6835         /* configure PBF to work without PAUSE, MTU 9000 */
6836         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6837
6838         /* update threshold */
6839         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6840         /* update init credit */
6841         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6842
6843         /* probe changes */
6844         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6845         msleep(5);
6846         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6847
6848 #ifdef BCM_CNIC
6849         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6850 #endif
6851         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6852         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6853
6854         if (CHIP_IS_E1(bp)) {
6855                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6856                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6857         }
6858         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6859
6860         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6861         /* init aeu_mask_attn_func_0/1:
6862          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6863          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6864          *             bits 4-7 are used for "per vn group attention" */
6865         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6866                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6867
6868         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6869         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6870         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6871         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6872         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6873
6874         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6875
6876         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6877
6878         if (CHIP_IS_E1H(bp)) {
6879                 /* 0x2 disable e1hov, 0x1 enable */
6880                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6881                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6882
6883                 {
6884                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6885                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6886                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6887                 }
6888         }
6889
6890         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6891         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6892
6893         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6894         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6895                 {
6896                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6897
6898                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6899                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6900
6901                 /* The GPIO should be swapped if the swap register is
6902                    set and active */
6903                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6904                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6905
6906                 /* Select function upon port-swap configuration */
6907                 if (port == 0) {
6908                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6909                         aeu_gpio_mask = (swap_val && swap_override) ?
6910                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6911                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6912                 } else {
6913                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6914                         aeu_gpio_mask = (swap_val && swap_override) ?
6915                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6916                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6917                 }
6918                 val = REG_RD(bp, offset);
6919                 /* add GPIO3 to group */
6920                 val |= aeu_gpio_mask;
6921                 REG_WR(bp, offset, val);
6922                 }
6923                 break;
6924
6925         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6926         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6927                 /* add SPIO 5 to group 0 */
6928                 {
6929                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6930                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6931                 val = REG_RD(bp, reg_addr);
6932                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6933                 REG_WR(bp, reg_addr, val);
6934                 }
6935                 break;
6936
6937         default:
6938                 break;
6939         }
6940
6941         bnx2x__link_reset(bp);
6942
6943         return 0;
6944 }
6945
6946 #define ILT_PER_FUNC            (768/2)
6947 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6948 /* the physical address is shifted right 12 bits and a 1=valid
6949    bit is added at bit 52 (the 53rd bit);
6950    then, since this is a wide register(TM),
6951    we split it into two 32-bit writes
6952  */
6953 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6954 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
6955 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6956 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
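/*
 * A worked example of the split, using a hypothetical DMA address of
 * 0x12_3456_7000:
 *   ONCHIP_ADDR1 = (0x1234567000 >> 12) & 0xFFFFFFFF = 0x01234567
 *   ONCHIP_ADDR2 = (1 << 20) | (0x1234567000 >> 44) = 0x00100000
 * The valid bit thus lands in bit 20 of the high word - bit 52 of the
 * combined 64-bit entry, the "53rd bit" mentioned above.
 */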
6957
6958 #ifdef BCM_CNIC
6959 #define CNIC_ILT_LINES          127
6960 #define CNIC_CTX_PER_ILT        16
6961 #else
6962 #define CNIC_ILT_LINES          0
6963 #endif
6964
6965 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6966 {
6967         int reg;
6968
6969         if (CHIP_IS_E1H(bp))
6970                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6971         else /* E1 */
6972                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6973
6974         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6975 }
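/*
 * Each on-chip address table entry is 8 bytes wide (hence index*8), so
 * the ONCHIP_ADDR1/ONCHIP_ADDR2 halves are pushed together with a
 * single wide-bus write; E1H exposes the table at the _B0 offset,
 * while E1 uses the original PXP2_REG_RQ_ONCHIP_AT window.
 */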
6976
6977 static int bnx2x_init_func(struct bnx2x *bp)
6978 {
6979         int port = BP_PORT(bp);
6980         int func = BP_FUNC(bp);
6981         u32 addr, val;
6982         int i;
6983
6984         DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
6985
6986         /* set MSI reconfigure capability */
6987         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6988         val = REG_RD(bp, addr);
6989         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6990         REG_WR(bp, addr, val);
6991
6992         i = FUNC_ILT_BASE(func);
6993
6994         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6995         if (CHIP_IS_E1H(bp)) {
6996                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6997                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6998         } else /* E1 */
6999                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
7000                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
7001
7002 #ifdef BCM_CNIC
7003         i += 1 + CNIC_ILT_LINES;
7004         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
7005         if (CHIP_IS_E1(bp))
7006                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
7007         else {
7008                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
7009                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
7010         }
7011
7012         i++;
7013         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
7014         if (CHIP_IS_E1(bp))
7015                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
7016         else {
7017                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
7018                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
7019         }
7020
7021         i++;
7022         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
7023         if (CHIP_IS_E1(bp))
7024                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
7025         else {
7026                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
7027                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
7028         }
7029
7030         /* tell the searcher where the T2 table is */
7031         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
7032
7033         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
7034                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
7035
7036         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
7037                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
7038                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
7039
7040         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
7041 #endif
7042
7043         if (CHIP_IS_E1H(bp)) {
7044                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
7045                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
7046                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
7047                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
7048                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
7049                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
7050                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
7051                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
7052                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
7053
7054                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7055                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
7056         }
7057
7058         /* HC init per function */
7059         if (CHIP_IS_E1H(bp)) {
7060                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7061
7062                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7063                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7064         }
7065         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
7066
7067         /* Reset PCIE errors for debug */
7068         REG_WR(bp, 0x2114, 0xffffffff);
7069         REG_WR(bp, 0x2120, 0xffffffff);
7070
7071         return 0;
7072 }
7073
7074 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7075 {
7076         int i, rc = 0;
7077
7078         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
7079            BP_FUNC(bp), load_code);
7080
7081         bp->dmae_ready = 0;
7082         mutex_init(&bp->dmae_mutex);
7083         rc = bnx2x_gunzip_init(bp);
7084         if (rc)
7085                 return rc;
7086
7087         switch (load_code) {
7088         case FW_MSG_CODE_DRV_LOAD_COMMON:
7089                 rc = bnx2x_init_common(bp);
7090                 if (rc)
7091                         goto init_hw_err;
7092                 /* no break */
7093
7094         case FW_MSG_CODE_DRV_LOAD_PORT:
7095                 bp->dmae_ready = 1;
7096                 rc = bnx2x_init_port(bp);
7097                 if (rc)
7098                         goto init_hw_err;
7099                 /* no break */
7100
7101         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7102                 bp->dmae_ready = 1;
7103                 rc = bnx2x_init_func(bp);
7104                 if (rc)
7105                         goto init_hw_err;
7106                 break;
7107
7108         default:
7109                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7110                 break;
7111         }
7112
7113         if (!BP_NOMCP(bp)) {
7114                 int func = BP_FUNC(bp);
7115
7116                 bp->fw_drv_pulse_wr_seq =
7117                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
7118                                  DRV_PULSE_SEQ_MASK);
7119                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7120         }
7121
7122         /* this needs to be done before gunzip end */
7123         bnx2x_zero_def_sb(bp);
7124         for_each_queue(bp, i)
7125                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7126 #ifdef BCM_CNIC
7127         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7128 #endif
7129
7130 init_hw_err:
7131         bnx2x_gunzip_end(bp);
7132
7133         return rc;
7134 }
7135
7136 static void bnx2x_free_mem(struct bnx2x *bp)
7137 {
7138
7139 #define BNX2X_PCI_FREE(x, y, size) \
7140         do { \
7141                 if (x) { \
7142                         dma_free_coherent(&bp->pdev->dev, size, x, y); \
7143                         x = NULL; \
7144                         y = 0; \
7145                 } \
7146         } while (0)
7147
7148 #define BNX2X_FREE(x) \
7149         do { \
7150                 if (x) { \
7151                         vfree(x); \
7152                         x = NULL; \
7153                 } \
7154         } while (0)
7155
7156         int i;
7157
7158         /* fastpath */
7159         /* Common */
7160         for_each_queue(bp, i) {
7161
7162                 /* status blocks */
7163                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7164                                bnx2x_fp(bp, i, status_blk_mapping),
7165                                sizeof(struct host_status_block));
7166         }
7167         /* Rx */
7168         for_each_queue(bp, i) {
7169
7170                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7171                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7172                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7173                                bnx2x_fp(bp, i, rx_desc_mapping),
7174                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
7175
7176                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7177                                bnx2x_fp(bp, i, rx_comp_mapping),
7178                                sizeof(struct eth_fast_path_rx_cqe) *
7179                                NUM_RCQ_BD);
7180
7181                 /* SGE ring */
7182                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7183                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7184                                bnx2x_fp(bp, i, rx_sge_mapping),
7185                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7186         }
7187         /* Tx */
7188         for_each_queue(bp, i) {
7189
7190                 /* fastpath tx rings: tx_buf tx_desc */
7191                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7192                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7193                                bnx2x_fp(bp, i, tx_desc_mapping),
7194                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7195         }
7196         /* end of fastpath */
7197
7198         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7199                        sizeof(struct host_def_status_block));
7200
7201         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7202                        sizeof(struct bnx2x_slowpath));
7203
7204 #ifdef BCM_CNIC
7205         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7206         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7207         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7208         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
7209         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7210                        sizeof(struct host_status_block));
7211 #endif
7212         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7213
7214 #undef BNX2X_PCI_FREE
7215 #undef BNX2X_FREE
7216 }
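/*
 * Both free macros above are deliberately NULL-safe: they check the
 * pointer before freeing and then clear it (and the DMA handle in the
 * coherent case), so bnx2x_free_mem() can be called on a partially
 * allocated bp - which is exactly what the allocation error path
 * below does.
 */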
7217
7218 static int bnx2x_alloc_mem(struct bnx2x *bp)
7219 {
7220
7221 #define BNX2X_PCI_ALLOC(x, y, size) \
7222         do { \
7223                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
7224                 if (x == NULL) \
7225                         goto alloc_mem_err; \
7226                 memset(x, 0, size); \
7227         } while (0)
7228
7229 #define BNX2X_ALLOC(x, size) \
7230         do { \
7231                 x = vmalloc(size); \
7232                 if (x == NULL) \
7233                         goto alloc_mem_err; \
7234                 memset(x, 0, size); \
7235         } while (0)
7236
7237         int i;
7238
7239         /* fastpath */
7240         /* Common */
7241         for_each_queue(bp, i) {
7242                 bnx2x_fp(bp, i, bp) = bp;
7243
7244                 /* status blocks */
7245                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
7246                                 &bnx2x_fp(bp, i, status_blk_mapping),
7247                                 sizeof(struct host_status_block));
7248         }
7249         /* Rx */
7250         for_each_queue(bp, i) {
7251
7252                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7253                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
7254                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
7255                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
7256                                 &bnx2x_fp(bp, i, rx_desc_mapping),
7257                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7258
7259                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
7260                                 &bnx2x_fp(bp, i, rx_comp_mapping),
7261                                 sizeof(struct eth_fast_path_rx_cqe) *
7262                                 NUM_RCQ_BD);
7263
7264                 /* SGE ring */
7265                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
7266                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
7267                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
7268                                 &bnx2x_fp(bp, i, rx_sge_mapping),
7269                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7270         }
7271         /* Tx */
7272         for_each_queue(bp, i) {
7273
7274                 /* fastpath tx rings: tx_buf tx_desc */
7275                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
7276                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
7277                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
7278                                 &bnx2x_fp(bp, i, tx_desc_mapping),
7279                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7280         }
7281         /* end of fastpath */
7282
7283         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7284                         sizeof(struct host_def_status_block));
7285
7286         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7287                         sizeof(struct bnx2x_slowpath));
7288
7289 #ifdef BCM_CNIC
7290         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
7291
7292         /* allocate the searcher T2 table;
7293            we allocate 1/4 of the allocation for T2
7294            (which is not entered into the ILT) */
7295         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
7296
7297         /* Initialize T2 (for 1024 connections) */
7298         for (i = 0; i < 16*1024; i += 64)
7299                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
7300
7301         /* Timer block array (8*MAX_CONN), physically uncached; 1024 conns for now */
7302         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
7303
7304         /* QM queues (128*MAX_CONN) */
7305         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
7306
7307         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
7308                         sizeof(struct host_status_block));
7309 #endif
7310
7311         /* Slow path ring */
7312         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7313
7314         return 0;
7315
7316 alloc_mem_err:
7317         bnx2x_free_mem(bp);
7318         return -ENOMEM;
7319
7320 #undef BNX2X_PCI_ALLOC
7321 #undef BNX2X_ALLOC
7322 }
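/*
 * Typical failure flow for the macros above: the first allocation that
 * returns NULL jumps to alloc_mem_err, bnx2x_free_mem() releases
 * whatever was already set up (NULL entries are skipped), and the
 * caller sees -ENOMEM. This assumes bp starts out zeroed, as it does
 * for a netdev private area.
 */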
7323
7324 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7325 {
7326         int i;
7327
7328         for_each_queue(bp, i) {
7329                 struct bnx2x_fastpath *fp = &bp->fp[i];
7330
7331                 u16 bd_cons = fp->tx_bd_cons;
7332                 u16 sw_prod = fp->tx_pkt_prod;
7333                 u16 sw_cons = fp->tx_pkt_cons;
7334
7335                 while (sw_cons != sw_prod) {
7336                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7337                         sw_cons++;
7338                 }
7339         }
7340 }
7341
7342 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7343 {
7344         int i, j;
7345
7346         for_each_queue(bp, j) {
7347                 struct bnx2x_fastpath *fp = &bp->fp[j];
7348
7349                 for (i = 0; i < NUM_RX_BD; i++) {
7350                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7351                         struct sk_buff *skb = rx_buf->skb;
7352
7353                         if (skb == NULL)
7354                                 continue;
7355
7356                         dma_unmap_single(&bp->pdev->dev,
7357                                          dma_unmap_addr(rx_buf, mapping),
7358                                          bp->rx_buf_size, DMA_FROM_DEVICE);
7359
7360                         rx_buf->skb = NULL;
7361                         dev_kfree_skb(skb);
7362                 }
7363                 if (!fp->disable_tpa)
7364                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7365                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
7366                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
7367         }
7368 }
7369
7370 static void bnx2x_free_skbs(struct bnx2x *bp)
7371 {
7372         bnx2x_free_tx_skbs(bp);
7373         bnx2x_free_rx_skbs(bp);
7374 }
7375
7376 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7377 {
7378         int i, offset = 1;
7379
7380         free_irq(bp->msix_table[0].vector, bp->dev);
7381         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
7382            bp->msix_table[0].vector);
7383
7384 #ifdef BCM_CNIC
7385         offset++;
7386 #endif
7387         for_each_queue(bp, i) {
7388                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
7389                    "state %x\n", i, bp->msix_table[i + offset].vector,
7390                    bnx2x_fp(bp, i, state));
7391
7392                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
7393         }
7394 }
7395
7396 static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
7397 {
7398         if (bp->flags & USING_MSIX_FLAG) {
7399                 if (!disable_only)
7400                         bnx2x_free_msix_irqs(bp);
7401                 pci_disable_msix(bp->pdev);
7402                 bp->flags &= ~USING_MSIX_FLAG;
7403
7404         } else if (bp->flags & USING_MSI_FLAG) {
7405                 if (!disable_only)
7406                         free_irq(bp->pdev->irq, bp->dev);
7407                 pci_disable_msi(bp->pdev);
7408                 bp->flags &= ~USING_MSI_FLAG;
7409
7410         } else if (!disable_only)
7411                 free_irq(bp->pdev->irq, bp->dev);
7412 }
7413
7414 static int bnx2x_enable_msix(struct bnx2x *bp)
7415 {
7416         int i, rc, offset = 1;
7417         int igu_vec = 0;
7418
7419         bp->msix_table[0].entry = igu_vec;
7420         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
7421
7422 #ifdef BCM_CNIC
7423         igu_vec = BP_L_ID(bp) + offset;
7424         bp->msix_table[1].entry = igu_vec;
7425         DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7426         offset++;
7427 #endif
7428         for_each_queue(bp, i) {
7429                 igu_vec = BP_L_ID(bp) + offset + i;
7430                 bp->msix_table[i + offset].entry = igu_vec;
7431                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7432                    "(fastpath #%u)\n", i + offset, igu_vec, i);
7433         }
7434
7435         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7436                              BNX2X_NUM_QUEUES(bp) + offset);
7437
7438         /*
7439          * reconfigure number of tx/rx queues according to available
7440          * MSI-X vectors
7441          */
7442         if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7443                 /* vectors available for FP */
7444                 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7445
7446                 DP(NETIF_MSG_IFUP,
7447                    "Trying to use less MSI-X vectors: %d\n", rc);
7448
7449                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7450
7451                 if (rc) {
7452                         DP(NETIF_MSG_IFUP,
7453                            "MSI-X is not attainable  rc %d\n", rc);
7454                         return rc;
7455                 }
7456
7457                 bp->num_queues = min(bp->num_queues, fp_vec);
7458
7459                 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7460                                   bp->num_queues);
7461         } else if (rc) {
7462                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
7463                 return rc;
7464         }
7465
7466         bp->flags |= USING_MSIX_FLAG;
7467
7468         return 0;
7469 }
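/*
 * The retry above relies on the pci_enable_msix() contract of this
 * kernel generation: 0 means every requested vector was granted, a
 * negative value is a hard error, and a positive value is how many
 * vectors *could* be granted - so the driver asks again for exactly
 * that many and shrinks num_queues to the remaining fastpath vectors.
 */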
7470
7471 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7472 {
7473         int i, rc, offset = 1;
7474
7475         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7476                          bp->dev->name, bp->dev);
7477         if (rc) {
7478                 BNX2X_ERR("request sp irq failed\n");
7479                 return -EBUSY;
7480         }
7481
7482 #ifdef BCM_CNIC
7483         offset++;
7484 #endif
7485         for_each_queue(bp, i) {
7486                 struct bnx2x_fastpath *fp = &bp->fp[i];
7487                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7488                          bp->dev->name, i);
7489
7490                 rc = request_irq(bp->msix_table[i + offset].vector,
7491                                  bnx2x_msix_fp_int, 0, fp->name, fp);
7492                 if (rc) {
7493                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
7494                         bnx2x_free_msix_irqs(bp);
7495                         return -EBUSY;
7496                 }
7497
7498                 fp->state = BNX2X_FP_STATE_IRQ;
7499         }
7500
7501         i = BNX2X_NUM_QUEUES(bp);
7502         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
7503                " ... fp[%d] %d\n",
7504                bp->msix_table[0].vector,
7505                0, bp->msix_table[offset].vector,
7506                i - 1, bp->msix_table[offset + i - 1].vector);
7507
7508         return 0;
7509 }
7510
7511 static int bnx2x_enable_msi(struct bnx2x *bp)
7512 {
7513         int rc;
7514
7515         rc = pci_enable_msi(bp->pdev);
7516         if (rc) {
7517                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7518                 return -1;
7519         }
7520         bp->flags |= USING_MSI_FLAG;
7521
7522         return 0;
7523 }
7524
7525 static int bnx2x_req_irq(struct bnx2x *bp)
7526 {
7527         unsigned long flags;
7528         int rc;
7529
7530         if (bp->flags & USING_MSI_FLAG)
7531                 flags = 0;
7532         else
7533                 flags = IRQF_SHARED;
7534
7535         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7536                          bp->dev->name, bp->dev);
7537         if (!rc)
7538                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7539
7540         return rc;
7541 }
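/*
 * MSI vectors are never shared between devices, so no IRQF_SHARED is
 * needed in that case; a legacy INTx line may be shared with other
 * devices, hence the flag on the fallback path.
 */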
7542
7543 static void bnx2x_napi_enable(struct bnx2x *bp)
7544 {
7545         int i;
7546
7547         for_each_queue(bp, i)
7548                 napi_enable(&bnx2x_fp(bp, i, napi));
7549 }
7550
7551 static void bnx2x_napi_disable(struct bnx2x *bp)
7552 {
7553         int i;
7554
7555         for_each_queue(bp, i)
7556                 napi_disable(&bnx2x_fp(bp, i, napi));
7557 }
7558
7559 static void bnx2x_netif_start(struct bnx2x *bp)
7560 {
7561         int intr_sem;
7562
7563         intr_sem = atomic_dec_and_test(&bp->intr_sem);
7564         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7565
7566         if (intr_sem) {
7567                 if (netif_running(bp->dev)) {
7568                         bnx2x_napi_enable(bp);
7569                         bnx2x_int_enable(bp);
7570                         if (bp->state == BNX2X_STATE_OPEN)
7571                                 netif_tx_wake_all_queues(bp->dev);
7572                 }
7573         }
7574 }
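/*
 * atomic_dec_and_test() returns true only when intr_sem drops to zero,
 * so NAPI and HW interrupts are re-armed only by the last caller
 * balancing a preceding bnx2x_netif_stop().
 */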
7575
7576 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7577 {
7578         bnx2x_int_disable_sync(bp, disable_hw);
7579         bnx2x_napi_disable(bp);
7580         netif_tx_disable(bp->dev);
7581 }
7582
7583 /*
7584  * Init service functions
7585  */
7586
7587 /**
7588  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7589  *
7590  * @param bp driver descriptor
7591  * @param set set or clear an entry (1 or 0)
7592  * @param mac pointer to a buffer containing a MAC
7593  * @param cl_bit_vec bit vector of clients to register a MAC for
7594  * @param cam_offset offset in a CAM to use
7595  * @param with_bcast set broadcast MAC as well
7596  */
7597 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7598                                       u32 cl_bit_vec, u8 cam_offset,
7599                                       u8 with_bcast)
7600 {
7601         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7602         int port = BP_PORT(bp);
7603
7604         /* CAM allocation
7605          * unicasts 0-31:port0 32-63:port1
7606          * multicast 64-127:port0 128-191:port1
7607          */
7608         config->hdr.length = 1 + (with_bcast ? 1 : 0);
7609         config->hdr.offset = cam_offset;
7610         config->hdr.client_id = 0xff;
7611         config->hdr.reserved1 = 0;
7612
7613         /* primary MAC */
7614         config->config_table[0].cam_entry.msb_mac_addr =
7615                                         swab16(*(u16 *)&mac[0]);
7616         config->config_table[0].cam_entry.middle_mac_addr =
7617                                         swab16(*(u16 *)&mac[2]);
7618         config->config_table[0].cam_entry.lsb_mac_addr =
7619                                         swab16(*(u16 *)&mac[4]);
7620         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7621         if (set)
7622                 config->config_table[0].target_table_entry.flags = 0;
7623         else
7624                 CAM_INVALIDATE(config->config_table[0]);
7625         config->config_table[0].target_table_entry.clients_bit_vector =
7626                                                 cpu_to_le32(cl_bit_vec);
7627         config->config_table[0].target_table_entry.vlan_id = 0;
7628
7629         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7630            (set ? "setting" : "clearing"),
7631            config->config_table[0].cam_entry.msb_mac_addr,
7632            config->config_table[0].cam_entry.middle_mac_addr,
7633            config->config_table[0].cam_entry.lsb_mac_addr);
7634
7635         /* broadcast */
7636         if (with_bcast) {
7637                 config->config_table[1].cam_entry.msb_mac_addr =
7638                         cpu_to_le16(0xffff);
7639                 config->config_table[1].cam_entry.middle_mac_addr =
7640                         cpu_to_le16(0xffff);
7641                 config->config_table[1].cam_entry.lsb_mac_addr =
7642                         cpu_to_le16(0xffff);
7643                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7644                 if (set)
7645                         config->config_table[1].target_table_entry.flags =
7646                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7647                 else
7648                         CAM_INVALIDATE(config->config_table[1]);
7649                 config->config_table[1].target_table_entry.clients_bit_vector =
7650                                                         cpu_to_le32(cl_bit_vec);
7651                 config->config_table[1].target_table_entry.vlan_id = 0;
7652         }
7653
7654         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7655                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7656                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7657 }
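/*
 * Byte-order note for the swab16() calls above: on a little-endian
 * host a MAC of 00:11:22:33:44:55 reads *(u16 *)&mac[0] as 0x1100,
 * which swab16() turns into the 0x0011 the CAM expects in
 * msb_mac_addr.
 */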
7658
7659 /**
7660  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7661  *
7662  * @param bp driver descriptor
7663  * @param set set or clear an entry (1 or 0)
7664  * @param mac pointer to a buffer containing a MAC
7665  * @param cl_bit_vec bit vector of clients to register a MAC for
7666  * @param cam_offset offset in a CAM to use
7667  */
7668 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7669                                        u32 cl_bit_vec, u8 cam_offset)
7670 {
7671         struct mac_configuration_cmd_e1h *config =
7672                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7673
7674         config->hdr.length = 1;
7675         config->hdr.offset = cam_offset;
7676         config->hdr.client_id = 0xff;
7677         config->hdr.reserved1 = 0;
7678
7679         /* primary MAC */
7680         config->config_table[0].msb_mac_addr =
7681                                         swab16(*(u16 *)&mac[0]);
7682         config->config_table[0].middle_mac_addr =
7683                                         swab16(*(u16 *)&mac[2]);
7684         config->config_table[0].lsb_mac_addr =
7685                                         swab16(*(u16 *)&mac[4]);
7686         config->config_table[0].clients_bit_vector =
7687                                         cpu_to_le32(cl_bit_vec);
7688         config->config_table[0].vlan_id = 0;
7689         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7690         if (set)
7691                 config->config_table[0].flags = BP_PORT(bp);
7692         else
7693                 config->config_table[0].flags =
7694                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7695
7696         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
7697            (set ? "setting" : "clearing"),
7698            config->config_table[0].msb_mac_addr,
7699            config->config_table[0].middle_mac_addr,
7700            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7701
7702         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7703                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7704                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7705 }
7706
7707 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7708                              int *state_p, int poll)
7709 {
7710         /* can take a while if any port is running */
7711         int cnt = 5000;
7712
7713         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7714            poll ? "polling" : "waiting", state, idx);
7715
7716         might_sleep();
7717         while (cnt--) {
7718                 if (poll) {
7719                         bnx2x_rx_int(bp->fp, 10);
7720                         /* if index is different from 0
7721                          * the reply for some commands will
7722                          * be on the non default queue
7723                          */
7724                         if (idx)
7725                                 bnx2x_rx_int(&bp->fp[idx], 10);
7726                 }
7727
7728                 mb(); /* state is changed by bnx2x_sp_event() */
7729                 if (*state_p == state) {
7730 #ifdef BNX2X_STOP_ON_ERROR
7731                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7732 #endif
7733                         return 0;
7734                 }
7735
7736                 msleep(1);
7737
7738                 if (bp->panic)
7739                         return -EIO;
7740         }
7741
7742         /* timeout! */
7743         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7744                   poll ? "polling" : "waiting", state, idx);
7745 #ifdef BNX2X_STOP_ON_ERROR
7746         bnx2x_panic();
7747 #endif
7748
7749         return -EBUSY;
7750 }
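/*
 * With cnt = 5000 and msleep(1) per iteration, the loop above gives a
 * ramrod roughly five seconds (in practice more, since msleep() may
 * oversleep) before reporting -EBUSY.
 */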
7751
7752 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7753 {
7754         bp->set_mac_pending++;
7755         smp_wmb();
7756
7757         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7758                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
7759
7760         /* Wait for a completion */
7761         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7762 }
7763
7764 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7765 {
7766         bp->set_mac_pending++;
7767         smp_wmb();
7768
7769         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7770                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7771                                   1);
7772
7773         /* Wait for a completion */
7774         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7775 }
7776
7777 #ifdef BCM_CNIC
7778 /**
7779  * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
7780  * MAC(s). This function will wait until the ramdord completion
7781  * returns.
7782  *
7783  * @param bp driver handle
7784  * @param set set or clear the CAM entry
7785  *
7786  * @return 0 on success, -ENODEV if the ramrod doesn't return.
7787  */
7788 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7789 {
7790         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7791
7792         bp->set_mac_pending++;
7793         smp_wmb();
7794
7795         /* Send a SET_MAC ramrod */
7796         if (CHIP_IS_E1(bp))
7797                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7798                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7799                                   1);
7800         else
7801                 /* CAM allocation for E1H
7802                  * unicasts: by func number
7803                  * multicast: 20+FUNC*20, 20 each
7804                  */
7805                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7806                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7807
7808         /* Wait for a completion when setting */
7809         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7810
7811         return 0;
7812 }
7813 #endif
7814
7815 static int bnx2x_setup_leading(struct bnx2x *bp)
7816 {
7817         int rc;
7818
7819         /* reset IGU state */
7820         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7821
7822         /* SETUP ramrod */
7823         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7824
7825         /* Wait for completion */
7826         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7827
7828         return rc;
7829 }
7830
7831 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7832 {
7833         struct bnx2x_fastpath *fp = &bp->fp[index];
7834
7835         /* reset IGU state */
7836         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7837
7838         /* SETUP ramrod */
7839         fp->state = BNX2X_FP_STATE_OPENING;
7840         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7841                       fp->cl_id, 0);
7842
7843         /* Wait for completion */
7844         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7845                                  &(fp->state), 0);
7846 }
7847
7848 static int bnx2x_poll(struct napi_struct *napi, int budget);
7849
7850 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7851 {
7852
7853         switch (bp->multi_mode) {
7854         case ETH_RSS_MODE_DISABLED:
7855                 bp->num_queues = 1;
7856                 break;
7857
7858         case ETH_RSS_MODE_REGULAR:
7859                 if (num_queues)
7860                         bp->num_queues = min_t(u32, num_queues,
7861                                                   BNX2X_MAX_QUEUES(bp));
7862                 else
7863                         bp->num_queues = min_t(u32, num_online_cpus(),
7864                                                   BNX2X_MAX_QUEUES(bp));
7865                 break;
7866
7867
7868         default:
7869                 bp->num_queues = 1;
7870                 break;
7871         }
7872 }
7873
7874 static int bnx2x_set_num_queues(struct bnx2x *bp)
7875 {
7876         int rc = 0;
7877
7878         switch (int_mode) {
7879         case INT_MODE_INTx:
7880         case INT_MODE_MSI:
7881                 bp->num_queues = 1;
7882                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7883                 break;
7884         default:
7885                 /* Set number of queues according to bp->multi_mode value */
7886                 bnx2x_set_num_queues_msix(bp);
7887
7888                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7889                    bp->num_queues);
7890
7891                 /* if we can't use MSI-X we only need one fp,
7892                  * so try to enable MSI-X with the requested number of fp's
7893                  * and fall back to MSI or legacy INTx with one fp
7894                  */
7895                 rc = bnx2x_enable_msix(bp);
7896                 if (rc)
7897                         /* failed to enable MSI-X */
7898                         bp->num_queues = 1;
7899                 break;
7900         }
7901         bp->dev->real_num_tx_queues = bp->num_queues;
7902         return rc;
7903 }
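/*
 * If MSI-X cannot be enabled, the driver falls back to a single queue,
 * which MSI or legacy INTx can then service; the actual MSI/INTx
 * request happens later, in bnx2x_nic_load().
 */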
7904
7905 #ifdef BCM_CNIC
7906 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7907 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7908 #endif
7909
7910 /* must be called with rtnl_lock */
7911 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7912 {
7913         u32 load_code;
7914         int i, rc;
7915
7916 #ifdef BNX2X_STOP_ON_ERROR
7917         if (unlikely(bp->panic))
7918                 return -EPERM;
7919 #endif
7920
7921         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7922
7923         rc = bnx2x_set_num_queues(bp);
7924
7925         if (bnx2x_alloc_mem(bp)) {
7926                 bnx2x_free_irq(bp, true);
7927                 return -ENOMEM;
7928         }
7929
7930         for_each_queue(bp, i)
7931                 bnx2x_fp(bp, i, disable_tpa) =
7932                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7933
7934         for_each_queue(bp, i)
7935                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7936                                bnx2x_poll, 128);
7937
7938         bnx2x_napi_enable(bp);
7939
7940         if (bp->flags & USING_MSIX_FLAG) {
7941                 rc = bnx2x_req_msix_irqs(bp);
7942                 if (rc) {
7943                         bnx2x_free_irq(bp, true);
7944                         goto load_error1;
7945                 }
7946         } else {
7947                 /* Fall back to INTx if MSI-X could not be enabled due to
7948                    lack of memory (in bnx2x_set_num_queues()) */
7949                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7950                         bnx2x_enable_msi(bp);
7951                 bnx2x_ack_int(bp);
7952                 rc = bnx2x_req_irq(bp);
7953                 if (rc) {
7954                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7955                         bnx2x_free_irq(bp, true);
7956                         goto load_error1;
7957                 }
7958                 if (bp->flags & USING_MSI_FLAG) {
7959                         bp->dev->irq = bp->pdev->irq;
7960                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
7961                                     bp->pdev->irq);
7962                 }
7963         }
7964
7965         /* Send the LOAD_REQUEST command to the MCP.
7966            It returns the type of LOAD command: if this is the
7967            first port to be initialized, the common blocks should
7968            be initialized as well; otherwise they should not.
7969         */
7970         if (!BP_NOMCP(bp)) {
7971                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7972                 if (!load_code) {
7973                         BNX2X_ERR("MCP response failure, aborting\n");
7974                         rc = -EBUSY;
7975                         goto load_error2;
7976                 }
7977                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7978                         rc = -EBUSY; /* other port in diagnostic mode */
7979                         goto load_error2;
7980                 }
7981
7982         } else {
7983                 int port = BP_PORT(bp);
7984
7985                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7986                    load_count[0], load_count[1], load_count[2]);
7987                 load_count[0]++;
7988                 load_count[1 + port]++;
7989                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7990                    load_count[0], load_count[1], load_count[2]);
7991                 if (load_count[0] == 1)
7992                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7993                 else if (load_count[1 + port] == 1)
7994                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7995                 else
7996                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7997         }
7998
7999         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
8000             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
8001                 bp->port.pmf = 1;
8002         else
8003                 bp->port.pmf = 0;
8004         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
8005
8006         /* Initialize HW */
8007         rc = bnx2x_init_hw(bp, load_code);
8008         if (rc) {
8009                 BNX2X_ERR("HW init failed, aborting\n");
8010                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8011                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8012                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8013                 goto load_error2;
8014         }
8015
8016         /* Setup NIC internals and enable interrupts */
8017         bnx2x_nic_init(bp, load_code);
8018
8019         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
8020             (bp->common.shmem2_base))
8021                 SHMEM2_WR(bp, dcc_support,
8022                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
8023                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
8024
8025         /* Send LOAD_DONE command to MCP */
8026         if (!BP_NOMCP(bp)) {
8027                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8028                 if (!load_code) {
8029                         BNX2X_ERR("MCP response failure, aborting\n");
8030                         rc = -EBUSY;
8031                         goto load_error3;
8032                 }
8033         }
8034
8035         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
8036
8037         rc = bnx2x_setup_leading(bp);
8038         if (rc) {
8039                 BNX2X_ERR("Setup leading failed!\n");
8040 #ifndef BNX2X_STOP_ON_ERROR
8041                 goto load_error3;
8042 #else
8043                 bp->panic = 1;
8044                 return -EBUSY;
8045 #endif
8046         }
8047
8048         if (CHIP_IS_E1H(bp))
8049                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
8050                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
8051                         bp->flags |= MF_FUNC_DIS;
8052                 }
8053
8054         if (bp->state == BNX2X_STATE_OPEN) {
8055 #ifdef BCM_CNIC
8056                 /* Enable Timer scan */
8057                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8058 #endif
8059                 for_each_nondefault_queue(bp, i) {
8060                         rc = bnx2x_setup_multi(bp, i);
8061                         if (rc)
8062 #ifdef BCM_CNIC
8063                                 goto load_error4;
8064 #else
8065                                 goto load_error3;
8066 #endif
8067                 }
8068
8069                 if (CHIP_IS_E1(bp))
8070                         bnx2x_set_eth_mac_addr_e1(bp, 1);
8071                 else
8072                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
8073 #ifdef BCM_CNIC
8074                 /* Set iSCSI L2 MAC */
8075                 mutex_lock(&bp->cnic_mutex);
8076                 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8077                         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8078                         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
8079                         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8080                                       CNIC_SB_ID(bp));
8081                 }
8082                 mutex_unlock(&bp->cnic_mutex);
8083 #endif
8084         }
8085
8086         if (bp->port.pmf)
8087                 bnx2x_initial_phy_init(bp, load_mode);
8088
8089         /* Start fast path */
8090         switch (load_mode) {
8091         case LOAD_NORMAL:
8092                 if (bp->state == BNX2X_STATE_OPEN) {
8093                         /* Tx queues should only be re-enabled */
8094                         netif_tx_wake_all_queues(bp->dev);
8095                 }
8096                 /* Initialize the receive filter. */
8097                 bnx2x_set_rx_mode(bp->dev);
8098                 break;
8099
8100         case LOAD_OPEN:
8101                 netif_tx_start_all_queues(bp->dev);
8102                 if (bp->state != BNX2X_STATE_OPEN)
8103                         netif_tx_disable(bp->dev);
8104                 /* Initialize the receive filter. */
8105                 bnx2x_set_rx_mode(bp->dev);
8106                 break;
8107
8108         case LOAD_DIAG:
8109                 /* Initialize the receive filter. */
8110                 bnx2x_set_rx_mode(bp->dev);
8111                 bp->state = BNX2X_STATE_DIAG;
8112                 break;
8113
8114         default:
8115                 break;
8116         }
8117
8118         if (!bp->port.pmf)
8119                 bnx2x__link_status_update(bp);
8120
8121         /* start the timer */
8122         mod_timer(&bp->timer, jiffies + bp->current_interval);
8123
8124 #ifdef BCM_CNIC
8125         bnx2x_setup_cnic_irq_info(bp);
8126         if (bp->state == BNX2X_STATE_OPEN)
8127                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8128 #endif
8129         bnx2x_inc_load_cnt(bp);
8130
8131         return 0;
8132
8133 #ifdef BCM_CNIC
8134 load_error4:
8135         /* Disable Timer scan */
8136         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8137 #endif
8138 load_error3:
8139         bnx2x_int_disable_sync(bp, 1);
8140         if (!BP_NOMCP(bp)) {
8141                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8142                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8143         }
8144         bp->port.pmf = 0;
8145         /* Free SKBs, SGEs, TPA pool and driver internals */
8146         bnx2x_free_skbs(bp);
8147         for_each_queue(bp, i)
8148                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8149 load_error2:
8150         /* Release IRQs */
8151         bnx2x_free_irq(bp, false);
8152 load_error1:
8153         bnx2x_napi_disable(bp);
8154         for_each_queue(bp, i)
8155                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8156         bnx2x_free_mem(bp);
8157
8158         return rc;
8159 }
8160
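/* Stopping a non-default queue is a two-step ramrod handshake: HALT the
 * connection and poll fp->state for BNX2X_FP_STATE_HALTED, then delete
 * the CFC entry and poll for BNX2X_FP_STATE_CLOSED. The completion path
 * updates fp->state as each ramrod completes.
 */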
8161 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8162 {
8163         struct bnx2x_fastpath *fp = &bp->fp[index];
8164         int rc;
8165
8166         /* halt the connection */
8167         fp->state = BNX2X_FP_STATE_HALTING;
8168         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
8169
8170         /* Wait for completion */
8171         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
8172                                &(fp->state), 1);
8173         if (rc) /* timeout */
8174                 return rc;
8175
8176         /* delete cfc entry */
8177         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
8178
8179         /* Wait for completion */
8180         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
8181                                &(fp->state), 1);
8182         return rc;
8183 }
8184
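/* Stopping the leading connection uses the same HALT handshake, but the
 * PORT_DELETE ramrod has no fastpath state to poll: its completion is
 * detected as a change of the default status block producer
 * (*bp->dsb_sp_prod), which is sampled before the ramrod is posted.
 */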
8185 static int bnx2x_stop_leading(struct bnx2x *bp)
8186 {
8187         __le16 dsb_sp_prod_idx;
8188         /* if the other port is handling traffic,
8189            this can take a lot of time */
8190         int cnt = 500;
8191         int rc;
8192
8193         might_sleep();
8194
8195         /* Send HALT ramrod */
8196         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
8197         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
8198
8199         /* Wait for completion */
8200         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
8201                                &(bp->fp[0].state), 1);
8202         if (rc) /* timeout */
8203                 return rc;
8204
8205         dsb_sp_prod_idx = *bp->dsb_sp_prod;
8206
8207         /* Send PORT_DELETE ramrod */
8208         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
8209
8210                 /* Wait for the completion to arrive on the default status
8211                    block. We are going to reset the chip anyway, so there
8212                    is not much to do if this times out.
8213                  */
8214         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
8215                 if (!cnt) {
8216                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
8217                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
8218                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
8219 #ifdef BNX2X_STOP_ON_ERROR
8220                         bnx2x_panic();
8221 #endif
8222                         rc = -EBUSY;
8223                         break;
8224                 }
8225                 cnt--;
8226                 msleep(1);
8227                 rmb(); /* Refresh the dsb_sp_prod */
8228         }
8229         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
8230         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
8231
8232         return rc;
8233 }
8234
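/* Per-function reset: mask the HC leading/trailing edge attentions for
 * the port, wait (10ms up to 2s, when CNIC is built in) for any timer
 * scan to stop, then clear this function's ILT window starting at
 * FUNC_ILT_BASE(func).
 */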
8235 static void bnx2x_reset_func(struct bnx2x *bp)
8236 {
8237         int port = BP_PORT(bp);
8238         int func = BP_FUNC(bp);
8239         int base, i;
8240
8241         /* Configure IGU */
8242         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8243         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8244
8245 #ifdef BCM_CNIC
8246         /* Disable Timer scan */
8247         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8248         /*
8249          * Wait for at least 10ms and up to 2 seconds for the timer scan to
8250          * complete
8251          */
8252         for (i = 0; i < 200; i++) {
8253                 msleep(10);
8254                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8255                         break;
8256         }
8257 #endif
8258         /* Clear ILT */
8259         base = FUNC_ILT_BASE(func);
8260         for (i = base; i < base + ILT_PER_FUNC; i++)
8261                 bnx2x_ilt_wr(bp, i, 0);
8262 }
8263
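/* Per-port quiesce: mask the port's NIG interrupt and AEU attentions,
 * stop Rx traffic into the BRB, then verify after 100ms that the BRB
 * has actually drained.
 */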
8264 static void bnx2x_reset_port(struct bnx2x *bp)
8265 {
8266         int port = BP_PORT(bp);
8267         u32 val;
8268
8269         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8270
8271         /* Do not rcv packets to BRB */
8272         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8273         /* Do not direct rcv packets that are not for MCP to the BRB */
8274         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8275                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8276
8277         /* Configure AEU */
8278         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8279
8280         msleep(100);
8281         /* Check for BRB port occupancy */
8282         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8283         if (val)
8284                 DP(NETIF_MSG_IFDOWN,
8285                    "BRB1 is not empty, %d blocks are occupied\n", val);
8286
8287         /* TODO: Close Doorbell port? */
8288 }
8289
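/* The MCP's unload response selects the reset scope; each case is a
 * superset of the one below it: UNLOAD_COMMON resets the port, the
 * function and the common blocks, UNLOAD_PORT resets the port and the
 * function, and UNLOAD_FUNCTION only the function itself.
 */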
8290 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8291 {
8292         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
8293            BP_FUNC(bp), reset_code);
8294
8295         switch (reset_code) {
8296         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8297                 bnx2x_reset_port(bp);
8298                 bnx2x_reset_func(bp);
8299                 bnx2x_reset_common(bp);
8300                 break;
8301
8302         case FW_MSG_CODE_DRV_UNLOAD_PORT:
8303                 bnx2x_reset_port(bp);
8304                 bnx2x_reset_func(bp);
8305                 break;
8306
8307         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8308                 bnx2x_reset_func(bp);
8309                 break;
8310
8311         default:
8312                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8313                 break;
8314         }
8315 }
8316
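/* Cleanup sequence on unload: drain the Tx queues, remove the unicast
 * MAC and invalidate the multicast configuration (CAM on E1, MC hash on
 * E1H), pick an unload request code from the unload mode and WOL
 * settings, stop all connections, and let the MCP (or the local
 * load_count[] bookkeeping when there is no MCP) decide how much of the
 * chip to reset.
 */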
8317 static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8318 {
8319         int port = BP_PORT(bp);
8320         u32 reset_code = 0;
8321         int i, cnt, rc;
8322
8323         /* Wait until tx fastpath tasks complete */
8324         for_each_queue(bp, i) {
8325                 struct bnx2x_fastpath *fp = &bp->fp[i];
8326
8327                 cnt = 1000;
8328                 while (bnx2x_has_tx_work_unload(fp)) {
8329
8330                         bnx2x_tx_int(fp);
8331                         if (!cnt) {
8332                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
8333                                           i);
8334 #ifdef BNX2X_STOP_ON_ERROR
8335                                 bnx2x_panic();
8336                                 return; /* void function - abort cleanup */
8337 #else
8338                                 break;
8339 #endif
8340                         }
8341                         cnt--;
8342                         msleep(1);
8343                 }
8344         }
8345         /* Give HW time to discard old tx messages */
8346         msleep(1);
8347
8348         if (CHIP_IS_E1(bp)) {
8349                 struct mac_configuration_cmd *config =
8350                                                 bnx2x_sp(bp, mcast_config);
8351
8352                 bnx2x_set_eth_mac_addr_e1(bp, 0);
8353
8354                 for (i = 0; i < config->hdr.length; i++)
8355                         CAM_INVALIDATE(config->config_table[i]);
8356
8357                 config->hdr.length = i;
8358                 if (CHIP_REV_IS_SLOW(bp))
8359                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8360                 else
8361                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
8362                 config->hdr.client_id = bp->fp->cl_id;
8363                 config->hdr.reserved1 = 0;
8364
8365                 bp->set_mac_pending++;
8366                 smp_wmb();
8367
8368                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8369                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8370                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8371
8372         } else { /* E1H */
8373                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8374
8375                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
8376
8377                 for (i = 0; i < MC_HASH_SIZE; i++)
8378                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
8379
8380                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
8381         }
8382 #ifdef BCM_CNIC
8383         /* Clear iSCSI L2 MAC */
8384         mutex_lock(&bp->cnic_mutex);
8385         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8386                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8387                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8388         }
8389         mutex_unlock(&bp->cnic_mutex);
8390 #endif
8391
8392         if (unload_mode == UNLOAD_NORMAL)
8393                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8394
8395         else if (bp->flags & NO_WOL_FLAG)
8396                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8397
8398         else if (bp->wol) {
8399                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8400                 u8 *mac_addr = bp->dev->dev_addr;
8401                 u32 val;
8402                 /* The mac address is written to entries 1-4 to
8403                    preserve entry 0 which is used by the PMF */
8404                 u8 entry = (BP_E1HVN(bp) + 1)*8;
8405
8406                 val = (mac_addr[0] << 8) | mac_addr[1];
8407                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8408
8409                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8410                       (mac_addr[4] << 8) | mac_addr[5];
8411                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
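                /* e.g. for MAC 00:10:18:ab:cd:ef the two match words
                 * are 0x00000010 and 0x18abcdef - the address is packed
                 * MSB-first across the pair of registers.
                 */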
8412
8413                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8414
8415         } else
8416                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8417
8418         /* Close the multi and leading connections.
8419            Completions for the ramrods are collected synchronously. */
8420         for_each_nondefault_queue(bp, i)
8421                 if (bnx2x_stop_multi(bp, i))
8422                         goto unload_error;
8423
8424         rc = bnx2x_stop_leading(bp);
8425         if (rc) {
8426                 BNX2X_ERR("Stop leading failed!\n");
8427 #ifdef BNX2X_STOP_ON_ERROR
8428                 return; /* void function - abort cleanup */
8429 #else
8430                 goto unload_error;
8431 #endif
8432         }
8433
8434 unload_error:
8435         if (!BP_NOMCP(bp))
8436                 reset_code = bnx2x_fw_command(bp, reset_code);
8437         else {
8438                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
8439                    load_count[0], load_count[1], load_count[2]);
8440                 load_count[0]--;
8441                 load_count[1 + port]--;
8442                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
8443                    load_count[0], load_count[1], load_count[2]);
8444                 if (load_count[0] == 0)
8445                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8446                 else if (load_count[1 + port] == 0)
8447                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8448                 else
8449                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8450         }
8451
8452         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8453             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8454                 bnx2x__link_reset(bp);
8455
8456         /* Reset the chip */
8457         bnx2x_reset_chip(bp, reset_code);
8458
8459         /* Report UNLOAD_DONE to MCP */
8460         if (!BP_NOMCP(bp))
8461                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8462
8463 }
8464
8465 static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8466 {
8467         u32 val;
8468
8469         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8470
8471         if (CHIP_IS_E1(bp)) {
8472                 int port = BP_PORT(bp);
8473                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8474                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
8475
8476                 val = REG_RD(bp, addr);
8477                 val &= ~(0x300);
8478                 REG_WR(bp, addr, val);
8479         } else if (CHIP_IS_E1H(bp)) {
8480                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8481                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8482                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8483                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8484         }
8485 }
8486
8487 /* must be called with rtnl_lock */
8488 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8489 {
8490         int i;
8491
8492         if (bp->state == BNX2X_STATE_CLOSED) {
8493                 /* Interface has been removed - nothing to recover */
8494                 bp->recovery_state = BNX2X_RECOVERY_DONE;
8495                 bp->is_leader = 0;
8496                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8497                 smp_wmb();
8498
8499                 return -EINVAL;
8500         }
8501
8502 #ifdef BCM_CNIC
8503         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8504 #endif
8505         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8506
8507         /* Set "drop all" */
8508         bp->rx_mode = BNX2X_RX_MODE_NONE;
8509         bnx2x_set_storm_rx_mode(bp);
8510
8511         /* Disable HW interrupts, NAPI and Tx */
8512         bnx2x_netif_stop(bp, 1);
8513         netif_carrier_off(bp->dev);
8514
8515         del_timer_sync(&bp->timer);
8516         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8517                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8518         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8519
8520         /* Release IRQs */
8521         bnx2x_free_irq(bp, false);
8522
8523         /* Cleanup the chip if needed */
8524         if (unload_mode != UNLOAD_RECOVERY)
8525                 bnx2x_chip_cleanup(bp, unload_mode);
8526
8527         bp->port.pmf = 0;
8528
8529         /* Free SKBs, SGEs, TPA pool and driver internals */
8530         bnx2x_free_skbs(bp);
8531         for_each_queue(bp, i)
8532                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8533         for_each_queue(bp, i)
8534                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8535         bnx2x_free_mem(bp);
8536
8537         bp->state = BNX2X_STATE_CLOSED;
8538
8539         /* The last driver must disable the "close the gate" functionality
8540          * if there is no parity attention or "process kill" pending.
8541          */
8542         if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8543             bnx2x_reset_is_done(bp))
8544                 bnx2x_disable_close_the_gate(bp);
8545
8546         /* Reset the MCP mailbox sequence if a recovery is in progress */
8547         if (unload_mode == UNLOAD_RECOVERY)
8548                 bp->fw_seq = 0;
8549
8550         return 0;
8551 }
8552
8553 /* Close gates #2, #3 and #4: */
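/* Gate #4 is the PXP doorbell discard and gate #2 the PXP internal
 * write discard (both skipped on E1); gate #3 is the HC config enable
 * bit, which is written with inverted polarity - it is set when the
 * gates are being opened.
 */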
8554 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8555 {
8556         u32 val, addr;
8557
8558         /* Gates #2 and #4a are closed/opened for "not E1" only */
8559         if (!CHIP_IS_E1(bp)) {
8560                 /* #4 */
8561                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8562                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8563                        close ? (val | 0x1) : (val & (~(u32)1)));
8564                 /* #2 */
8565                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8566                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8567                        close ? (val | 0x1) : (val & (~(u32)1)));
8568         }
8569
8570         /* #3 */
8571         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8572         val = REG_RD(bp, addr);
8573         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8574
8575         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8576                 close ? "closing" : "opening");
8577         mmiowb();
8578 }
8579
8580 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
8581
8582 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8583 {
8584         /* Do some magic... */
8585         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8586         *magic_val = val & SHARED_MF_CLP_MAGIC;
8587         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8588 }
8589
8590 /* Restore the value of the `magic' bit.
8591  *
8592  * @param bp Device handle.
8593  * @param magic_val Old value of the `magic' bit.
8594  */
8595 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8596 {
8597         /* Restore the `magic' bit value... */
8601         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8602         MF_CFG_WR(bp, shared_mf_config.clp_mb,
8603                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8604 }
8605
8606 /* Prepares for MCP reset: takes care of CLP configurations.
8607  *
8608  * @param bp
8609  * @param magic_val Old value of 'magic' bit.
8610  */
8611 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8612 {
8613         u32 shmem;
8614         u32 validity_offset;
8615
8616         DP(NETIF_MSG_HW, "Starting\n");
8617
8618         /* Set `magic' bit in order to save MF config */
8619         if (!CHIP_IS_E1(bp))
8620                 bnx2x_clp_reset_prep(bp, magic_val);
8621
8622         /* Get shmem offset */
8623         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8624         validity_offset = offsetof(struct shmem_region, validity_map[0]);
8625
8626         /* Clear validity map flags */
8627         if (shmem > 0)
8628                 REG_WR(bp, shmem + validity_offset, 0);
8629 }
8630
8631 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
8632 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
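/* The MCP wait loop below therefore runs MCP_TIMEOUT / MCP_ONE_TIMEOUT
 * = 50 iterations, i.e. up to 5 seconds on real silicon and ten times
 * that on emulation/FPGA, where each step sleeps 10x longer.
 */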
8633
8634 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8635  * depending on the HW type.
8636  *
8637  * @param bp
8638  */
8639 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8640 {
8641         /* special handling for emulation and FPGA,
8642            wait 10 times longer */
8643         if (CHIP_REV_IS_SLOW(bp))
8644                 msleep(MCP_ONE_TIMEOUT*10);
8645         else
8646                 msleep(MCP_ONE_TIMEOUT);
8647 }
8648
8649 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8650 {
8651         u32 shmem, cnt, validity_offset, val;
8652         int rc = 0;
8653
8654         msleep(100);
8655
8656         /* Get shmem offset */
8657         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8658         if (shmem == 0) {
8659                 BNX2X_ERR("Shmem base address reads as 0\n");
8660                 rc = -ENOTTY;
8661                 goto exit_lbl;
8662         }
8663
8664         validity_offset = offsetof(struct shmem_region, validity_map[0]);
8665
8666         /* Wait for MCP to come up */
8667         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8668                 /* TBD: it's best to check the validity map of the last
8669                  * port; currently this checks port 0.
8670                  */
8671                 val = REG_RD(bp, shmem + validity_offset);
8672                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8673                    shmem + validity_offset, val);
8674
8675                 /* check that shared memory is valid. */
8676                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8677                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8678                         break;
8679
8680                 bnx2x_mcp_wait_one(bp);
8681         }
8682
8683         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8684
8685         /* Check that shared memory is valid. This indicates that MCP is up. */
8686         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8687             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8688                 BNX2X_ERR("Shmem signature not present. MCP is not up!\n");
8689                 rc = -ENOTTY;
8690                 goto exit_lbl;
8691         }
8692
8693 exit_lbl:
8694         /* Restore the `magic' bit value */
8695         if (!CHIP_IS_E1(bp))
8696                 bnx2x_clp_reset_done(bp, magic_val);
8697
8698         return rc;
8699 }
8700
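/* Clear the PXP2 "init done" indications (read start, RQ RBC done,
 * RQ CFG done), apparently so the PXP block is re-initialized from a
 * clean state; this is done both before the chip reset and again while
 * recovering from it.
 */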
8701 static void bnx2x_pxp_prep(struct bnx2x *bp)
8702 {
8703         if (!CHIP_IS_E1(bp)) {
8704                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8705                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8706                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8707                 mmiowb();
8708         }
8709 }
8710
8711 /*
8712  * Reset the whole chip except for:
8713  *      - PCIE core
8714  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8715  *              one reset bit)
8716  *      - IGU
8717  *      - MISC (including AEU)
8718  *      - GRC
8719  *      - RBCN, RBCP
8720  */
8721 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8722 {
8723         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8724
8725         not_reset_mask1 =
8726                 MISC_REGISTERS_RESET_REG_1_RST_HC |
8727                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8728                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8729
8730         not_reset_mask2 =
8731                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8732                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8733                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8734                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8735                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8736                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
8737                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8738                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8739
8740         reset_mask1 = 0xffffffff;
8741
8742         if (CHIP_IS_E1(bp))
8743                 reset_mask2 = 0xffff;
8744         else
8745                 reset_mask2 = 0x1ffff;
8746
8747         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8748                reset_mask1 & (~not_reset_mask1));
8749         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8750                reset_mask2 & (~not_reset_mask2));
8751
8752         barrier();
8753         mmiowb();
8754
8755         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8756         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8757         mmiowb();
8758 }
8759
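/* "Process kill" - the last-resort global recovery. Roughly:
 *  1. wait up to 1s for the PXP Tetris buffer and read queues to drain,
 *  2. close gates #2-#4 and clear MISC_REG_UNPREPARED,
 *  3. save the CLP state and invalidate the shmem validity map,
 *  4. assert the chip-wide reset masks built above,
 *  5. wait for the MCP to come back, restore the CLP state and reopen
 *     the gates.
 */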
8760 static int bnx2x_process_kill(struct bnx2x *bp)
8761 {
8762         int cnt = 1000;
8763         u32 val = 0;
8764         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8765
8767         /* Empty the Tetris buffer, wait for 1s */
8768         do {
8769                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8770                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8771                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8772                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8773                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8774                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8775                     ((port_is_idle_0 & 0x1) == 0x1) &&
8776                     ((port_is_idle_1 & 0x1) == 0x1) &&
8777                     (pgl_exp_rom2 == 0xffffffff))
8778                         break;
8779                 msleep(1);
8780         } while (cnt-- > 0);
8781
8782         if (cnt <= 0) {
8783                 DP(NETIF_MSG_HW, "Tetris buffer did not empty or there are"
8784                           " still outstanding read requests after 1s!\n");
8786                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8787                           " port_is_idle_0=0x%08x,"
8788                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8789                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8790                           pgl_exp_rom2);
8791                 return -EAGAIN;
8792         }
8793
8794         barrier();
8795
8796         /* Close gates #2, #3 and #4 */
8797         bnx2x_set_234_gates(bp, true);
8798
8799         /* TBD: Indicate that "process kill" is in progress to MCP */
8800
8801         /* Clear "unprepared" bit */
8802         REG_WR(bp, MISC_REG_UNPREPARED, 0);
8803         barrier();
8804
8805         /* Make sure all is written to the chip before the reset */
8806         mmiowb();
8807
8808         /* Wait for 1ms to empty GLUE and PCI-E core queues,
8809          * PSWHST, GRC and PSWRD Tetris buffer.
8810          */
8811         msleep(1);
8812
8813         /* Prepare for the chip reset: */
8814         /* MCP */
8815         bnx2x_reset_mcp_prep(bp, &val);
8816
8817         /* PXP */
8818         bnx2x_pxp_prep(bp);
8819         barrier();
8820
8821         /* reset the chip */
8822         bnx2x_process_kill_chip_reset(bp);
8823         barrier();
8824
8825         /* Recover after reset: */
8826         /* MCP */
8827         if (bnx2x_reset_mcp_comp(bp, val))
8828                 return -EAGAIN;
8829
8830         /* PXP */
8831         bnx2x_pxp_prep(bp);
8832
8833         /* Open the gates #2, #3 and #4 */
8834         bnx2x_set_234_gates(bp, false);
8835
8836         /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
8837          * reset state, re-enable attentions. */
8838
8839         return 0;
8840 }
8841
8842 static int bnx2x_leader_reset(struct bnx2x *bp)
8843 {
8844         int rc = 0;
8845         /* Try to recover after the failure */
8846         if (bnx2x_process_kill(bp)) {
8847                 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
8848                        bp->dev->name);
8849                 rc = -EAGAIN;
8850                 goto exit_leader_reset;
8851         }
8852
8853         /* Clear "reset is in progress" bit and update the driver state */
8854         bnx2x_set_reset_done(bp);
8855         bp->recovery_state = BNX2X_RECOVERY_DONE;
8856
8857 exit_leader_reset:
8858         bp->is_leader = 0;
8859         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8860         smp_wmb();
8861         return rc;
8862 }
8863
8864 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8865
8866 /* Assumption: runs under rtnl lock. This, together with the fact
8867  * that it's called only from bnx2x_reset_task(), ensures that it
8868  * will never be called when netif_running(bp->dev) is false.
8869  */
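/* The recovery state machine, in essence:
 *
 *   INIT: try to take the leader lock, unload with UNLOAD_RECOVERY and
 *         move to WAIT.
 *   WAIT, leader: once the global load count drops to zero, run
 *         bnx2x_leader_reset() and reload; otherwise re-arm the task.
 *   WAIT, non-leader: wait for the leader to finish the "process kill"
 *         (or inherit leadership if the lock becomes free), then reload.
 */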
8870 static void bnx2x_parity_recover(struct bnx2x *bp)
8871 {
8872         DP(NETIF_MSG_HW, "Handling parity\n");
8873         while (1) {
8874                 switch (bp->recovery_state) {
8875                 case BNX2X_RECOVERY_INIT:
8876                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8877                         /* Try to get a LEADER_LOCK HW lock */
8878                         if (bnx2x_trylock_hw_lock(bp,
8879                                 HW_LOCK_RESOURCE_RESERVED_08))
8880                                 bp->is_leader = 1;
8881
8882                         /* Stop the driver */
8883                         /* If interface has been removed - break */
8884                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8885                                 return;
8886
8887                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
8888                         /* Ensure the "is_leader" and "recovery_state"
8889                          * updates are seen on other CPUs
8890                          */
8891                         smp_wmb();
8892                         break;
8893
8894                 case BNX2X_RECOVERY_WAIT:
8895                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8896                         if (bp->is_leader) {
8897                                 u32 load_counter = bnx2x_get_load_cnt(bp);
8898                                 if (load_counter) {
8899                                         /* Wait until all other functions
8900                                          * are down.
8901                                          */
8902                                         schedule_delayed_work(&bp->reset_task,
8903                                                                 HZ/10);
8904                                         return;
8905                                 } else {
8906                                         /* All other functions are down -
8907                                          * try to bring the chip back to
8908                                          * normal. Either way this is the
8909                                          * exit point for the leader.
8910                                          */
8911                                         if (bnx2x_leader_reset(bp) ||
8912                                         bnx2x_nic_load(bp, LOAD_NORMAL)) {
8913                                                 printk(KERN_ERR "%s: Recovery "
8914                                                 "has failed. Power cycle is "
8915                                                 "needed.\n", bp->dev->name);
8916                                                 /* Disconnect this device */
8917                                                 netif_device_detach(bp->dev);
8918                                                 /* Block ifup for all
8919                                                  * functions of this ASIC until
8920                                                  * "process kill" or power
8921                                                  * cycle.
8922                                                  */
8923                                                 bnx2x_set_reset_in_progress(bp);
8924                                                 /* Shut down the power */
8925                                                 bnx2x_set_power_state(bp,
8926                                                                 PCI_D3hot);
8927                                                 return;
8928                                         }
8929
8930                                         return;
8931                                 }
8932                         } else { /* non-leader */
8933                                 if (!bnx2x_reset_is_done(bp)) {
8934                                         /* Try to get the LEADER_LOCK HW lock,
8935                                          * since the former leader may have
8936                                          * been unloaded by the user or
8937                                          * released leadership for some other
8938                                          * reason.
8939                                          */
8940                                         if (bnx2x_trylock_hw_lock(bp,
8941                                             HW_LOCK_RESOURCE_RESERVED_08)) {
8942                                                 /* I'm the leader now! Restart
8943                                                  * the switch.
8944                                                  */
8945                                                 bp->is_leader = 1;
8946                                                 break;
8947                                         }
8948
8949                                         schedule_delayed_work(&bp->reset_task,
8950                                                                 HZ/10);
8951                                         return;
8952
8953                                 } else { /* A leader has completed
8954                                           * the "process kill". It's an exit
8955                                           * point for a non-leader.
8956                                           */
8957                                         bnx2x_nic_load(bp, LOAD_NORMAL);
8958                                         bp->recovery_state =
8959                                                 BNX2X_RECOVERY_DONE;
8960                                         smp_wmb();
8961                                         return;
8962                                 }
8963                         }
8964                 default:
8965                         return;
8966                 }
8967         }
8968 }
8969
8970 /* bnx2x_nic_unload() flushes the bnx2x_wq, so the reset task is
8971  * scheduled on a generic workqueue in order to prevent a deadlock.
8972  */
8973 static void bnx2x_reset_task(struct work_struct *work)
8974 {
8975         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
8976
8977 #ifdef BNX2X_STOP_ON_ERROR
8978         BNX2X_ERR("reset task called but STOP_ON_ERROR is defined,"
8979                   " so the reset was not done to allow a debug dump;\n"
8980                   " you will need to reboot when done\n");
8981         return;
8982 #endif
8983
8984         rtnl_lock();
8985
8986         if (!netif_running(bp->dev))
8987                 goto reset_task_exit;
8988
8989         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8990                 bnx2x_parity_recover(bp);
8991         else {
8992                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8993                 bnx2x_nic_load(bp, LOAD_NORMAL);
8994         }
8995
8996 reset_task_exit:
8997         rtnl_unlock();
8998 }
8999
9000 /* end of nic load/unload */
9001
9002 /* ethtool_ops */
9003
9004 /*
9005  * Init service functions
9006  */
9007
9008 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
9009 {
9010         switch (func) {
9011         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
9012         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
9013         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
9014         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
9015         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
9016         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
9017         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
9018         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
9019         default:
9020                 BNX2X_ERR("Unsupported function index: %d\n", func);
9021                 return (u32)(-1);
9022         }
9023 }
9024
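/* The "pretend" mechanism: writing 0 to this function's
 * PGL_PRETEND_FUNC register makes subsequent GRC accesses appear to
 * come from function 0; writing the original function number back ends
 * the masquerade. The read-back after each write flushes the pretend
 * update in the chip before it is relied upon.
 */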
9025 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
9026 {
9027         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
9028
9029         /* Flush all outstanding writes */
9030         mmiowb();
9031
9032         /* Pretend to be function 0 */
9033         REG_WR(bp, reg, 0);
9034         /* Flush the GRC transaction (in the chip) */
9035         new_val = REG_RD(bp, reg);
9036         if (new_val != 0) {
9037                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
9038                           new_val);
9039                 BUG();
9040         }
9041
9042         /* From now we are in the "like-E1" mode */
9043         bnx2x_int_disable(bp);
9044
9045         /* Flush all outstanding writes */
9046         mmiowb();
9047
9048         /* Restore the original function settings */
9049         REG_WR(bp, reg, orig_func);
9050         new_val = REG_RD(bp, reg);
9051         if (new_val != orig_func) {
9052                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
9053                           orig_func, new_val);
9054                 BUG();
9055         }
9056 }
9057
9058 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
9059 {
9060         if (CHIP_IS_E1H(bp))
9061                 bnx2x_undi_int_disable_e1h(bp, func);
9062         else
9063                 bnx2x_int_disable(bp);
9064 }
9065
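/* If a pre-boot UNDI driver left the device initialized (detected via
 * MISC_REG_UNPREPARED and a normal doorbell CID offset of 0x7), unwind
 * it: perform the MCP unload handshake for whichever port(s) UNDI
 * claimed, mask interrupts, close BRB ingress, reset the device while
 * preserving the NIG port swap straps, and finally ack UNLOAD_DONE.
 */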
9066 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
9067 {
9068         u32 val;
9069
9070         /* Check if there is any driver already loaded */
9071         val = REG_RD(bp, MISC_REG_UNPREPARED);
9072         if (val == 0x1) {
9073                 /* Check if it is the UNDI driver: the UNDI driver
9074                  * initializes the CID offset for the normal doorbell to 0x7
9075                  */
9076                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9077                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9078                 if (val == 0x7) {
9079                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9080                         /* save our func */
9081                         int func = BP_FUNC(bp);
9082                         u32 swap_en;
9083                         u32 swap_val;
9084
9085                         /* clear the UNDI indication */
9086                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9087
9088                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
9089
9090                         /* try to unload UNDI on port 0 */
9091                         bp->func = 0;
9092                         bp->fw_seq =
9093                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9094                                 DRV_MSG_SEQ_NUMBER_MASK);
9095                         reset_code = bnx2x_fw_command(bp, reset_code);
9096
9097                         /* if UNDI is loaded on the other port */
9098                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9099
9100                                 /* send "DONE" for previous unload */
9101                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9102
9103                                 /* unload UNDI on port 1 */
9104                                 bp->func = 1;
9105                                 bp->fw_seq =
9106                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9107                                         DRV_MSG_SEQ_NUMBER_MASK);
9108                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9109
9110                                 bnx2x_fw_command(bp, reset_code);
9111                         }
9112
9113                         /* now it's safe to release the lock */
9114                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9115
9116                         bnx2x_undi_int_disable(bp, func);
9117
9118                         /* close input traffic and wait for it */
9119                         /* Do not rcv packets to BRB */
9120                         REG_WR(bp,
9121                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9122                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9123                         /* Do not direct rcv packets that are not for MCP to
9124                          * the BRB */
9125                         REG_WR(bp,
9126                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9127                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9128                         /* clear AEU */
9129                         REG_WR(bp,
9130                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9131                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9132                         msleep(10);
9133
9134                         /* save NIG port swap info */
9135                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9136                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
9137                         /* reset device */
9138                         REG_WR(bp,
9139                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9140                                0xd3ffffff);
9141                         REG_WR(bp,
9142                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9143                                0x1403);
9144                         /* take the NIG out of reset and restore swap values */
9145                         REG_WR(bp,
9146                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9147                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
9148                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9149                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9150
9151                         /* send unload done to the MCP */
9152                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9153
9154                         /* restore our func and fw_seq */
9155                         bp->func = func;
9156                         bp->fw_seq =
9157                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9158                                 DRV_MSG_SEQ_NUMBER_MASK);
9159
9160                 } else
9161                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9162         }
9163 }
9164
9165 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9166 {
9167         u32 val, val2, val3, val4, id;
9168         u16 pmc;
9169
9170         /* Get the chip revision id and number. */
9171         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9172         val = REG_RD(bp, MISC_REG_CHIP_NUM);
9173         id = ((val & 0xffff) << 16);
9174         val = REG_RD(bp, MISC_REG_CHIP_REV);
9175         id |= ((val & 0xf) << 12);
9176         val = REG_RD(bp, MISC_REG_CHIP_METAL);
9177         id |= ((val & 0xff) << 4);
9178         val = REG_RD(bp, MISC_REG_BOND_ID);
9179         id |= (val & 0xf);
9180         bp->common.chip_id = id;
9181         bp->link_params.chip_id = bp->common.chip_id;
9182         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
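        /* e.g. chip num 0x164e, rev 0x0, metal 0x00 and bond 0x0 pack
         * into chip_id 0x164e0000 */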
9183
9184         val = (REG_RD(bp, 0x2874) & 0x55);
9185         if ((bp->common.chip_id & 0x1) ||
9186             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9187                 bp->flags |= ONE_PORT_FLAG;
9188                 BNX2X_DEV_INFO("single port device\n");
9189         }
9190
9191         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9192         bp->common.flash_size = (NVRAM_1MB_SIZE <<
9193                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
9194         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9195                        bp->common.flash_size, bp->common.flash_size);
9196
9197         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9198         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
9199         bp->link_params.shmem_base = bp->common.shmem_base;
9200         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
9201                        bp->common.shmem_base, bp->common.shmem2_base);
9202
9203         if (!bp->common.shmem_base ||
9204             (bp->common.shmem_base < 0xA0000) ||
9205             (bp->common.shmem_base >= 0xC0000)) {
9206                 BNX2X_DEV_INFO("MCP not active\n");
9207                 bp->flags |= NO_MCP_FLAG;
9208                 return;
9209         }
9210
9211         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9212         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9213                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9214                 BNX2X_ERROR("BAD MCP validity signature\n");
9215
9216         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
9217         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
9218
9219         bp->link_params.hw_led_mode = ((bp->common.hw_config &
9220                                         SHARED_HW_CFG_LED_MODE_MASK) >>
9221                                        SHARED_HW_CFG_LED_MODE_SHIFT);
9222
9223         bp->link_params.feature_config_flags = 0;
9224         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9225         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9226                 bp->link_params.feature_config_flags |=
9227                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9228         else
9229                 bp->link_params.feature_config_flags &=
9230                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9231
9232         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9233         bp->common.bc_ver = val;
9234         BNX2X_DEV_INFO("bc_ver %X\n", val);
9235         if (val < BNX2X_BC_VER) {
9236                 /* for now only warn
9237                  * later we might need to enforce this */
9238                 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
9239                             "please upgrade BC\n", BNX2X_BC_VER, val);
9240         }
9241         bp->link_params.feature_config_flags |=
9242                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9243                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
9244
9245         if (BP_E1HVN(bp) == 0) {
9246                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9247                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9248         } else {
9249                 /* no WOL capability for E1HVN != 0 */
9250                 bp->flags |= NO_WOL_FLAG;
9251         }
9252         BNX2X_DEV_INFO("%sWoL capable\n",
9253                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
9254
9255         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9256         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9257         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9258         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9259
9260         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9261                  val, val2, val3, val4);
9262 }
9263
9264 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9265                                                     u32 switch_cfg)
9266 {
9267         int port = BP_PORT(bp);
9268         u32 ext_phy_type;
9269
9270         switch (switch_cfg) {
9271         case SWITCH_CFG_1G:
9272                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9273
9274                 ext_phy_type =
9275                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9276                 switch (ext_phy_type) {
9277                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9278                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9279                                        ext_phy_type);
9280
9281                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9282                                                SUPPORTED_10baseT_Full |
9283                                                SUPPORTED_100baseT_Half |
9284                                                SUPPORTED_100baseT_Full |
9285                                                SUPPORTED_1000baseT_Full |
9286                                                SUPPORTED_2500baseX_Full |
9287                                                SUPPORTED_TP |
9288                                                SUPPORTED_FIBRE |
9289                                                SUPPORTED_Autoneg |
9290                                                SUPPORTED_Pause |
9291                                                SUPPORTED_Asym_Pause);
9292                         break;
9293
9294                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9295                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9296                                        ext_phy_type);
9297
9298                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9299                                                SUPPORTED_10baseT_Full |
9300                                                SUPPORTED_100baseT_Half |
9301                                                SUPPORTED_100baseT_Full |
9302                                                SUPPORTED_1000baseT_Full |
9303                                                SUPPORTED_TP |
9304                                                SUPPORTED_FIBRE |
9305                                                SUPPORTED_Autoneg |
9306                                                SUPPORTED_Pause |
9307                                                SUPPORTED_Asym_Pause);
9308                         break;
9309
9310                 default:
9311                         BNX2X_ERR("NVRAM config error. "
9312                                   "BAD SerDes ext_phy_config 0x%x\n",
9313                                   bp->link_params.ext_phy_config);
9314                         return;
9315                 }
9316
9317                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9318                                            port*0x10);
9319                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9320                 break;
9321
9322         case SWITCH_CFG_10G:
9323                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9324
9325                 ext_phy_type =
9326                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9327                 switch (ext_phy_type) {
9328                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9329                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9330                                        ext_phy_type);
9331
9332                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9333                                                SUPPORTED_10baseT_Full |
9334                                                SUPPORTED_100baseT_Half |
9335                                                SUPPORTED_100baseT_Full |
9336                                                SUPPORTED_1000baseT_Full |
9337                                                SUPPORTED_2500baseX_Full |
9338                                                SUPPORTED_10000baseT_Full |
9339                                                SUPPORTED_TP |
9340                                                SUPPORTED_FIBRE |
9341                                                SUPPORTED_Autoneg |
9342                                                SUPPORTED_Pause |
9343                                                SUPPORTED_Asym_Pause);
9344                         break;
9345
9346                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9347                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
9348                                        ext_phy_type);
9349
9350                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9351                                                SUPPORTED_1000baseT_Full |
9352                                                SUPPORTED_FIBRE |
9353                                                SUPPORTED_Autoneg |
9354                                                SUPPORTED_Pause |
9355                                                SUPPORTED_Asym_Pause);
9356                         break;
9357
9358                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9359                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
9360                                        ext_phy_type);
9361
9362                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9363                                                SUPPORTED_2500baseX_Full |
9364                                                SUPPORTED_1000baseT_Full |
9365                                                SUPPORTED_FIBRE |
9366                                                SUPPORTED_Autoneg |
9367                                                SUPPORTED_Pause |
9368                                                SUPPORTED_Asym_Pause);
9369                         break;
9370
9371                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9372                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9373                                        ext_phy_type);
9374
9375                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9376                                                SUPPORTED_FIBRE |
9377                                                SUPPORTED_Pause |
9378                                                SUPPORTED_Asym_Pause);
9379                         break;
9380
9381                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9382                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
9383                                        ext_phy_type);
9384
9385                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9386                                                SUPPORTED_1000baseT_Full |
9387                                                SUPPORTED_FIBRE |
9388                                                SUPPORTED_Pause |
9389                                                SUPPORTED_Asym_Pause);
9390                         break;
9391
9392                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9393                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
9394                                        ext_phy_type);
9395
9396                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9397                                                SUPPORTED_1000baseT_Full |
9398                                                SUPPORTED_Autoneg |
9399                                                SUPPORTED_FIBRE |
9400                                                SUPPORTED_Pause |
9401                                                SUPPORTED_Asym_Pause);
9402                         break;
9403
9404                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9405                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9406                                        ext_phy_type);
9407
9408                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9409                                                SUPPORTED_1000baseT_Full |
9410                                                SUPPORTED_Autoneg |
9411                                                SUPPORTED_FIBRE |
9412                                                SUPPORTED_Pause |
9413                                                SUPPORTED_Asym_Pause);
9414                         break;
9415
9416                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9417                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9418                                        ext_phy_type);
9419
9420                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9421                                                SUPPORTED_TP |
9422                                                SUPPORTED_Autoneg |
9423                                                SUPPORTED_Pause |
9424                                                SUPPORTED_Asym_Pause);
9425                         break;
9426
9427                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9428                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9429                                        ext_phy_type);
9430
9431                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9432                                                SUPPORTED_10baseT_Full |
9433                                                SUPPORTED_100baseT_Half |
9434                                                SUPPORTED_100baseT_Full |
9435                                                SUPPORTED_1000baseT_Full |
9436                                                SUPPORTED_10000baseT_Full |
9437                                                SUPPORTED_TP |
9438                                                SUPPORTED_Autoneg |
9439                                                SUPPORTED_Pause |
9440                                                SUPPORTED_Asym_Pause);
9441                         break;
9442
9443                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9444                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9445                                   bp->link_params.ext_phy_config);
9446                         break;
9447
9448                 default:
9449                         BNX2X_ERR("NVRAM config error. "
9450                                   "BAD XGXS ext_phy_config 0x%x\n",
9451                                   bp->link_params.ext_phy_config);
9452                         return;
9453                 }
9454
9455                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9456                                            port*0x18);
9457                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9458
9459                 break;
9460
9461         default:
9462                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
9463                           bp->port.link_config);
9464                 return;
9465         }
9466         bp->link_params.phy_addr = bp->port.phy_addr;
9467
9468         /* mask what we support according to speed_cap_mask */
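        /* e.g. a mask without PORT_HW_CFG_SPEED_CAPABILITY_D0_10G clears
         * SUPPORTED_10000baseT_Full below even if the PHY advertised it */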
9469         if (!(bp->link_params.speed_cap_mask &
9470                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
9471                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
9472
9473         if (!(bp->link_params.speed_cap_mask &
9474                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
9475                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
9476
9477         if (!(bp->link_params.speed_cap_mask &
9478                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
9479                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
9480
9481         if (!(bp->link_params.speed_cap_mask &
9482                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
9483                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
9484
9485         if (!(bp->link_params.speed_cap_mask &
9486                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
9487                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9488                                         SUPPORTED_1000baseT_Full);
9489
9490         if (!(bp->link_params.speed_cap_mask &
9491                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
9492                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
9493
9494         if (!(bp->link_params.speed_cap_mask &
9495                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
9496                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
9497
9498         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
9499 }
9500
9501 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
9502 {
9503         bp->link_params.req_duplex = DUPLEX_FULL;
9504
9505         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
9506         case PORT_FEATURE_LINK_SPEED_AUTO:
9507                 if (bp->port.supported & SUPPORTED_Autoneg) {
9508                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9509                         bp->port.advertising = bp->port.supported;
9510                 } else {
9511                         u32 ext_phy_type =
9512                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9513
9514                         if ((ext_phy_type ==
9515                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9516                             (ext_phy_type ==
9517                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
9518                                 /* force 10G, no AN */
9519                                 bp->link_params.req_line_speed = SPEED_10000;
9520                                 bp->port.advertising =
9521                                                 (ADVERTISED_10000baseT_Full |
9522                                                  ADVERTISED_FIBRE);
9523                                 break;
9524                         }
9525                         BNX2X_ERR("NVRAM config error. "
9526                                   "Invalid link_config 0x%x"
9527                                   "  Autoneg not supported\n",
9528                                   bp->port.link_config);
9529                         return;
9530                 }
9531                 break;
9532
9533         case PORT_FEATURE_LINK_SPEED_10M_FULL:
9534                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
9535                         bp->link_params.req_line_speed = SPEED_10;
9536                         bp->port.advertising = (ADVERTISED_10baseT_Full |
9537                                                 ADVERTISED_TP);
9538                 } else {
9539                         BNX2X_ERROR("NVRAM config error. "
9540                                     "Invalid link_config 0x%x"
9541                                     "  speed_cap_mask 0x%x\n",
9542                                     bp->port.link_config,
9543                                     bp->link_params.speed_cap_mask);
9544                         return;
9545                 }
9546                 break;
9547
9548         case PORT_FEATURE_LINK_SPEED_10M_HALF:
9549                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
9550                         bp->link_params.req_line_speed = SPEED_10;
9551                         bp->link_params.req_duplex = DUPLEX_HALF;
9552                         bp->port.advertising = (ADVERTISED_10baseT_Half |
9553                                                 ADVERTISED_TP);
9554                 } else {
9555                         BNX2X_ERROR("NVRAM config error. "
9556                                     "Invalid link_config 0x%x"
9557                                     "  speed_cap_mask 0x%x\n",
9558                                     bp->port.link_config,
9559                                     bp->link_params.speed_cap_mask);
9560                         return;
9561                 }
9562                 break;
9563
9564         case PORT_FEATURE_LINK_SPEED_100M_FULL:
9565                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
9566                         bp->link_params.req_line_speed = SPEED_100;
9567                         bp->port.advertising = (ADVERTISED_100baseT_Full |
9568                                                 ADVERTISED_TP);
9569                 } else {
9570                         BNX2X_ERROR("NVRAM config error. "
9571                                     "Invalid link_config 0x%x"
9572                                     "  speed_cap_mask 0x%x\n",
9573                                     bp->port.link_config,
9574                                     bp->link_params.speed_cap_mask);
9575                         return;
9576                 }
9577                 break;
9578
9579         case PORT_FEATURE_LINK_SPEED_100M_HALF:
9580                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
9581                         bp->link_params.req_line_speed = SPEED_100;
9582                         bp->link_params.req_duplex = DUPLEX_HALF;
9583                         bp->port.advertising = (ADVERTISED_100baseT_Half |
9584                                                 ADVERTISED_TP);
9585                 } else {
9586                         BNX2X_ERROR("NVRAM config error. "
9587                                     "Invalid link_config 0x%x"
9588                                     "  speed_cap_mask 0x%x\n",
9589                                     bp->port.link_config,
9590                                     bp->link_params.speed_cap_mask);
9591                         return;
9592                 }
9593                 break;
9594
9595         case PORT_FEATURE_LINK_SPEED_1G:
9596                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
9597                         bp->link_params.req_line_speed = SPEED_1000;
9598                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
9599                                                 ADVERTISED_TP);
9600                 } else {
9601                         BNX2X_ERROR("NVRAM config error. "
9602                                     "Invalid link_config 0x%x"
9603                                     "  speed_cap_mask 0x%x\n",
9604                                     bp->port.link_config,
9605                                     bp->link_params.speed_cap_mask);
9606                         return;
9607                 }
9608                 break;
9609
9610         case PORT_FEATURE_LINK_SPEED_2_5G:
9611                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
9612                         bp->link_params.req_line_speed = SPEED_2500;
9613                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
9614                                                 ADVERTISED_TP);
9615                 } else {
9616                         BNX2X_ERROR("NVRAM config error. "
9617                                     "Invalid link_config 0x%x"
9618                                     "  speed_cap_mask 0x%x\n",
9619                                     bp->port.link_config,
9620                                     bp->link_params.speed_cap_mask);
9621                         return;
9622                 }
9623                 break;
9624
9625         case PORT_FEATURE_LINK_SPEED_10G_CX4:
9626         case PORT_FEATURE_LINK_SPEED_10G_KX4:
9627         case PORT_FEATURE_LINK_SPEED_10G_KR:
9628                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
9629                         bp->link_params.req_line_speed = SPEED_10000;
9630                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
9631                                                 ADVERTISED_FIBRE);
9632                 } else {
9633                         BNX2X_ERROR("NVRAM config error. "
9634                                     "Invalid link_config 0x%x"
9635                                     "  speed_cap_mask 0x%x\n",
9636                                     bp->port.link_config,
9637                                     bp->link_params.speed_cap_mask);
9638                         return;
9639                 }
9640                 break;
9641
9642         default:
9643                 BNX2X_ERROR("NVRAM config error. "
9644                             "BAD link speed link_config 0x%x\n",
9645                             bp->port.link_config);
9646                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9647                 bp->port.advertising = bp->port.supported;
9648                 break;
9649         }
9650
9651         bp->link_params.req_flow_ctrl = (bp->port.link_config &
9652                                          PORT_FEATURE_FLOW_CONTROL_MASK);
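        /* flow control AUTO is resolved by autoneg; without
         * SUPPORTED_Autoneg there is nothing to negotiate, so the
         * check below degrades it to none */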
9653         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
9654             !(bp->port.supported & SUPPORTED_Autoneg))
9655                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9656
9657         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
9658                        "  advertising 0x%x\n",
9659                        bp->link_params.req_line_speed,
9660                        bp->link_params.req_duplex,
9661                        bp->link_params.req_flow_ctrl, bp->port.advertising);
9662 }
9663
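/*
 * Lays the MAC address out in network byte order, e.g. (hypothetical
 * values) mac_hi 0xa1b2 and mac_lo 0xc3d4e5f6 yield a1:b2:c3:d4:e5:f6.
 */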
9664 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9665 {
9666         mac_hi = cpu_to_be16(mac_hi);
9667         mac_lo = cpu_to_be32(mac_lo);
9668         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9669         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9670 }
9671
9672 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
9673 {
9674         int port = BP_PORT(bp);
9675         u32 val, val2;
9676         u32 config;
9677         u16 i;
9678         u32 ext_phy_type;
9679
9680         bp->link_params.bp = bp;
9681         bp->link_params.port = port;
9682
9683         bp->link_params.lane_config =
9684                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
9685         bp->link_params.ext_phy_config =
9686                 SHMEM_RD(bp,
9687                          dev_info.port_hw_config[port].external_phy_config);
9688         /* BCM8727_NOC => BCM8727 with no over-current support */
9689         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9690             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9691                 bp->link_params.ext_phy_config &=
9692                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9693                 bp->link_params.ext_phy_config |=
9694                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9695                 bp->link_params.feature_config_flags |=
9696                         FEATURE_CONFIG_BCM8727_NOC;
9697         }
9698
9699         bp->link_params.speed_cap_mask =
9700                 SHMEM_RD(bp,
9701                          dev_info.port_hw_config[port].speed_capability_mask);
9702
9703         bp->port.link_config =
9704                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9705
9706         /* Get the XGXS rx and tx config for all 4 lanes */
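        /* each 32-bit shmem word packs two 16-bit lane values: bits 31:16
         * go to the even lane and bits 15:0 to the odd one, e.g. a word of
         * 0xaaaabbbb gives lane0 = 0xaaaa, lane1 = 0xbbbb */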
9707         for (i = 0; i < 2; i++) {
9708                 val = SHMEM_RD(bp,
9709                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9710                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9711                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9712
9713                 val = SHMEM_RD(bp,
9714                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9715                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9716                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9717         }
9718
9719         /* If the device is capable of WoL, set the default state according
9720          * to the HW
9721          */
9722         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
9723         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9724                    (config & PORT_FEATURE_WOL_ENABLED));
9725
9726         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
9727                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
9728                        bp->link_params.lane_config,
9729                        bp->link_params.ext_phy_config,
9730                        bp->link_params.speed_cap_mask, bp->port.link_config);
9731
9732         bp->link_params.switch_cfg |= (bp->port.link_config &
9733                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
9734         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
9735
9736         bnx2x_link_settings_requested(bp);
9737
9738         /*
9739          * If connected directly, work with the internal PHY; otherwise, work
9740          * with the external PHY.
9741          */
9742         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9743         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9744                 bp->mdio.prtad = bp->link_params.phy_addr;
9745
9746         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9747                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9748                 bp->mdio.prtad =
9749                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9750
9751         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9752         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
9753         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
9754         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9755         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9756
9757 #ifdef BCM_CNIC
9758         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9759         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9760         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9761 #endif
9762 }
9763
9764 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9765 {
9766         int func = BP_FUNC(bp);
9767         u32 val, val2;
9768         int rc = 0;
9769
9770         bnx2x_get_common_hwinfo(bp);
9771
9772         bp->e1hov = 0;
9773         bp->e1hmf = 0;
9774         if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
9775                 bp->mf_config =
9776                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
9777
9778                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
9779                        FUNC_MF_CFG_E1HOV_TAG_MASK);
9780                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
9781                         bp->e1hmf = 1;
9782                 BNX2X_DEV_INFO("%s function mode\n",
9783                                IS_E1HMF(bp) ? "multi" : "single");
9784
9785                 if (IS_E1HMF(bp)) {
9786                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9787                                                                 e1hov_tag) &
9788                                FUNC_MF_CFG_E1HOV_TAG_MASK);
9789                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9790                                 bp->e1hov = val;
9791                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9792                                                "(0x%04x)\n",
9793                                                func, bp->e1hov, bp->e1hov);
9794                         } else {
9795                                 BNX2X_ERROR("No valid E1HOV for func %d,"
9796                                             "  aborting\n", func);
9797                                 rc = -EPERM;
9798                         }
9799                 } else {
9800                         if (BP_E1HVN(bp)) {
9801                                 BNX2X_ERROR("VN %d in single function mode,"
9802                                             "  aborting\n", BP_E1HVN(bp));
9803                                 rc = -EPERM;
9804                         }
9805                 }
9806         }
9807
9808         if (!BP_NOMCP(bp)) {
9809                 bnx2x_get_port_hwinfo(bp);
9810
9811                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9812                               DRV_MSG_SEQ_NUMBER_MASK);
9813                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9814         }
9815
9816         if (IS_E1HMF(bp)) {
9817                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9818                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
9819                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9820                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
9821                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9822                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9823                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9824                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9825                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
9826                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
9827                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9828                                ETH_ALEN);
9829                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9830                                ETH_ALEN);
9831                 }
9832
9833                 return rc;
9834         }
9835
9836         if (BP_NOMCP(bp)) {
9837                 /* only supposed to happen on emulation/FPGA */
9838                 BNX2X_ERROR("warning: random MAC workaround active\n");
9839                 random_ether_addr(bp->dev->dev_addr);
9840                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9841         }
9842
9843         return rc;
9844 }
9845
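/*
 * Parse the PCI VPD read-only section: if the MFR_ID keyword matches the
 * Dell vendor ID, the VENDOR0 keyword carries an OEM firmware version
 * string that is copied into bp->fw_ver; otherwise fw_ver stays empty.
 */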
9846 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9847 {
9848         int cnt, i, block_end, rodi;
9849         char vpd_data[BNX2X_VPD_LEN+1];
9850         char str_id_reg[VENDOR_ID_LEN+1];
9851         char str_id_cap[VENDOR_ID_LEN+1];
9852         u8 len;
9853
9854         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9855         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9856
9857         if (cnt < BNX2X_VPD_LEN)
9858                 goto out_not_found;
9859
9860         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9861                              PCI_VPD_LRDT_RO_DATA);
9862         if (i < 0)
9863                 goto out_not_found;
9864
9865
9866         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9867                     pci_vpd_lrdt_size(&vpd_data[i]);
9868
9869         i += PCI_VPD_LRDT_TAG_SIZE;
9870
9871         if (block_end > BNX2X_VPD_LEN)
9872                 goto out_not_found;
9873
9874         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9875                                    PCI_VPD_RO_KEYWORD_MFR_ID);
9876         if (rodi < 0)
9877                 goto out_not_found;
9878
9879         len = pci_vpd_info_field_size(&vpd_data[rodi]);
9880
9881         if (len != VENDOR_ID_LEN)
9882                 goto out_not_found;
9883
9884         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9885
9886         /* vendor specific info */
9887         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9888         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9889         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9890             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9891
9892                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9893                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
9894                 if (rodi >= 0) {
9895                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
9896
9897                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9898
9899                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9900                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9901                                 bp->fw_ver[len] = ' ';
9902                         }
9903                 }
9904                 return;
9905         }
9906 out_not_found:
9907         return;
9908 }
9909
9910 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9911 {
9912         int func = BP_FUNC(bp);
9913         int timer_interval;
9914         int rc;
9915
9916         /* Disable interrupt handling until HW is initialized */
9917         atomic_set(&bp->intr_sem, 1);
9918         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
9919
9920         mutex_init(&bp->port.phy_mutex);
9921         mutex_init(&bp->fw_mb_mutex);
9922         spin_lock_init(&bp->stats_lock);
9923 #ifdef BCM_CNIC
9924         mutex_init(&bp->cnic_mutex);
9925 #endif
9926
9927         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
9928         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
9929
9930         rc = bnx2x_get_hwinfo(bp);
9931
9932         bnx2x_read_fwinfo(bp);
9933         /* need to reset the chip if UNDI was active */
9934         if (!BP_NOMCP(bp))
9935                 bnx2x_undi_unload(bp);
9936
9937         if (CHIP_REV_IS_FPGA(bp))
9938                 dev_err(&bp->pdev->dev, "FPGA detected\n");
9939
9940         if (BP_NOMCP(bp) && (func == 0))
9941                 dev_err(&bp->pdev->dev, "MCP disabled, "
9942                                         "must load devices in order!\n");
9943
9944         /* Set multi queue mode */
9945         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9946             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
9947                 dev_err(&bp->pdev->dev, "Multi queue disabled since the "
9948                                         "requested int_mode is not MSI-X\n");
9949                 multi_mode = ETH_RSS_MODE_DISABLED;
9950         }
9951         bp->multi_mode = multi_mode;
9952
9953
9954         bp->dev->features |= NETIF_F_GRO;
9955
9956         /* Set TPA flags */
9957         if (disable_tpa) {
9958                 bp->flags &= ~TPA_ENABLE_FLAG;
9959                 bp->dev->features &= ~NETIF_F_LRO;
9960         } else {
9961                 bp->flags |= TPA_ENABLE_FLAG;
9962                 bp->dev->features |= NETIF_F_LRO;
9963         }
9964
9965         if (CHIP_IS_E1(bp))
9966                 bp->dropless_fc = 0;
9967         else
9968                 bp->dropless_fc = dropless_fc;
9969
9970         bp->mrrs = mrrs;
9971
9972         bp->tx_ring_size = MAX_TX_AVAIL;
9973         bp->rx_ring_size = MAX_RX_AVAIL;
9974
9975         bp->rx_csum = 1;
9976
9977         /* round the default coalescing values down to a multiple of 4*BNX2X_BTR */
9978         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9979         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9980
9981         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9982         bp->current_interval = (poll ? poll : timer_interval);
9983
9984         init_timer(&bp->timer);
9985         bp->timer.expires = jiffies + bp->current_interval;
9986         bp->timer.data = (unsigned long) bp;
9987         bp->timer.function = bnx2x_timer;
9988
9989         return rc;
9990 }
9991
9992 /*
9993  * ethtool service functions
9994  */
9995
9996 /* All ethtool functions called with rtnl_lock */
9997
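/*
 * Reached through the ETHTOOL_GSET ioctl, e.g. by a plain "ethtool ethX"
 * invocation.
 */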
9998 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9999 {
10000         struct bnx2x *bp = netdev_priv(dev);
10001
10002         cmd->supported = bp->port.supported;
10003         cmd->advertising = bp->port.advertising;
10004
10005         if ((bp->state == BNX2X_STATE_OPEN) &&
10006             !(bp->flags & MF_FUNC_DIS) &&
10007             (bp->link_vars.link_up)) {
10008                 cmd->speed = bp->link_vars.line_speed;
10009                 cmd->duplex = bp->link_vars.duplex;
10010                 if (IS_E1HMF(bp)) {
10011                         u16 vn_max_rate;
10012
10013                         vn_max_rate =
10014                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
10015                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
10016                         if (vn_max_rate < cmd->speed)
10017                                 cmd->speed = vn_max_rate;
10018                 }
10019         } else {
10020                 cmd->speed = -1;
10021                 cmd->duplex = -1;
10022         }
10023
10024         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
10025                 u32 ext_phy_type =
10026                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
10027
10028                 switch (ext_phy_type) {
10029                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
10030                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
10031                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
10032                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
10033                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
10034                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
10035                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
10036                         cmd->port = PORT_FIBRE;
10037                         break;
10038
10039                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
10040                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
10041                         cmd->port = PORT_TP;
10042                         break;
10043
10044                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
10045                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
10046                                   bp->link_params.ext_phy_config);
10047                         break;
10048
10049                 default:
10050                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
10051                            bp->link_params.ext_phy_config);
10052                         break;
10053                 }
10054         } else
10055                 cmd->port = PORT_TP;
10056
10057         cmd->phy_address = bp->mdio.prtad;
10058         cmd->transceiver = XCVR_INTERNAL;
10059
10060         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10061                 cmd->autoneg = AUTONEG_ENABLE;
10062         else
10063                 cmd->autoneg = AUTONEG_DISABLE;
10064
10065         cmd->maxtxpkt = 0;
10066         cmd->maxrxpkt = 0;
10067
10068         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10069            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
10070            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
10071            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
10072            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10073            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10074            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10075
10076         return 0;
10077 }
10078
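/*
 * Reached through ETHTOOL_SSET, e.g. "ethtool -s ethX speed 10000 duplex
 * full autoneg off"; a forced mode is rejected with -EINVAL unless it is
 * present in bp->port.supported.
 */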
10079 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10080 {
10081         struct bnx2x *bp = netdev_priv(dev);
10082         u32 advertising;
10083
10084         if (IS_E1HMF(bp))
10085                 return 0;
10086
10087         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10088            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
10089            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
10090            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
10091            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10092            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10093            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10094
10095         if (cmd->autoneg == AUTONEG_ENABLE) {
10096                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10097                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
10098                         return -EINVAL;
10099                 }
10100
10101                 /* advertise the requested speed and duplex if supported */
10102                 cmd->advertising &= bp->port.supported;
10103
10104                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
10105                 bp->link_params.req_duplex = DUPLEX_FULL;
10106                 bp->port.advertising |= (ADVERTISED_Autoneg |
10107                                          cmd->advertising);
10108
10109         } else { /* forced speed */
10110                 /* advertise the requested speed and duplex if supported */
10111                 switch (cmd->speed) {
10112                 case SPEED_10:
10113                         if (cmd->duplex == DUPLEX_FULL) {
10114                                 if (!(bp->port.supported &
10115                                       SUPPORTED_10baseT_Full)) {
10116                                         DP(NETIF_MSG_LINK,
10117                                            "10M full not supported\n");
10118                                         return -EINVAL;
10119                                 }
10120
10121                                 advertising = (ADVERTISED_10baseT_Full |
10122                                                ADVERTISED_TP);
10123                         } else {
10124                                 if (!(bp->port.supported &
10125                                       SUPPORTED_10baseT_Half)) {
10126                                         DP(NETIF_MSG_LINK,
10127                                            "10M half not supported\n");
10128                                         return -EINVAL;
10129                                 }
10130
10131                                 advertising = (ADVERTISED_10baseT_Half |
10132                                                ADVERTISED_TP);
10133                         }
10134                         break;
10135
10136                 case SPEED_100:
10137                         if (cmd->duplex == DUPLEX_FULL) {
10138                                 if (!(bp->port.supported &
10139                                                 SUPPORTED_100baseT_Full)) {
10140                                         DP(NETIF_MSG_LINK,
10141                                            "100M full not supported\n");
10142                                         return -EINVAL;
10143                                 }
10144
10145                                 advertising = (ADVERTISED_100baseT_Full |
10146                                                ADVERTISED_TP);
10147                         } else {
10148                                 if (!(bp->port.supported &
10149                                                 SUPPORTED_100baseT_Half)) {
10150                                         DP(NETIF_MSG_LINK,
10151                                            "100M half not supported\n");
10152                                         return -EINVAL;
10153                                 }
10154
10155                                 advertising = (ADVERTISED_100baseT_Half |
10156                                                ADVERTISED_TP);
10157                         }
10158                         break;
10159
10160                 case SPEED_1000:
10161                         if (cmd->duplex != DUPLEX_FULL) {
10162                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
10163                                 return -EINVAL;
10164                         }
10165
10166                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
10167                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
10168                                 return -EINVAL;
10169                         }
10170
10171                         advertising = (ADVERTISED_1000baseT_Full |
10172                                        ADVERTISED_TP);
10173                         break;
10174
10175                 case SPEED_2500:
10176                         if (cmd->duplex != DUPLEX_FULL) {
10177                                 DP(NETIF_MSG_LINK,
10178                                    "2.5G half not supported\n");
10179                                 return -EINVAL;
10180                         }
10181
10182                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
10183                                 DP(NETIF_MSG_LINK,
10184                                    "2.5G full not supported\n");
10185                                 return -EINVAL;
10186                         }
10187
10188                         advertising = (ADVERTISED_2500baseX_Full |
10189                                        ADVERTISED_TP);
10190                         break;
10191
10192                 case SPEED_10000:
10193                         if (cmd->duplex != DUPLEX_FULL) {
10194                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
10195                                 return -EINVAL;
10196                         }
10197
10198                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
10199                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
10200                                 return -EINVAL;
10201                         }
10202
10203                         advertising = (ADVERTISED_10000baseT_Full |
10204                                        ADVERTISED_FIBRE);
10205                         break;
10206
10207                 default:
10208                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
10209                         return -EINVAL;
10210                 }
10211
10212                 bp->link_params.req_line_speed = cmd->speed;
10213                 bp->link_params.req_duplex = cmd->duplex;
10214                 bp->port.advertising = advertising;
10215         }
10216
10217         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
10218            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
10219            bp->link_params.req_line_speed, bp->link_params.req_duplex,
10220            bp->port.advertising);
10221
10222         if (netif_running(dev)) {
10223                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10224                 bnx2x_link_set(bp);
10225         }
10226
10227         return 0;
10228 }
10229
10230 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
10231 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
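/* a register block is included in the dump only when all of its
 * chip-revision bits (RI_E1_ONLINE / RI_E1H_ONLINE) are set in the
 * info field */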
10232
10233 static int bnx2x_get_regs_len(struct net_device *dev)
10234 {
10235         struct bnx2x *bp = netdev_priv(dev);
10236         int regdump_len = 0;
10237         int i;
10238
10239         if (CHIP_IS_E1(bp)) {
10240                 for (i = 0; i < REGS_COUNT; i++)
10241                         if (IS_E1_ONLINE(reg_addrs[i].info))
10242                                 regdump_len += reg_addrs[i].size;
10243
10244                 for (i = 0; i < WREGS_COUNT_E1; i++)
10245                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
10246                                 regdump_len += wreg_addrs_e1[i].size *
10247                                         (1 + wreg_addrs_e1[i].read_regs_count);
10248
10249         } else { /* E1H */
10250                 for (i = 0; i < REGS_COUNT; i++)
10251                         if (IS_E1H_ONLINE(reg_addrs[i].info))
10252                                 regdump_len += reg_addrs[i].size;
10253
10254                 for (i = 0; i < WREGS_COUNT_E1H; i++)
10255                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
10256                                 regdump_len += wreg_addrs_e1h[i].size *
10257                                         (1 + wreg_addrs_e1h[i].read_regs_count);
10258         }
10259         regdump_len *= 4;
10260         regdump_len += sizeof(struct dump_hdr);
10261
10262         return regdump_len;
10263 }
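/*
 * Sizing example (hypothetical entry): a wreg entry with size 3 and
 * read_regs_count 2 contributes 3 * (1 + 2) = 9 words, i.e. 36 bytes
 * after the final multiply by 4; plain reg entries contribute size words.
 */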
10264
10265 static void bnx2x_get_regs(struct net_device *dev,
10266                            struct ethtool_regs *regs, void *_p)
10267 {
10268         u32 *p = _p, i, j;
10269         struct bnx2x *bp = netdev_priv(dev);
10270         struct dump_hdr dump_hdr = {0};
10271
10272         regs->version = 0;
10273         memset(p, 0, regs->len);
10274
10275         if (!netif_running(bp->dev))
10276                 return;
10277
10278         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
10279         dump_hdr.dump_sign = dump_sign_all;
10280         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
10281         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
10282         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
10283         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
10284         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
10285
10286         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
10287         p += dump_hdr.hdr_size + 1;
10288
10289         if (CHIP_IS_E1(bp)) {
10290                 for (i = 0; i < REGS_COUNT; i++)
10291                         if (IS_E1_ONLINE(reg_addrs[i].info))
10292                                 for (j = 0; j < reg_addrs[i].size; j++)
10293                                         *p++ = REG_RD(bp,
10294                                                       reg_addrs[i].addr + j*4);
10295
10296         } else { /* E1H */
10297                 for (i = 0; i < REGS_COUNT; i++)
10298                         if (IS_E1H_ONLINE(reg_addrs[i].info))
10299                                 for (j = 0; j < reg_addrs[i].size; j++)
10300                                         *p++ = REG_RD(bp,
10301                                                       reg_addrs[i].addr + j*4);
10302         }
10303 }
10304
10305 #define PHY_FW_VER_LEN                  10
10306
10307 static void bnx2x_get_drvinfo(struct net_device *dev,
10308                               struct ethtool_drvinfo *info)
10309 {
10310         struct bnx2x *bp = netdev_priv(dev);
10311         u8 phy_fw_ver[PHY_FW_VER_LEN];
10312
10313         strcpy(info->driver, DRV_MODULE_NAME);
10314         strcpy(info->version, DRV_MODULE_VERSION);
10315
10316         phy_fw_ver[0] = '\0';
10317         if (bp->port.pmf) {
10318                 bnx2x_acquire_phy_lock(bp);
10319                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
10320                                              (bp->state != BNX2X_STATE_CLOSED),
10321                                              phy_fw_ver, PHY_FW_VER_LEN);
10322                 bnx2x_release_phy_lock(bp);
10323         }
10324
10325         strncpy(info->fw_version, bp->fw_ver, 32);
10326         snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10327                  "bc %d.%d.%d%s%s",
10328                  (bp->common.bc_ver & 0xff0000) >> 16,
10329                  (bp->common.bc_ver & 0xff00) >> 8,
10330                  (bp->common.bc_ver & 0xff),
10331                  ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
10332         strcpy(info->bus_info, pci_name(bp->pdev));
10333         info->n_stats = BNX2X_NUM_STATS;
10334         info->testinfo_len = BNX2X_NUM_TESTS;
10335         info->eedump_len = bp->common.flash_size;
10336         info->regdump_len = bnx2x_get_regs_len(dev);
10337 }
10338
10339 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10340 {
10341         struct bnx2x *bp = netdev_priv(dev);
10342
10343         if (bp->flags & NO_WOL_FLAG) {
10344                 wol->supported = 0;
10345                 wol->wolopts = 0;
10346         } else {
10347                 wol->supported = WAKE_MAGIC;
10348                 if (bp->wol)
10349                         wol->wolopts = WAKE_MAGIC;
10350                 else
10351                         wol->wolopts = 0;
10352         }
10353         memset(&wol->sopass, 0, sizeof(wol->sopass));
10354 }
10355
10356 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10357 {
10358         struct bnx2x *bp = netdev_priv(dev);
10359
10360         if (wol->wolopts & ~WAKE_MAGIC)
10361                 return -EINVAL;
10362
10363         if (wol->wolopts & WAKE_MAGIC) {
10364                 if (bp->flags & NO_WOL_FLAG)
10365                         return -EINVAL;
10366
10367                 bp->wol = 1;
10368         } else
10369                 bp->wol = 0;
10370
10371         return 0;
10372 }
10373
10374 static u32 bnx2x_get_msglevel(struct net_device *dev)
10375 {
10376         struct bnx2x *bp = netdev_priv(dev);
10377
10378         return bp->msg_enable;
10379 }
10380
10381 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10382 {
10383         struct bnx2x *bp = netdev_priv(dev);
10384
10385         if (capable(CAP_NET_ADMIN))
10386                 bp->msg_enable = level;
10387 }
10388
10389 static int bnx2x_nway_reset(struct net_device *dev)
10390 {
10391         struct bnx2x *bp = netdev_priv(dev);
10392
10393         if (!bp->port.pmf)
10394                 return 0;
10395
10396         if (netif_running(dev)) {
10397                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10398                 bnx2x_link_set(bp);
10399         }
10400
10401         return 0;
10402 }
10403
10404 static u32 bnx2x_get_link(struct net_device *dev)
10405 {
10406         struct bnx2x *bp = netdev_priv(dev);
10407
10408         if (bp->flags & MF_FUNC_DIS)
10409                 return 0;
10410
10411         return bp->link_vars.link_up;
10412 }
10413
10414 static int bnx2x_get_eeprom_len(struct net_device *dev)
10415 {
10416         struct bnx2x *bp = netdev_priv(dev);
10417
10418         return bp->common.flash_size;
10419 }
10420
10421 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
10422 {
10423         int port = BP_PORT(bp);
10424         int count, i;
10425         u32 val = 0;
10426
10427         /* adjust timeout for emulation/FPGA */
10428         count = NVRAM_TIMEOUT_COUNT;
10429         if (CHIP_REV_IS_SLOW(bp))
10430                 count *= 100;
10431
10432         /* request access to nvram interface */
10433         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10434                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
10435
10436         for (i = 0; i < count*10; i++) {
10437                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10438                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
10439                         break;
10440
10441                 udelay(5);
10442         }
10443
10444         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
10445                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
10446                 return -EBUSY;
10447         }
10448
10449         return 0;
10450 }
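/*
 * The poll budget above is count * 10 iterations with a 5 us delay, i.e.
 * roughly NVRAM_TIMEOUT_COUNT * 50 us on real silicon and 100 times that
 * on emulation/FPGA.
 */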
10451
10452 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
10453 {
10454         int port = BP_PORT(bp);
10455         int count, i;
10456         u32 val = 0;
10457
10458         /* adjust timeout for emulation/FPGA */
10459         count = NVRAM_TIMEOUT_COUNT;
10460         if (CHIP_REV_IS_SLOW(bp))
10461                 count *= 100;
10462
10463         /* relinquish nvram interface */
10464         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10465                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
10466
10467         for (i = 0; i < count*10; i++) {
10468                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10469                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
10470                         break;
10471
10472                 udelay(5);
10473         }
10474
10475         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
10476                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
10477                 return -EBUSY;
10478         }
10479
10480         return 0;
10481 }
10482
10483 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
10484 {
10485         u32 val;
10486
10487         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10488
10489         /* enable both bits, even on read */
10490         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10491                (val | MCPR_NVM_ACCESS_ENABLE_EN |
10492                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
10493 }
10494
10495 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
10496 {
10497         u32 val;
10498
10499         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10500
10501         /* disable both bits, even after read */
10502         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10503                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
10504                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
10505 }
10506
10507 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
10508                                   u32 cmd_flags)
10509 {
10510         int count, i, rc;
10511         u32 val;
10512
10513         /* build the command word */
10514         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
10515
10516         /* need to clear DONE bit separately */
10517         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10518
10519         /* address of the NVRAM to read from */
10520         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10521                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10522
10523         /* issue a read command */
10524         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10525
10526         /* adjust timeout for emulation/FPGA */
10527         count = NVRAM_TIMEOUT_COUNT;
10528         if (CHIP_REV_IS_SLOW(bp))
10529                 count *= 100;
10530
10531         /* wait for completion */
10532         *ret_val = 0;
10533         rc = -EBUSY;
10534         for (i = 0; i < count; i++) {
10535                 udelay(5);
10536                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10537
10538                 if (val & MCPR_NVM_COMMAND_DONE) {
10539                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
10540                         /* we read nvram data in cpu order, but
10541                          * ethtool sees it as an array of bytes;
10542                          * big-endian restores the on-flash byte order */
10543                         *ret_val = cpu_to_be32(val);
10544                         rc = 0;
10545                         break;
10546                 }
10547         }
10548
10549         return rc;
10550 }
10551
10552 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
10553                             int buf_size)
10554 {
10555         int rc;
10556         u32 cmd_flags;
10557         __be32 val;
10558
10559         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10560                 DP(BNX2X_MSG_NVM,
10561                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
10562                    offset, buf_size);
10563                 return -EINVAL;
10564         }
10565
10566         if (offset + buf_size > bp->common.flash_size) {
10567                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10568                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10569                    offset, buf_size, bp->common.flash_size);
10570                 return -EINVAL;
10571         }
10572
10573         /* request access to nvram interface */
10574         rc = bnx2x_acquire_nvram_lock(bp);
10575         if (rc)
10576                 return rc;
10577
10578         /* enable access to nvram interface */
10579         bnx2x_enable_nvram_access(bp);
10580
10581         /* read the first word(s) */
10582         cmd_flags = MCPR_NVM_COMMAND_FIRST;
10583         while ((buf_size > sizeof(u32)) && (rc == 0)) {
10584                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10585                 memcpy(ret_buf, &val, 4);
10586
10587                 /* advance to the next dword */
10588                 offset += sizeof(u32);
10589                 ret_buf += sizeof(u32);
10590                 buf_size -= sizeof(u32);
10591                 cmd_flags = 0;
10592         }
10593
10594         if (rc == 0) {
10595                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10596                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10597                 memcpy(ret_buf, &val, 4);
10598         }
10599
10600         /* disable access to nvram interface */
10601         bnx2x_disable_nvram_access(bp);
10602         bnx2x_release_nvram_lock(bp);
10603
10604         return rc;
10605 }
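/*
 * Read example: a 12-byte, dword-aligned read issues three dword reads
 * whose cmd_flags are FIRST, 0 and LAST respectively.
 */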
10606
10607 static int bnx2x_get_eeprom(struct net_device *dev,
10608                             struct ethtool_eeprom *eeprom, u8 *eebuf)
10609 {
10610         struct bnx2x *bp = netdev_priv(dev);
10611         int rc;
10612
10613         if (!netif_running(dev))
10614                 return -EAGAIN;
10615
10616         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10617            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
10618            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10619            eeprom->len, eeprom->len);
10620
10621         /* parameters already validated in ethtool_get_eeprom */
10622
10623         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
10624
10625         return rc;
10626 }
10627
10628 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
10629                                    u32 cmd_flags)
10630 {
10631         int count, i, rc;
10632
10633         /* build the command word */
10634         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
10635
10636         /* need to clear DONE bit separately */
10637         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10638
10639         /* write the data */
10640         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
10641
10642         /* address of the NVRAM to write to */
10643         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10644                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10645
10646         /* issue the write command */
10647         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10648
10649         /* adjust timeout for emulation/FPGA */
10650         count = NVRAM_TIMEOUT_COUNT;
10651         if (CHIP_REV_IS_SLOW(bp))
10652                 count *= 100;
10653
10654         /* wait for completion */
10655         rc = -EBUSY;
10656         for (i = 0; i < count; i++) {
10657                 udelay(5);
10658                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10659                 if (val & MCPR_NVM_COMMAND_DONE) {
10660                         rc = 0;
10661                         break;
10662                 }
10663         }
10664
10665         return rc;
10666 }
10667
10668 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
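/* bit position of the addressed byte within its aligned dword, e.g.
 * BYTE_OFFSET(0x102) = 8 * (0x102 & 3) = 16 */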
10669
10670 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
10671                               int buf_size)
10672 {
10673         int rc;
10674         u32 cmd_flags;
10675         u32 align_offset;
10676         __be32 val;
10677
10678         if (offset + buf_size > bp->common.flash_size) {
10679                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10680                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10681                    offset, buf_size, bp->common.flash_size);
10682                 return -EINVAL;
10683         }
10684
10685         /* request access to nvram interface */
10686         rc = bnx2x_acquire_nvram_lock(bp);
10687         if (rc)
10688                 return rc;
10689
10690         /* enable access to nvram interface */
10691         bnx2x_enable_nvram_access(bp);
10692
10693         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
10694         align_offset = (offset & ~0x03);
10695         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
10696
10697         if (rc == 0) {
10698                 val &= ~(0xff << BYTE_OFFSET(offset));
10699                 val |= (*data_buf << BYTE_OFFSET(offset));
10700
10701                 /* nvram data is returned as an array of bytes;
10702                  * convert it back to cpu order before writing */
10703                 val = be32_to_cpu(val);
10704
10705                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
10706                                              cmd_flags);
10707         }
10708
10709         /* disable access to nvram interface */
10710         bnx2x_disable_nvram_access(bp);
10711         bnx2x_release_nvram_lock(bp);
10712
10713         return rc;
10714 }
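/*
 * Single-byte writes are therefore read-modify-write: the containing
 * dword is read back, the byte selected by BYTE_OFFSET() is replaced,
 * and the whole dword is written out again.
 */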
10715
10716 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
10717                              int buf_size)
10718 {
10719         int rc;
10720         u32 cmd_flags;
10721         u32 val;
10722         u32 written_so_far;
10723
10724         if (buf_size == 1)      /* ethtool */
10725                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
10726
10727         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10728                 DP(BNX2X_MSG_NVM,
10729                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
10730                    offset, buf_size);
10731                 return -EINVAL;
10732         }
10733
10734         if (offset + buf_size > bp->common.flash_size) {
10735                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10736                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10737                    offset, buf_size, bp->common.flash_size);
10738                 return -EINVAL;
10739         }
10740
10741         /* request access to nvram interface */
10742         rc = bnx2x_acquire_nvram_lock(bp);
10743         if (rc)
10744                 return rc;
10745
10746         /* enable access to nvram interface */
10747         bnx2x_enable_nvram_access(bp);
10748
10749         written_so_far = 0;
10750         cmd_flags = MCPR_NVM_COMMAND_FIRST;
10751         while ((written_so_far < buf_size) && (rc == 0)) {
10752                 if (written_so_far == (buf_size - sizeof(u32)))
10753                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
10754                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
10755                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
10756                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
10757                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
10758
10759                 memcpy(&val, data_buf, 4);
10760
10761                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
10762
10763                 /* advance to the next dword */
10764                 offset += sizeof(u32);
10765                 data_buf += sizeof(u32);
10766                 written_so_far += sizeof(u32);
10767                 cmd_flags = 0;
10768         }
10769
10770         /* disable access to nvram interface */
10771         bnx2x_disable_nvram_access(bp);
10772         bnx2x_release_nvram_lock(bp);
10773
10774         return rc;
10775 }
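/*
 * Page-bracketing example (assuming a 256-byte NVRAM_PAGE_SIZE): a
 * 512-byte write at offset 0 raises FIRST at offsets 0x0 and 0x100 and
 * LAST at 0xfc and 0x1fc, so every flash page is programmed as one
 * FIRST..LAST sequence.
 */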
10776
10777 static int bnx2x_set_eeprom(struct net_device *dev,
10778                             struct ethtool_eeprom *eeprom, u8 *eebuf)
10779 {
10780         struct bnx2x *bp = netdev_priv(dev);
10781         int port = BP_PORT(bp);
10782         int rc = 0;
10783
10784         if (!netif_running(dev))
10785                 return -EAGAIN;
10786
10787         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10788            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
10789            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10790            eeprom->len, eeprom->len);
10791
10792         /* parameters already validated in ethtool_set_eeprom */
10793
10794         /* PHY eeprom can be accessed only by the PMF */
10795         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
10796             !bp->port.pmf)
10797                 return -EINVAL;
10798
10799         if (eeprom->magic == 0x50485950) {
10800                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
10801                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10802
10803                 bnx2x_acquire_phy_lock(bp);
10804                 rc |= bnx2x_link_reset(&bp->link_params,
10805                                        &bp->link_vars, 0);
10806                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10807                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
10808                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10809                                        MISC_REGISTERS_GPIO_HIGH, port);
10810                 bnx2x_release_phy_lock(bp);
10811                 bnx2x_link_report(bp);
10812
10813         } else if (eeprom->magic == 0x50485952) {
10814                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
10815                 if (bp->state == BNX2X_STATE_OPEN) {
10816                         bnx2x_acquire_phy_lock(bp);
10817                         rc |= bnx2x_link_reset(&bp->link_params,
10818                                                &bp->link_vars, 1);
10819
10820                         rc |= bnx2x_phy_init(&bp->link_params,
10821                                              &bp->link_vars);
10822                         bnx2x_release_phy_lock(bp);
10823                         bnx2x_calc_fc_adv(bp);
10824                 }
10825         } else if (eeprom->magic == 0x53985943) {
10826                 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
10827                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10828                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
10829                         u8 ext_phy_addr =
10830                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
10831
10832                         /* DSP Remove Download Mode */
10833                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10834                                        MISC_REGISTERS_GPIO_LOW, port);
10835
10836                         bnx2x_acquire_phy_lock(bp);
10837
10838                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
10839
10840                         /* wait 0.5 sec to allow it to run */
10841                         msleep(500);
10842                         bnx2x_ext_phy_hw_reset(bp, port);
10843                         msleep(500);
10844                         bnx2x_release_phy_lock(bp);
10845                 }
10846         } else
10847                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
10848
10849         return rc;
10850 }
10851
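/*
 * Interrupt coalescing: the ethtool rx/tx usec parameters map directly
 * onto bp->rx_ticks/bp->tx_ticks.  Setting clamps both values to
 * BNX2X_MAX_COALESCE_TOUT and, if the interface is up, pushes them to
 * the hardware via bnx2x_update_coalesce().
 */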
10852 static int bnx2x_get_coalesce(struct net_device *dev,
10853                               struct ethtool_coalesce *coal)
10854 {
10855         struct bnx2x *bp = netdev_priv(dev);
10856
10857         memset(coal, 0, sizeof(struct ethtool_coalesce));
10858
10859         coal->rx_coalesce_usecs = bp->rx_ticks;
10860         coal->tx_coalesce_usecs = bp->tx_ticks;
10861
10862         return 0;
10863 }
10864
10865 static int bnx2x_set_coalesce(struct net_device *dev,
10866                               struct ethtool_coalesce *coal)
10867 {
10868         struct bnx2x *bp = netdev_priv(dev);
10869
10870         bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
10871         if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
10872                 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
10873
10874         bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
10875         if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
10876                 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
10877
10878         if (netif_running(dev))
10879                 bnx2x_update_coalesce(bp);
10880
10881         return 0;
10882 }
10883
10884 static void bnx2x_get_ringparam(struct net_device *dev,
10885                                 struct ethtool_ringparam *ering)
10886 {
10887         struct bnx2x *bp = netdev_priv(dev);
10888
10889         ering->rx_max_pending = MAX_RX_AVAIL;
10890         ering->rx_mini_max_pending = 0;
10891         ering->rx_jumbo_max_pending = 0;
10892
10893         ering->rx_pending = bp->rx_ring_size;
10894         ering->rx_mini_pending = 0;
10895         ering->rx_jumbo_pending = 0;
10896
10897         ering->tx_max_pending = MAX_TX_AVAIL;
10898         ering->tx_pending = bp->tx_ring_size;
10899 }
10900
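/*
 * bnx2x_set_ringparam - resize the rx/tx rings.
 *
 * The MAX_SKB_FRAGS + 4 lower bound keeps room in the tx ring for a
 * maximally-fragmented skb plus its control BDs.  Applying new sizes
 * requires a full unload/load cycle when the NIC is running, and is
 * refused while parity error recovery is in progress.
 */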
10901 static int bnx2x_set_ringparam(struct net_device *dev,
10902                                struct ethtool_ringparam *ering)
10903 {
10904         struct bnx2x *bp = netdev_priv(dev);
10905         int rc = 0;
10906
10907         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10908                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10909                 return -EAGAIN;
10910         }
10911
10912         if ((ering->rx_pending > MAX_RX_AVAIL) ||
10913             (ering->tx_pending > MAX_TX_AVAIL) ||
10914             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
10915                 return -EINVAL;
10916
10917         bp->rx_ring_size = ering->rx_pending;
10918         bp->tx_ring_size = ering->tx_pending;
10919
10920         if (netif_running(dev)) {
10921                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10922                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10923         }
10924
10925         return rc;
10926 }
10927
10928 static void bnx2x_get_pauseparam(struct net_device *dev,
10929                                  struct ethtool_pauseparam *epause)
10930 {
10931         struct bnx2x *bp = netdev_priv(dev);
10932
10933         epause->autoneg = (bp->link_params.req_flow_ctrl ==
10934                            BNX2X_FLOW_CTRL_AUTO) &&
10935                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
10936
10937         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
10938                             BNX2X_FLOW_CTRL_RX);
10939         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
10940                             BNX2X_FLOW_CTRL_TX);
10941
10942         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10943            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
10944            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10945 }
10946
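/*
 * bnx2x_set_pauseparam - configure flow control.
 *
 * A no-op in E1H multi-function mode.  The rx/tx pause requests are
 * folded into req_flow_ctrl (NONE when neither is set); "autoneg" only
 * sticks when the line speed itself is auto-negotiated, in which case
 * flow control reverts to BNX2X_FLOW_CTRL_AUTO and is negotiated too.
 */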
10947 static int bnx2x_set_pauseparam(struct net_device *dev,
10948                                 struct ethtool_pauseparam *epause)
10949 {
10950         struct bnx2x *bp = netdev_priv(dev);
10951
10952         if (IS_E1HMF(bp))
10953                 return 0;
10954
10955         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10956            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
10957            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10958
10959         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10960
10961         if (epause->rx_pause)
10962                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
10963
10964         if (epause->tx_pause)
10965                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
10966
10967         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10968                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
10969
10970         if (epause->autoneg) {
10971                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10972                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
10973                         return -EINVAL;
10974                 }
10975
10976                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10977                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10978         }
10979
10980         DP(NETIF_MSG_LINK,
10981            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10982
10983         if (netif_running(dev)) {
10984                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10985                 bnx2x_link_set(bp);
10986         }
10987
10988         return 0;
10989 }
10990
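/*
 * bnx2x_set_flags - ethtool set_flags handler (LRO / RXHASH).
 *
 * LRO is implemented by the hardware TPA aggregation, which requires
 * rx checksum offload and a driver not loaded with disable_tpa.
 * Toggling TPA only takes effect through a NIC unload/load cycle.
 */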
10991 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10992 {
10993         struct bnx2x *bp = netdev_priv(dev);
10994         int changed = 0;
10995         int rc = 0;
10996
10997         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10998                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10999                 return -EAGAIN;
11000         }
11001
11002         /* TPA requires Rx CSUM offloading */
11003         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
11004                 if (!disable_tpa) {
11005                         if (!(dev->features & NETIF_F_LRO)) {
11006                                 dev->features |= NETIF_F_LRO;
11007                                 bp->flags |= TPA_ENABLE_FLAG;
11008                                 changed = 1;
11009                         }
11010                 } else
11011                         rc = -EINVAL;
11012         } else if (dev->features & NETIF_F_LRO) {
11013                 dev->features &= ~NETIF_F_LRO;
11014                 bp->flags &= ~TPA_ENABLE_FLAG;
11015                 changed = 1;
11016         }
11017
11018         if (data & ETH_FLAG_RXHASH)
11019                 dev->features |= NETIF_F_RXHASH;
11020         else
11021                 dev->features &= ~NETIF_F_RXHASH;
11022
11023         if (changed && netif_running(dev)) {
11024                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11025                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11026         }
11027
11028         return rc;
11029 }
11030
11031 static u32 bnx2x_get_rx_csum(struct net_device *dev)
11032 {
11033         struct bnx2x *bp = netdev_priv(dev);
11034
11035         return bp->rx_csum;
11036 }
11037
11038 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
11039 {
11040         struct bnx2x *bp = netdev_priv(dev);
11041         int rc = 0;
11042
11043         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11044                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11045                 return -EAGAIN;
11046         }
11047
11048         bp->rx_csum = data;
11049
11050         /* Disable TPA when Rx CSUM is disabled; otherwise all
11051            TPA'ed packets will be discarded due to a wrong TCP CSUM */
11052         if (!data) {
11053                 u32 flags = ethtool_op_get_flags(dev);
11054
11055                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
11056         }
11057
11058         return rc;
11059 }
11060
11061 static int bnx2x_set_tso(struct net_device *dev, u32 data)
11062 {
11063         if (data) {
11064                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11065                 dev->features |= NETIF_F_TSO6;
11066         } else {
11067                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
11068                 dev->features &= ~NETIF_F_TSO6;
11069         }
11070
11071         return 0;
11072 }
11073
11074 static const struct {
11075         char string[ETH_GSTRING_LEN];
11076 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
11077         { "register_test (offline)" },
11078         { "memory_test (offline)" },
11079         { "loopback_test (offline)" },
11080         { "nvram_test (online)" },
11081         { "interrupt_test (online)" },
11082         { "link_test (online)" },
11083         { "idle check (online)" }
11084 };
11085
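/*
 * bnx2x_test_registers - offline register test.
 *
 * Walks a table of per-port registers (offset1 is the stride between
 * the port 0 and port 1 copies), writes 0x00000000 and then 0xffffffff
 * through each register's writable mask, reads the value back and
 * restores the original contents.  Any mismatch fails the test.
 */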
11086 static int bnx2x_test_registers(struct bnx2x *bp)
11087 {
11088         int idx, i, rc = -ENODEV;
11089         u32 wr_val = 0;
11090         int port = BP_PORT(bp);
11091         static const struct {
11092                 u32 offset0;
11093                 u32 offset1;
11094                 u32 mask;
11095         } reg_tbl[] = {
11096 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
11097                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
11098                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
11099                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
11100                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
11101                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
11102                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
11103                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
11104                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
11105                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
11106 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
11107                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
11108                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
11109                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
11110                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
11111                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
11112                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
11113                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
11114                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
11115                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
11116 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
11117                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
11118                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
11119                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
11120                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
11121                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
11122                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
11123                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
11124                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
11125                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
11126 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
11127                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
11128                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
11129                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
11130                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
11131                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
11132                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
11133
11134                 { 0xffffffff, 0, 0x00000000 }
11135         };
11136
11137         if (!netif_running(bp->dev))
11138                 return rc;
11139
11140         /* Run the test twice:
11141            first writing 0x00000000, then writing 0xffffffff */
11142         for (idx = 0; idx < 2; idx++) {
11143
11144                 switch (idx) {
11145                 case 0:
11146                         wr_val = 0;
11147                         break;
11148                 case 1:
11149                         wr_val = 0xffffffff;
11150                         break;
11151                 }
11152
11153                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
11154                         u32 offset, mask, save_val, val;
11155
11156                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
11157                         mask = reg_tbl[i].mask;
11158
11159                         save_val = REG_RD(bp, offset);
11160
11161                         REG_WR(bp, offset, (wr_val & mask));
11162                         val = REG_RD(bp, offset);
11163
11164                         /* Restore the original register's value */
11165                         REG_WR(bp, offset, save_val);
11166
11167                         /* verify value is as expected */
11168                         if ((val & mask) != (wr_val & mask)) {
11169                                 DP(NETIF_MSG_PROBE,
11170                                    "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
11171                                    offset, val, wr_val, mask);
11172                                 goto test_reg_exit;
11173                         }
11174                 }
11175         }
11176
11177         rc = 0;
11178
11179 test_reg_exit:
11180         return rc;
11181 }
11182
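/*
 * bnx2x_test_memory - offline memory test.
 *
 * Reads every word of the internal memories listed in mem_tbl and then
 * inspects the parity status registers, ignoring the bits masked out
 * for E1/E1H respectively.  Any unexpected parity bit fails the test.
 */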
11183 static int bnx2x_test_memory(struct bnx2x *bp)
11184 {
11185         int i, j, rc = -ENODEV;
11186         u32 val;
11187         static const struct {
11188                 u32 offset;
11189                 int size;
11190         } mem_tbl[] = {
11191                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
11192                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
11193                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
11194                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
11195                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
11196                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
11197                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
11198
11199                 { 0xffffffff, 0 }
11200         };
11201         static const struct {
11202                 char *name;
11203                 u32 offset;
11204                 u32 e1_mask;
11205                 u32 e1h_mask;
11206         } prty_tbl[] = {
11207                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
11208                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
11209                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
11210                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
11211                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
11212                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
11213
11214                 { NULL, 0xffffffff, 0, 0 }
11215         };
11216
11217         if (!netif_running(bp->dev))
11218                 return rc;
11219
11220         /* Go through all the memories */
11221         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
11222                 for (j = 0; j < mem_tbl[i].size; j++)
11223                         REG_RD(bp, mem_tbl[i].offset + j*4);
11224
11225         /* Check the parity status */
11226         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
11227                 val = REG_RD(bp, prty_tbl[i].offset);
11228                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
11229                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
11230                         DP(NETIF_MSG_HW,
11231                            "%s is 0x%x\n", prty_tbl[i].name, val);
11232                         goto test_mem_exit;
11233                 }
11234         }
11235
11236         rc = 0;
11237
11238 test_mem_exit:
11239         return rc;
11240 }
11241
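/* Poll for up to 10s (1000 x 10ms) until bnx2x_link_test() reports the
 * link back up; a no-op when the link was down to begin with.
 */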
11242 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11243 {
11244         int cnt = 1000;
11245
11246         if (link_up)
11247                 while (bnx2x_link_test(bp) && cnt--)
11248                         msleep(10);
11249 }
11250
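/*
 * bnx2x_run_loopback - send one frame through a PHY or MAC loopback
 * and verify that it comes back intact.
 *
 * A single self-addressed frame with a known byte pattern is posted on
 * queue 0 (a start BD plus an empty parsing BD, hence nbd == 2 and a
 * doorbell for two BDs); the tx and rx consumer indices must then each
 * advance by one packet, the CQE must be a fast-path completion with
 * no error flags, and the payload is compared byte by byte.
 */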
11251 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
11252 {
11253         unsigned int pkt_size, num_pkts, i;
11254         struct sk_buff *skb;
11255         unsigned char *packet;
11256         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
11257         struct bnx2x_fastpath *fp_tx = &bp->fp[0];
11258         u16 tx_start_idx, tx_idx;
11259         u16 rx_start_idx, rx_idx;
11260         u16 pkt_prod, bd_prod;
11261         struct sw_tx_bd *tx_buf;
11262         struct eth_tx_start_bd *tx_start_bd;
11263         struct eth_tx_parse_bd *pbd = NULL;
11264         dma_addr_t mapping;
11265         union eth_rx_cqe *cqe;
11266         u8 cqe_fp_flags;
11267         struct sw_rx_bd *rx_buf;
11268         u16 len;
11269         int rc = -ENODEV;
11270
11271         /* check the loopback mode */
11272         switch (loopback_mode) {
11273         case BNX2X_PHY_LOOPBACK:
11274                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
11275                         return -EINVAL;
11276                 break;
11277         case BNX2X_MAC_LOOPBACK:
11278                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
11279                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
11280                 break;
11281         default:
11282                 return -EINVAL;
11283         }
11284
11285         /* prepare the loopback packet */
11286         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
11287                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
11288         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
11289         if (!skb) {
11290                 rc = -ENOMEM;
11291                 goto test_loopback_exit;
11292         }
11293         packet = skb_put(skb, pkt_size);
11294         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
11295         memset(packet + ETH_ALEN, 0, ETH_ALEN);
11296         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
11297         for (i = ETH_HLEN; i < pkt_size; i++)
11298                 packet[i] = (unsigned char) (i & 0xff);
11299
11300         /* send the loopback packet */
11301         num_pkts = 0;
11302         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11303         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11304
11305         pkt_prod = fp_tx->tx_pkt_prod++;
11306         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
11307         tx_buf->first_bd = fp_tx->tx_bd_prod;
11308         tx_buf->skb = skb;
11309         tx_buf->flags = 0;
11310
11311         bd_prod = TX_BD(fp_tx->tx_bd_prod);
11312         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
11313         mapping = dma_map_single(&bp->pdev->dev, skb->data,
11314                                  skb_headlen(skb), DMA_TO_DEVICE);
11315         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11316         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11317         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
11318         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11319         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11320         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11321         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
11322                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
11323
11324         /* turn on parsing and get a BD */
11325         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11326         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
11327
11328         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11329
11330         wmb();
11331
11332         fp_tx->tx_db.data.prod += 2;
11333         barrier();
11334         DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
11335
11336         mmiowb();
11337
11338         num_pkts++;
11339         fp_tx->tx_bd_prod += 2; /* start + pbd */
11340
11341         udelay(100);
11342
11343         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11344         if (tx_idx != tx_start_idx + num_pkts)
11345                 goto test_loopback_exit;
11346
11347         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11348         if (rx_idx != rx_start_idx + num_pkts)
11349                 goto test_loopback_exit;
11350
11351         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
11352         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
11353         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
11354                 goto test_loopback_rx_exit;
11355
11356         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
11357         if (len != pkt_size)
11358                 goto test_loopback_rx_exit;
11359
11360         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
11361         skb = rx_buf->skb;
11362         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
11363         for (i = ETH_HLEN; i < pkt_size; i++)
11364                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
11365                         goto test_loopback_rx_exit;
11366
11367         rc = 0;
11368
11369 test_loopback_rx_exit:
11370
11371         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
11372         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
11373         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
11374         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
11375
11376         /* Update producers */
11377         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
11378                              fp_rx->rx_sge_prod);
11379
11380 test_loopback_exit:
11381         bp->link_params.loopback_mode = LOOPBACK_NONE;
11382
11383         return rc;
11384 }
11385
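/*
 * bnx2x_test_loopback - run the PHY and MAC loopback sub-tests with
 * the interface quiesced and the PHY lock held.  Returns a bitmask of
 * BNX2X_*_LOOPBACK_FAILED flags, or 0 outright when there is no MCP.
 */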
11386 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
11387 {
11388         int rc = 0, res;
11389
11390         if (BP_NOMCP(bp))
11391                 return rc;
11392
11393         if (!netif_running(bp->dev))
11394                 return BNX2X_LOOPBACK_FAILED;
11395
11396         bnx2x_netif_stop(bp, 1);
11397         bnx2x_acquire_phy_lock(bp);
11398
11399         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
11400         if (res) {
11401                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
11402                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
11403         }
11404
11405         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
11406         if (res) {
11407                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
11408                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
11409         }
11410
11411         bnx2x_release_phy_lock(bp);
11412         bnx2x_netif_start(bp);
11413
11414         return rc;
11415 }
11416
11417 #define CRC32_RESIDUAL                  0xdebb20e3
11418
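/*
 * bnx2x_test_nvram - verify the fixed NVRAM regions.
 *
 * After checking the bootstrap magic (0x669955aa), each region in
 * nvram_tbl is read and CRC-checked: a region that ends in its own
 * inverted CRC32 leaves the well-known CRC-32 residual, so the
 * computed value must equal CRC32_RESIDUAL (0xdebb20e3).
 */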
11419 static int bnx2x_test_nvram(struct bnx2x *bp)
11420 {
11421         static const struct {
11422                 int offset;
11423                 int size;
11424         } nvram_tbl[] = {
11425                 {     0,  0x14 }, /* bootstrap */
11426                 {  0x14,  0xec }, /* dir */
11427                 { 0x100, 0x350 }, /* manuf_info */
11428                 { 0x450,  0xf0 }, /* feature_info */
11429                 { 0x640,  0x64 }, /* upgrade_key_info */
11430                 { 0x6a4,  0x64 },
11431                 { 0x708,  0x70 }, /* manuf_key_info */
11432                 { 0x778,  0x70 },
11433                 {     0,     0 }
11434         };
11435         __be32 buf[0x350 / 4];
11436         u8 *data = (u8 *)buf;
11437         int i, rc;
11438         u32 magic, crc;
11439
11440         if (BP_NOMCP(bp))
11441                 return 0;
11442
11443         rc = bnx2x_nvram_read(bp, 0, data, 4);
11444         if (rc) {
11445                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
11446                 goto test_nvram_exit;
11447         }
11448
11449         magic = be32_to_cpu(buf[0]);
11450         if (magic != 0x669955aa) {
11451                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
11452                 rc = -ENODEV;
11453                 goto test_nvram_exit;
11454         }
11455
11456         for (i = 0; nvram_tbl[i].size; i++) {
11457
11458                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
11459                                       nvram_tbl[i].size);
11460                 if (rc) {
11461                         DP(NETIF_MSG_PROBE,
11462                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
11463                         goto test_nvram_exit;
11464                 }
11465
11466                 crc = ether_crc_le(nvram_tbl[i].size, data);
11467                 if (crc != CRC32_RESIDUAL) {
11468                         DP(NETIF_MSG_PROBE,
11469                            "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
11470                         rc = -ENODEV;
11471                         goto test_nvram_exit;
11472                 }
11473         }
11474
11475 test_nvram_exit:
11476         return rc;
11477 }
11478
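/*
 * bnx2x_test_intr - online interrupt test.
 *
 * Posts a SET_MAC ramrod carrying an empty (length 0) configuration on
 * the slowpath and waits up to ~100ms (10 x 10ms) for the completion
 * interrupt to clear set_mac_pending; a timeout means the interrupt
 * path is broken and the test fails with -ENODEV.
 */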
11479 static int bnx2x_test_intr(struct bnx2x *bp)
11480 {
11481         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
11482         int i, rc;
11483
11484         if (!netif_running(bp->dev))
11485                 return -ENODEV;
11486
11487         config->hdr.length = 0;
11488         if (CHIP_IS_E1(bp))
11489                 /* use last unicast entries */
11490                 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
11491         else
11492                 config->hdr.offset = BP_FUNC(bp);
11493         config->hdr.client_id = bp->fp->cl_id;
11494         config->hdr.reserved1 = 0;
11495
11496         bp->set_mac_pending++;
11497         smp_wmb();
11498         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11499                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
11500                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
11501         if (rc == 0) {
11502                 for (i = 0; i < 10; i++) {
11503                         if (!bp->set_mac_pending)
11504                                 break;
11505                         smp_rmb();
11506                         msleep_interruptible(10);
11507                 }
11508                 if (i == 10)
11509                         rc = -ENODEV;
11510         }
11511
11512         return rc;
11513 }
11514
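/*
 * bnx2x_self_test - ethtool self-test entry point.
 *
 * Offline tests (registers, memory, loopback) require reloading the
 * NIC in diagnostic mode with the TX port interface input disabled,
 * and are not run in E1H multi-function mode.  The online tests
 * (nvram, interrupt, link) run against the live configuration; the
 * link test is PMF-only.  buf[] holds one result slot per test.
 */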
11515 static void bnx2x_self_test(struct net_device *dev,
11516                             struct ethtool_test *etest, u64 *buf)
11517 {
11518         struct bnx2x *bp = netdev_priv(dev);
11519
11520         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11521                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11522                 etest->flags |= ETH_TEST_FL_FAILED;
11523                 return;
11524         }
11525
11526         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
11527
11528         if (!netif_running(dev))
11529                 return;
11530
11531         /* offline tests are not supported in MF mode */
11532         if (IS_E1HMF(bp))
11533                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
11534
11535         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11536                 int port = BP_PORT(bp);
11537                 u32 val;
11538                 u8 link_up;
11539
11540                 /* save current value of input enable for TX port IF */
11541                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
11542                 /* disable input for TX port IF */
11543                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
11544
11545                 link_up = (bnx2x_link_test(bp) == 0);
11546                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11547                 bnx2x_nic_load(bp, LOAD_DIAG);
11548                 /* wait until link state is restored */
11549                 bnx2x_wait_for_link(bp, link_up);
11550
11551                 if (bnx2x_test_registers(bp) != 0) {
11552                         buf[0] = 1;
11553                         etest->flags |= ETH_TEST_FL_FAILED;
11554                 }
11555                 if (bnx2x_test_memory(bp) != 0) {
11556                         buf[1] = 1;
11557                         etest->flags |= ETH_TEST_FL_FAILED;
11558                 }
11559                 buf[2] = bnx2x_test_loopback(bp, link_up);
11560                 if (buf[2] != 0)
11561                         etest->flags |= ETH_TEST_FL_FAILED;
11562
11563                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11564
11565                 /* restore input for TX port IF */
11566                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
11567
11568                 bnx2x_nic_load(bp, LOAD_NORMAL);
11569                 /* wait until link state is restored */
11570                 bnx2x_wait_for_link(bp, link_up);
11571         }
11572         if (bnx2x_test_nvram(bp) != 0) {
11573                 buf[3] = 1;
11574                 etest->flags |= ETH_TEST_FL_FAILED;
11575         }
11576         if (bnx2x_test_intr(bp) != 0) {
11577                 buf[4] = 1;
11578                 etest->flags |= ETH_TEST_FL_FAILED;
11579         }
11580         if (bp->port.pmf)
11581                 if (bnx2x_link_test(bp) != 0) {
11582                         buf[5] = 1;
11583                         etest->flags |= ETH_TEST_FL_FAILED;
11584                 }
11585
11586 #ifdef BNX2X_EXTRA_DEBUG
11587         bnx2x_panic_dump(bp);
11588 #endif
11589 }
11590
11591 static const struct {
11592         long offset;
11593         int size;
11594         u8 string[ETH_GSTRING_LEN];
11595 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
11596 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
11597         { Q_STATS_OFFSET32(error_bytes_received_hi),
11598                                                 8, "[%d]: rx_error_bytes" },
11599         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
11600                                                 8, "[%d]: rx_ucast_packets" },
11601         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
11602                                                 8, "[%d]: rx_mcast_packets" },
11603         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
11604                                                 8, "[%d]: rx_bcast_packets" },
11605         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
11606         { Q_STATS_OFFSET32(rx_err_discard_pkt),
11607                                          4, "[%d]: rx_phy_ip_err_discards"},
11608         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
11609                                          4, "[%d]: rx_skb_alloc_discard" },
11610         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
11611
11612 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
11613         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11614                                                 8, "[%d]: tx_ucast_packets" },
11615         { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11616                                                 8, "[%d]: tx_mcast_packets" },
11617         { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11618                                                 8, "[%d]: tx_bcast_packets" }
11619 };
11620
11621 static const struct {
11622         long offset;
11623         int size;
11624         u32 flags;
11625 #define STATS_FLAGS_PORT                1
11626 #define STATS_FLAGS_FUNC                2
11627 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
11628         u8 string[ETH_GSTRING_LEN];
11629 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
11630 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
11631                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
11632         { STATS_OFFSET32(error_bytes_received_hi),
11633                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
11634         { STATS_OFFSET32(total_unicast_packets_received_hi),
11635                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
11636         { STATS_OFFSET32(total_multicast_packets_received_hi),
11637                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
11638         { STATS_OFFSET32(total_broadcast_packets_received_hi),
11639                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
11640         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
11641                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
11642         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
11643                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
11644         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
11645                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
11646         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
11647                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
11648 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
11649                                 8, STATS_FLAGS_PORT, "rx_fragments" },
11650         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
11651                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
11652         { STATS_OFFSET32(no_buff_discard_hi),
11653                                 8, STATS_FLAGS_BOTH, "rx_discards" },
11654         { STATS_OFFSET32(mac_filter_discard),
11655                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
11656         { STATS_OFFSET32(xxoverflow_discard),
11657                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
11658         { STATS_OFFSET32(brb_drop_hi),
11659                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
11660         { STATS_OFFSET32(brb_truncate_hi),
11661                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
11662         { STATS_OFFSET32(pause_frames_received_hi),
11663                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
11664         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
11665                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
11666         { STATS_OFFSET32(nig_timer_max),
11667                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
11668 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
11669                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
11670         { STATS_OFFSET32(rx_skb_alloc_failed),
11671                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
11672         { STATS_OFFSET32(hw_csum_err),
11673                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
11674
11675         { STATS_OFFSET32(total_bytes_transmitted_hi),
11676                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
11677         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
11678                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
11679         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11680                                 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
11681         { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11682                                 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
11683         { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11684                                 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
11685         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
11686                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
11687         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
11688                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
11689 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
11690                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
11691         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
11692                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
11693         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
11694                                 8, STATS_FLAGS_PORT, "tx_deferred" },
11695         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
11696                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
11697         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
11698                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
11699         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
11700                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
11701         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
11702                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
11703         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
11704                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
11705         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
11706                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
11707         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
11708                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
11709 /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
11710                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
11711         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
11712                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
11713         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
11714                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
11715         { STATS_OFFSET32(pause_frames_sent_hi),
11716                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
11717 };
11718
11719 #define IS_PORT_STAT(i) \
11720         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
11721 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
11722 #define IS_E1HMF_MODE_STAT(bp) \
11723                         (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
11724
11725 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
11726 {
11727         struct bnx2x *bp = netdev_priv(dev);
11728         int i, num_stats;
11729
11730         switch (stringset) {
11731         case ETH_SS_STATS:
11732                 if (is_multi(bp)) {
11733                         num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
11734                         if (!IS_E1HMF_MODE_STAT(bp))
11735                                 num_stats += BNX2X_NUM_STATS;
11736                 } else {
11737                         if (IS_E1HMF_MODE_STAT(bp)) {
11738                                 num_stats = 0;
11739                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
11740                                         if (IS_FUNC_STAT(i))
11741                                                 num_stats++;
11742                         } else
11743                                 num_stats = BNX2X_NUM_STATS;
11744                 }
11745                 return num_stats;
11746
11747         case ETH_SS_TEST:
11748                 return BNX2X_NUM_TESTS;
11749
11750         default:
11751                 return -EINVAL;
11752         }
11753 }
11754
11755 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11756 {
11757         struct bnx2x *bp = netdev_priv(dev);
11758         int i, j, k;
11759
11760         switch (stringset) {
11761         case ETH_SS_STATS:
11762                 if (is_multi(bp)) {
11763                         k = 0;
11764                         for_each_queue(bp, i) {
11765                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
11766                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
11767                                                 bnx2x_q_stats_arr[j].string, i);
11768                                 k += BNX2X_NUM_Q_STATS;
11769                         }
11770                         if (IS_E1HMF_MODE_STAT(bp))
11771                                 break;
11772                         for (j = 0; j < BNX2X_NUM_STATS; j++)
11773                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
11774                                        bnx2x_stats_arr[j].string);
11775                 } else {
11776                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11777                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11778                                         continue;
11779                                 strcpy(buf + j*ETH_GSTRING_LEN,
11780                                        bnx2x_stats_arr[i].string);
11781                                 j++;
11782                         }
11783                 }
11784                 break;
11785
11786         case ETH_SS_TEST:
11787                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
11788                 break;
11789         }
11790 }
11791
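/*
 * bnx2x_get_ethtool_stats - fill the u64 stats buffer.
 *
 * Hardware statistics live in arrays of u32s: entries of size 4 are
 * copied directly, entries of size 8 combine their _hi/_lo pair via
 * HILO_U64, and zero-sized entries are placeholders that report 0.
 * In multi-queue mode the per-queue stats precede the global ones.
 */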
11792 static void bnx2x_get_ethtool_stats(struct net_device *dev,
11793                                     struct ethtool_stats *stats, u64 *buf)
11794 {
11795         struct bnx2x *bp = netdev_priv(dev);
11796         u32 *hw_stats, *offset;
11797         int i, j, k;
11798
11799         if (is_multi(bp)) {
11800                 k = 0;
11801                 for_each_queue(bp, i) {
11802                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11803                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11804                                 if (bnx2x_q_stats_arr[j].size == 0) {
11805                                         /* skip this counter */
11806                                         buf[k + j] = 0;
11807                                         continue;
11808                                 }
11809                                 offset = (hw_stats +
11810                                           bnx2x_q_stats_arr[j].offset);
11811                                 if (bnx2x_q_stats_arr[j].size == 4) {
11812                                         /* 4-byte counter */
11813                                         buf[k + j] = (u64) *offset;
11814                                         continue;
11815                                 }
11816                                 /* 8-byte counter */
11817                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11818                         }
11819                         k += BNX2X_NUM_Q_STATS;
11820                 }
11821                 if (IS_E1HMF_MODE_STAT(bp))
11822                         return;
11823                 hw_stats = (u32 *)&bp->eth_stats;
11824                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11825                         if (bnx2x_stats_arr[j].size == 0) {
11826                                 /* skip this counter */
11827                                 buf[k + j] = 0;
11828                                 continue;
11829                         }
11830                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
11831                         if (bnx2x_stats_arr[j].size == 4) {
11832                                 /* 4-byte counter */
11833                                 buf[k + j] = (u64) *offset;
11834                                 continue;
11835                         }
11836                         /* 8-byte counter */
11837                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
11838                 }
11839         } else {
11840                 hw_stats = (u32 *)&bp->eth_stats;
11841                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11842                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11843                                 continue;
11844                         if (bnx2x_stats_arr[i].size == 0) {
11845                                 /* skip this counter */
11846                                 buf[j] = 0;
11847                                 j++;
11848                                 continue;
11849                         }
11850                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
11851                         if (bnx2x_stats_arr[i].size == 4) {
11852                                 /* 4-byte counter */
11853                                 buf[j] = (u64) *offset;
11854                                 j++;
11855                                 continue;
11856                         }
11857                         /* 8-byte counter */
11858                         buf[j] = HILO_U64(*offset, *(offset + 1));
11859                         j++;
11860                 }
11861         }
11862 }
11863
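/*
 * bnx2x_phys_id - ethtool LED identify (PMF only).
 *
 * Blinks the port LED for "data" seconds (default 2) by alternating
 * LED_MODE_OPER and LED_MODE_OFF every 500ms, then restores the LED
 * to match the current link state.
 */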
11864 static int bnx2x_phys_id(struct net_device *dev, u32 data)
11865 {
11866         struct bnx2x *bp = netdev_priv(dev);
11867         int i;
11868
11869         if (!netif_running(dev))
11870                 return 0;
11871
11872         if (!bp->port.pmf)
11873                 return 0;
11874
11875         if (data == 0)
11876                 data = 2;
11877
11878         for (i = 0; i < (data * 2); i++) {
11879                 if ((i % 2) == 0)
11880                         bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11881                                       SPEED_1000);
11882                 else
11883                         bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
11884
11885                 msleep_interruptible(500);
11886                 if (signal_pending(current))
11887                         break;
11888         }
11889
11890         if (bp->link_vars.link_up)
11891                 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11892                               bp->link_vars.line_speed);
11893
11894         return 0;
11895 }
11896
11897 static const struct ethtool_ops bnx2x_ethtool_ops = {
11898         .get_settings           = bnx2x_get_settings,
11899         .set_settings           = bnx2x_set_settings,
11900         .get_drvinfo            = bnx2x_get_drvinfo,
11901         .get_regs_len           = bnx2x_get_regs_len,
11902         .get_regs               = bnx2x_get_regs,
11903         .get_wol                = bnx2x_get_wol,
11904         .set_wol                = bnx2x_set_wol,
11905         .get_msglevel           = bnx2x_get_msglevel,
11906         .set_msglevel           = bnx2x_set_msglevel,
11907         .nway_reset             = bnx2x_nway_reset,
11908         .get_link               = bnx2x_get_link,
11909         .get_eeprom_len         = bnx2x_get_eeprom_len,
11910         .get_eeprom             = bnx2x_get_eeprom,
11911         .set_eeprom             = bnx2x_set_eeprom,
11912         .get_coalesce           = bnx2x_get_coalesce,
11913         .set_coalesce           = bnx2x_set_coalesce,
11914         .get_ringparam          = bnx2x_get_ringparam,
11915         .set_ringparam          = bnx2x_set_ringparam,
11916         .get_pauseparam         = bnx2x_get_pauseparam,
11917         .set_pauseparam         = bnx2x_set_pauseparam,
11918         .get_rx_csum            = bnx2x_get_rx_csum,
11919         .set_rx_csum            = bnx2x_set_rx_csum,
11920         .get_tx_csum            = ethtool_op_get_tx_csum,
11921         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
11922         .set_flags              = bnx2x_set_flags,
11923         .get_flags              = ethtool_op_get_flags,
11924         .get_sg                 = ethtool_op_get_sg,
11925         .set_sg                 = ethtool_op_set_sg,
11926         .get_tso                = ethtool_op_get_tso,
11927         .set_tso                = bnx2x_set_tso,
11928         .self_test              = bnx2x_self_test,
11929         .get_sset_count         = bnx2x_get_sset_count,
11930         .get_strings            = bnx2x_get_strings,
11931         .phys_id                = bnx2x_phys_id,
11932         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
11933 };
11934
11935 /* end of ethtool_ops */
11936
11937 /****************************************************************************
11938 * General service functions
11939 ****************************************************************************/
11940
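/*
 * bnx2x_set_power_state - move the device between D0 and D3hot by
 * programming PMCSR directly.
 *
 * Entering D0 also clears the PME status bit and allows a 20ms settle
 * time when leaving D3hot.  D3hot is skipped while other driver
 * instances still hold the device (enable_cnt > 1) and on
 * emulation/FPGA platforms; PME is armed there only when WoL is
 * enabled.
 */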
11941 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11942 {
11943         u16 pmcsr;
11944
11945         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11946
11947         switch (state) {
11948         case PCI_D0:
11949                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11950                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11951                                        PCI_PM_CTRL_PME_STATUS));
11952
11953                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
11954                         /* delay required during transition out of D3hot */
11955                         msleep(20);
11956                 break;
11957
11958         case PCI_D3hot:
11959                 /* If there are other clients above, don't
11960                    shut down the power */
11961                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
11962                         return 0;
11963                 /* Don't shut down the power for emulation and FPGA */
11964                 if (CHIP_REV_IS_SLOW(bp))
11965                         return 0;
11966
11967                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11968                 pmcsr |= 3;
11969
11970                 if (bp->wol)
11971                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
11972
11973                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11974                                       pmcsr);
11975
11976                 /* No more memory access after this point until
11977                  * device is brought back to D0.
11978                  */
11979                 break;
11980
11981         default:
11982                 return -EINVAL;
11983         }
11984         return 0;
11985 }
11986
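/* Compare the status-block rx index with our consumer; the last slot
 * of each RCQ page holds the next-page element rather than a
 * completion, so an index landing there is advanced past it first.
 */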
11987 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11988 {
11989         u16 rx_cons_sb;
11990
11991         /* Tell compiler that status block fields can change */
11992         barrier();
11993         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
11994         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
11995                 rx_cons_sb++;
11996         return (fp->rx_comp_cons != rx_cons_sb);
11997 }
11998
11999 /*
12000  * net_device service functions
12001  */
12002
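/*
 * bnx2x_poll - NAPI poll handler for one fastpath ring.
 *
 * Services tx completions (not counted against the budget) and rx work
 * until the budget is exhausted or both rings are idle; when idle it
 * refreshes the status block indices, re-checks for work under rmb()
 * (see the comment below) and only then completes NAPI and re-enables
 * the IGU interrupt lines.
 */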
12003 static int bnx2x_poll(struct napi_struct *napi, int budget)
12004 {
12005         int work_done = 0;
12006         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
12007                                                  napi);
12008         struct bnx2x *bp = fp->bp;
12009
12010         while (1) {
12011 #ifdef BNX2X_STOP_ON_ERROR
12012                 if (unlikely(bp->panic)) {
12013                         napi_complete(napi);
12014                         return 0;
12015                 }
12016 #endif
12017
12018                 if (bnx2x_has_tx_work(fp))
12019                         bnx2x_tx_int(fp);
12020
12021                 if (bnx2x_has_rx_work(fp)) {
12022                         work_done += bnx2x_rx_int(fp, budget - work_done);
12023
12024                         /* must not complete if we consumed full budget */
12025                         if (work_done >= budget)
12026                                 break;
12027                 }
12028
12029                 /* Fall out from the NAPI loop if needed */
12030                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12031                         bnx2x_update_fpsb_idx(fp);
12032                 /* bnx2x_has_rx_work() reads the status block, thus we need
12033                  * to ensure that status block indices have been actually read
12034                  * (bnx2x_update_fpsb_idx) prior to this check
12035                  * (bnx2x_has_rx_work) so that we won't write the "newer"
12036                  * value of the status block to IGU (if there was a DMA right
12037                  * after bnx2x_has_rx_work and if there is no rmb, the memory
12038                  * reading (bnx2x_update_fpsb_idx) may be postponed to right
12039                  * before bnx2x_ack_sb). In this case there will never be
12040                  * another interrupt until there is another update of the
12041                  * status block, while there is still unhandled work.
12042                  */
12043                         rmb();
12044
12045                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12046                                 napi_complete(napi);
12047                                 /* Re-enable interrupts */
12048                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
12049                                              le16_to_cpu(fp->fp_c_idx),
12050                                              IGU_INT_NOP, 1);
12051                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
12052                                              le16_to_cpu(fp->fp_u_idx),
12053                                              IGU_INT_ENABLE, 1);
12054                                 break;
12055                         }
12056                 }
12057         }
12058
12059         return work_done;
12060 }
12061
12062
12063 /* We split the first BD into a header BD and a data BD
12064  * to ease the pain of our fellow microcode engineers;
12065  * we use one mapping for both BDs.
12066  * So far this has only been observed to happen
12067  * in Other Operating Systems(TM).
12068  */
12069 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
12070                                    struct bnx2x_fastpath *fp,
12071                                    struct sw_tx_bd *tx_buf,
12072                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
12073                                    u16 bd_prod, int nbd)
12074 {
12075         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
12076         struct eth_tx_bd *d_tx_bd;
12077         dma_addr_t mapping;
12078         int old_len = le16_to_cpu(h_tx_bd->nbytes);
12079
12080         /* first fix first BD */
12081         h_tx_bd->nbd = cpu_to_le16(nbd);
12082         h_tx_bd->nbytes = cpu_to_le16(hlen);
12083
12084         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
12085            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
12086            h_tx_bd->addr_lo, h_tx_bd->nbd);
12087
12088         /* now get a new data BD
12089          * (after the pbd) and fill it */
12090         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12091         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12092
12093         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
12094                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
12095
12096         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12097         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12098         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
12099
12100         /* this marks the BD as one that has no individual mapping */
12101         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
12102
12103         DP(NETIF_MSG_TX_QUEUED,
12104            "TSO split data size is %d (%x:%x)\n",
12105            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
12106
12107         /* update tx_bd */
12108         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
12109
12110         return bd_prod;
12111 }
12112
12113 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12114 {
12115         if (fix > 0)
12116                 csum = (u16) ~csum_fold(csum_sub(csum,
12117                                 csum_partial(t_header - fix, fix, 0)));
12118
12119         else if (fix < 0)
12120                 csum = (u16) ~csum_fold(csum_add(csum,
12121                                 csum_partial(t_header, -fix, 0)));
12122
12123         return swab16(csum);
12124 }
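
/* A minimal standalone sketch (user-space demo; the helper and values are
 * hypothetical, not part of this driver) of the arithmetic behind
 * bnx2x_csum_fix(): removing a prefix from a 16-bit one's complement sum
 * by adding the prefix's complement, which is what the csum_sub() +
 * csum_fold() combination above does before the driver complements and
 * byte-swaps the result for the HW.
 */
#if 0	/* illustrative only -- not compiled into the driver */
#include <stdint.h>
#include <stdio.h>

/* 16-bit one's complement sum over big-endian words, with end-around carry */
static uint32_t ones_sum(const uint8_t *p, int len, uint32_t sum)
{
	while (len > 1) {
		sum += (uint32_t)((p[0] << 8) | p[1]);
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)(p[0] << 8);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

int main(void)
{
	uint8_t buf[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
	int fix = 2;	/* the sum covers two extra bytes before the header */

	uint32_t full = ones_sum(buf, 8, 0);		/* sum incl. prefix */
	uint32_t pre = ones_sum(buf, fix, 0);		/* prefix to remove */
	uint32_t direct = ones_sum(buf + fix, 8 - fix, 0);

	/* one's complement subtraction: add the complement of the prefix */
	uint32_t derived = full + (~pre & 0xffff);
	while (derived >> 16)
		derived = (derived & 0xffff) + (derived >> 16);

	/* both print 0x0033 (equal up to the one's complement zero) */
	printf("direct %04x derived %04x\n", (unsigned)direct,
	       (unsigned)derived);
	return 0;
}
#endif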
12125
12126 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12127 {
12128         u32 rc;
12129
12130         if (skb->ip_summed != CHECKSUM_PARTIAL)
12131                 rc = XMIT_PLAIN;
12132
12133         else {
12134                 if (skb->protocol == htons(ETH_P_IPV6)) {
12135                         rc = XMIT_CSUM_V6;
12136                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12137                                 rc |= XMIT_CSUM_TCP;
12138
12139                 } else {
12140                         rc = XMIT_CSUM_V4;
12141                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12142                                 rc |= XMIT_CSUM_TCP;
12143                 }
12144         }
12145
12146         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
12147                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
12148
12149         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
12150                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
12151
12152         return rc;
12153 }
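
/* For example: a CHECKSUM_PARTIAL TCPv4 skb with SKB_GSO_TCPV4 set yields
 * xmit_type = XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP, while a plain
 * (non-offloaded) skb yields XMIT_PLAIN.
 */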
12154
12155 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12156 /* Check if the packet requires linearization (it is too fragmented).
12157    There is no need to check fragmentation if page size > 8K (the FW
12158    restrictions cannot be violated in that case). */
12159 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12160                              u32 xmit_type)
12161 {
12162         int to_copy = 0;
12163         int hlen = 0;
12164         int first_bd_sz = 0;
12165
12166         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12167         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12168
12169                 if (xmit_type & XMIT_GSO) {
12170                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12171                         /* Check if LSO packet needs to be copied:
12172                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12173                         int wnd_size = MAX_FETCH_BD - 3;
12174                         /* Number of windows to check */
12175                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12176                         int wnd_idx = 0;
12177                         int frag_idx = 0;
12178                         u32 wnd_sum = 0;
12179
12180                         /* Headers length */
12181                         hlen = (int)(skb_transport_header(skb) - skb->data) +
12182                                 tcp_hdrlen(skb);
12183
12184                         /* Amount of data (w/o headers) in linear part of SKB */
12185                         first_bd_sz = skb_headlen(skb) - hlen;
12186
12187                         wnd_sum  = first_bd_sz;
12188
12189                         /* Calculate the first sum - it's special */
12190                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12191                                 wnd_sum +=
12192                                         skb_shinfo(skb)->frags[frag_idx].size;
12193
12194                         /* If there was data on linear skb data - check it */
12195                         if (first_bd_sz > 0) {
12196                                 if (unlikely(wnd_sum < lso_mss)) {
12197                                         to_copy = 1;
12198                                         goto exit_lbl;
12199                                 }
12200
12201                                 wnd_sum -= first_bd_sz;
12202                         }
12203
12204                         /* Others are easier: run through the frag list and
12205                            check all windows */
12206                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12207                                 wnd_sum +=
12208                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12209
12210                                 if (unlikely(wnd_sum < lso_mss)) {
12211                                         to_copy = 1;
12212                                         break;
12213                                 }
12214                                 wnd_sum -=
12215                                         skb_shinfo(skb)->frags[wnd_idx].size;
12216                         }
12217                 } else {
12218                         /* in the non-LSO case a packet that is too
12219                            fragmented must always be linearized */
12220                         to_copy = 1;
12221                 }
12222         }
12223
12224 exit_lbl:
12225         if (unlikely(to_copy))
12226                 DP(NETIF_MSG_TX_QUEUED,
12227                    "Linearization IS REQUIRED for %s packet. "
12228                    "num_frags %d  hlen %d  first_bd_sz %d\n",
12229                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12230                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12231
12232         return to_copy;
12233 }
12234 #endif
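
/* Worked example (hypothetical numbers) for the window check above:
 * suppose wnd_size = 10, lso_mss = 1500, first_bd_sz = 100 and every frag
 * is 100 bytes. The first window then sums 100 + 9 * 100 = 1000 < 1500,
 * so ten consecutive BDs cannot carry a full MSS and the skb must be
 * linearized (to_copy = 1).
 */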
12235
12236 /* called with netif_tx_lock
12237  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
12238  * netif_wake_queue()
12239  */
12240 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
12241 {
12242         struct bnx2x *bp = netdev_priv(dev);
12243         struct bnx2x_fastpath *fp;
12244         struct netdev_queue *txq;
12245         struct sw_tx_bd *tx_buf;
12246         struct eth_tx_start_bd *tx_start_bd;
12247         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
12248         struct eth_tx_parse_bd *pbd = NULL;
12249         u16 pkt_prod, bd_prod;
12250         int nbd, fp_index;
12251         dma_addr_t mapping;
12252         u32 xmit_type = bnx2x_xmit_type(bp, skb);
12253         int i;
12254         u8 hlen = 0;
12255         __le16 pkt_size = 0;
12256         struct ethhdr *eth;
12257         u8 mac_type = UNICAST_ADDRESS;
12258
12259 #ifdef BNX2X_STOP_ON_ERROR
12260         if (unlikely(bp->panic))
12261                 return NETDEV_TX_BUSY;
12262 #endif
12263
12264         fp_index = skb_get_queue_mapping(skb);
12265         txq = netdev_get_tx_queue(dev, fp_index);
12266
12267         fp = &bp->fp[fp_index];
12268
12269         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
12270                 fp->eth_q_stats.driver_xoff++;
12271                 netif_tx_stop_queue(txq);
12272                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12273                 return NETDEV_TX_BUSY;
12274         }
12275
12276         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
12277            "  gso type %x  xmit_type %x\n",
12278            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12279            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12280
12281         eth = (struct ethhdr *)skb->data;
12282
12283         /* set flag according to packet type (UNICAST_ADDRESS is default)*/
12284         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12285                 if (is_broadcast_ether_addr(eth->h_dest))
12286                         mac_type = BROADCAST_ADDRESS;
12287                 else
12288                         mac_type = MULTICAST_ADDRESS;
12289         }
12290
12291 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12292         /* First, check if we need to linearize the skb (due to FW
12293            restrictions). No need to check fragmentation if page size > 8K
12294            (the FW restrictions cannot be violated in that case) */
12295         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12296                 /* Statistics of linearization */
12297                 bp->lin_cnt++;
12298                 if (skb_linearize(skb) != 0) {
12299                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12300                            "silently dropping this SKB\n");
12301                         dev_kfree_skb_any(skb);
12302                         return NETDEV_TX_OK;
12303                 }
12304         }
12305 #endif
12306
12307         /*
12308          * Please read carefully. First we use one BD which we mark as
12309          * start, then we have a parsing info BD (used for TSO or xsum),
12310          * and only then we have the rest of the TSO BDs.
12311          * (Don't forget to mark the last one as last,
12312          * and to unmap only AFTER you write to the BD ...)
12313          * And above all, all pbd sizes are in words - NOT DWORDS!
12314          */
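
        /* Illustrative BD chain (sketch) for a TSO skb with two frags:
         *
         *   start BD (headers)  ->  parsing BD (pbd)
         *     ->  data BD (rest of linear data, from bnx2x_tx_split())
         *     ->  data BD (frag 0)  ->  data BD (frag 1, last)
         */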
12315
12316         pkt_prod = fp->tx_pkt_prod++;
12317         bd_prod = TX_BD(fp->tx_bd_prod);
12318
12319         /* get a tx_buf and first BD */
12320         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
12321         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
12322
12323         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12324         tx_start_bd->general_data =  (mac_type <<
12325                                         ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
12326         /* header nbd */
12327         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
12328
12329         /* remember the first BD of the packet */
12330         tx_buf->first_bd = fp->tx_bd_prod;
12331         tx_buf->skb = skb;
12332         tx_buf->flags = 0;
12333
12334         DP(NETIF_MSG_TX_QUEUED,
12335            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
12336            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
12337
12338 #ifdef BCM_VLAN
12339         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12340             (bp->flags & HW_VLAN_TX_FLAG)) {
12341                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12342                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
12343         } else
12344 #endif
12345                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
12346
12347         /* turn on parsing and get a BD */
12348         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12349         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
12350
12351         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
12352
12353         if (xmit_type & XMIT_CSUM) {
12354                 hlen = (skb_network_header(skb) - skb->data) / 2;
12355
12356                 /* for now NS flag is not used in Linux */
12357                 pbd->global_data =
12358                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12359                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
12360
12361                 pbd->ip_hlen = (skb_transport_header(skb) -
12362                                 skb_network_header(skb)) / 2;
12363
12364                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
12365
12366                 pbd->total_hlen = cpu_to_le16(hlen);
12367                 hlen = hlen*2;
12368
12369                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
12370
12371                 if (xmit_type & XMIT_CSUM_V4)
12372                         tx_start_bd->bd_flags.as_bitfield |=
12373                                                 ETH_TX_BD_FLAGS_IP_CSUM;
12374                 else
12375                         tx_start_bd->bd_flags.as_bitfield |=
12376                                                 ETH_TX_BD_FLAGS_IPV6;
12377
12378                 if (xmit_type & XMIT_CSUM_TCP) {
12379                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12380
12381                 } else {
12382                         s8 fix = SKB_CS_OFF(skb); /* signed! */
12383
12384                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
12385
12386                         DP(NETIF_MSG_TX_QUEUED,
12387                            "hlen %d  fix %d  csum before fix %x\n",
12388                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
12389
12390                         /* HW bug: fixup the CSUM */
12391                         pbd->tcp_pseudo_csum =
12392                                 bnx2x_csum_fix(skb_transport_header(skb),
12393                                                SKB_CS(skb), fix);
12394
12395                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12396                            pbd->tcp_pseudo_csum);
12397                 }
12398         }
12399
12400         mapping = dma_map_single(&bp->pdev->dev, skb->data,
12401                                  skb_headlen(skb), DMA_TO_DEVICE);
12402
12403         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12404         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12405         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12406         tx_start_bd->nbd = cpu_to_le16(nbd);
12407         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12408         pkt_size = tx_start_bd->nbytes;
12409
12410         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
12411            "  nbytes %d  flags %x  vlan %x\n",
12412            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12413            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12414            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
12415
12416         if (xmit_type & XMIT_GSO) {
12417
12418                 DP(NETIF_MSG_TX_QUEUED,
12419                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
12420                    skb->len, hlen, skb_headlen(skb),
12421                    skb_shinfo(skb)->gso_size);
12422
12423                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
12424
12425                 if (unlikely(skb_headlen(skb) > hlen))
12426                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12427                                                  hlen, bd_prod, ++nbd);
12428
12429                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12430                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
12431                 pbd->tcp_flags = pbd_tcp_flags(skb);
12432
12433                 if (xmit_type & XMIT_GSO_V4) {
12434                         pbd->ip_id = swab16(ip_hdr(skb)->id);
12435                         pbd->tcp_pseudo_csum =
12436                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12437                                                           ip_hdr(skb)->daddr,
12438                                                           0, IPPROTO_TCP, 0));
12439
12440                 } else
12441                         pbd->tcp_pseudo_csum =
12442                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12443                                                         &ipv6_hdr(skb)->daddr,
12444                                                         0, IPPROTO_TCP, 0));
12445
12446                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12447         }
12448         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
12449
12450         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12451                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
12452
12453                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12454                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12455                 if (total_pkt_bd == NULL)
12456                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12457
12458                 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12459                                        frag->page_offset,
12460                                        frag->size, DMA_TO_DEVICE);
12461
12462                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12463                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12464                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12465                 le16_add_cpu(&pkt_size, frag->size);
12466
12467                 DP(NETIF_MSG_TX_QUEUED,
12468                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
12469                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12470                    le16_to_cpu(tx_data_bd->nbytes));
12471         }
12472
12473         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
12474
12475         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12476
12477         /* now send a tx doorbell, counting the next-page BD
12478          * if the packet contains or ends with it
12479          */
12480         if (TX_BD_POFF(bd_prod) < nbd)
12481                 nbd++;
12482
12483         if (total_pkt_bd != NULL)
12484                 total_pkt_bd->total_pkt_bytes = pkt_size;
12485
12486         if (pbd)
12487                 DP(NETIF_MSG_TX_QUEUED,
12488                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
12489                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
12490                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12491                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
12492                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
12493
12494         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
12495
12496         /*
12497          * Make sure that the BD data is updated before updating the producer
12498          * since FW might read the BD right after the producer is updated.
12499          * This is only applicable for weak-ordered memory model archs such
12500  * as IA-64. The following barrier is also mandatory since the FW
12501  * assumes packets always have BDs.
12502          */
12503         wmb();
12504
12505         fp->tx_db.data.prod += nbd;
12506         barrier();
12507         DOORBELL(bp, fp->index, fp->tx_db.raw);
12508
12509         mmiowb();
12510
12511         fp->tx_bd_prod += nbd;
12512
12513         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
12514                 netif_tx_stop_queue(txq);
12515
12516                 /* The paired memory barrier is in bnx2x_tx_int(); we must
12517                  * keep the ordering of the set_bit() in
12518                  * netif_tx_stop_queue() and the read of fp->tx_bd_cons */
12519                 smp_mb();
12520
12521                 fp->eth_q_stats.driver_xoff++;
12522                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
12523                         netif_tx_wake_queue(txq);
12524         }
12525         fp->tx_pkt++;
12526
12527         return NETDEV_TX_OK;
12528 }
12529
12530 /* called with rtnl_lock */
12531 static int bnx2x_open(struct net_device *dev)
12532 {
12533         struct bnx2x *bp = netdev_priv(dev);
12534
12535         netif_carrier_off(dev);
12536
12537         bnx2x_set_power_state(bp, PCI_D0);
12538
12539         if (!bnx2x_reset_is_done(bp)) {
12540                 do {
12541                         /* Reset the MCP mailbox sequence if there is an
12542                          * ongoing recovery
12543                          */
12544                         bp->fw_seq = 0;
12545
12546                         /* If it's the first function to load and reset done
12547                          * is still not cleared, a previous recovery may not
12548                          * have completed. We don't check the attention state
12549                          * here (it may have been cleared by a "common" reset)
12550                          * but shall proceed with "process kill" anyway.
12551                          */
12552                         if ((bnx2x_get_load_cnt(bp) == 0) &&
12553                                 bnx2x_trylock_hw_lock(bp,
12554                                 HW_LOCK_RESOURCE_RESERVED_08) &&
12555                                 (!bnx2x_leader_reset(bp))) {
12556                                 DP(NETIF_MSG_HW, "Recovered in open\n");
12557                                 break;
12558                         }
12559
12560                         bnx2x_set_power_state(bp, PCI_D3hot);
12561
12562                         printk(KERN_ERR "%s: Recovery flow hasn't completed"
12563                         " yet. Try again later. If you still see this"
12564                         " message after a few retries then a power cycle"
12565                         " is required.\n", bp->dev->name);
12566
12567                         return -EAGAIN;
12568                 } while (0);
12569         }
12570
12571         bp->recovery_state = BNX2X_RECOVERY_DONE;
12572
12573         return bnx2x_nic_load(bp, LOAD_OPEN);
12574 }
12575
12576 /* called with rtnl_lock */
12577 static int bnx2x_close(struct net_device *dev)
12578 {
12579         struct bnx2x *bp = netdev_priv(dev);
12580
12581         /* Unload the driver, release IRQs */
12582         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12583         bnx2x_set_power_state(bp, PCI_D3hot);
12584
12585         return 0;
12586 }
12587
12588 /* called with netif_tx_lock from dev_mcast.c */
12589 static void bnx2x_set_rx_mode(struct net_device *dev)
12590 {
12591         struct bnx2x *bp = netdev_priv(dev);
12592         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12593         int port = BP_PORT(bp);
12594
12595         if (bp->state != BNX2X_STATE_OPEN) {
12596                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12597                 return;
12598         }
12599
12600         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
12601
12602         if (dev->flags & IFF_PROMISC)
12603                 rx_mode = BNX2X_RX_MODE_PROMISC;
12604
12605         else if ((dev->flags & IFF_ALLMULTI) ||
12606                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
12607                   CHIP_IS_E1(bp)))
12608                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12609
12610         else { /* some multicasts */
12611                 if (CHIP_IS_E1(bp)) {
12612                         int i, old, offset;
12613                         struct netdev_hw_addr *ha;
12614                         struct mac_configuration_cmd *config =
12615                                                 bnx2x_sp(bp, mcast_config);
12616
12617                         i = 0;
12618                         netdev_for_each_mc_addr(ha, dev) {
12619                                 config->config_table[i].
12620                                         cam_entry.msb_mac_addr =
12621                                         swab16(*(u16 *)&ha->addr[0]);
12622                                 config->config_table[i].
12623                                         cam_entry.middle_mac_addr =
12624                                         swab16(*(u16 *)&ha->addr[2]);
12625                                 config->config_table[i].
12626                                         cam_entry.lsb_mac_addr =
12627                                         swab16(*(u16 *)&ha->addr[4]);
12628                                 config->config_table[i].cam_entry.flags =
12629                                                         cpu_to_le16(port);
12630                                 config->config_table[i].
12631                                         target_table_entry.flags = 0;
12632                                 config->config_table[i].target_table_entry.
12633                                         clients_bit_vector =
12634                                                 cpu_to_le32(1 << BP_L_ID(bp));
12635                                 config->config_table[i].
12636                                         target_table_entry.vlan_id = 0;
12637
12638                                 DP(NETIF_MSG_IFUP,
12639                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
12640                                    config->config_table[i].
12641                                                 cam_entry.msb_mac_addr,
12642                                    config->config_table[i].
12643                                                 cam_entry.middle_mac_addr,
12644                                    config->config_table[i].
12645                                                 cam_entry.lsb_mac_addr);
12646                                 i++;
12647                         }
12648                         old = config->hdr.length;
12649                         if (old > i) {
12650                                 for (; i < old; i++) {
12651                                         if (CAM_IS_INVALID(config->
12652                                                            config_table[i])) {
12653                                                 /* already invalidated */
12654                                                 break;
12655                                         }
12656                                         /* invalidate */
12657                                         CAM_INVALIDATE(config->
12658                                                        config_table[i]);
12659                                 }
12660                         }
12661
12662                         if (CHIP_REV_IS_SLOW(bp))
12663                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
12664                         else
12665                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
12666
12667                         config->hdr.length = i;
12668                         config->hdr.offset = offset;
12669                         config->hdr.client_id = bp->fp->cl_id;
12670                         config->hdr.reserved1 = 0;
12671
12672                         bp->set_mac_pending++;
12673                         smp_wmb();
12674
12675                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
12676                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
12677                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
12678                                       0);
12679                 } else { /* E1H */
12680                         /* Accept one or more multicasts */
12681                         struct netdev_hw_addr *ha;
12682                         u32 mc_filter[MC_HASH_SIZE];
12683                         u32 crc, bit, regidx;
12684                         int i;
12685
12686                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
12687
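                        /* Each multicast MAC is hashed with crc32c; the
                         * CRC's top byte selects one of 256 filter bits,
                         * spread across MC_HASH_SIZE 32-bit registers
                         * (regidx picks the register, the low five bits
                         * pick the bit within it).
                         */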
12688                         netdev_for_each_mc_addr(ha, dev) {
12689                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
12690                                    ha->addr);
12691
12692                                 crc = crc32c_le(0, ha->addr, ETH_ALEN);
12693                                 bit = (crc >> 24) & 0xff;
12694                                 regidx = bit >> 5;
12695                                 bit &= 0x1f;
12696                                 mc_filter[regidx] |= (1 << bit);
12697                         }
12698
12699                         for (i = 0; i < MC_HASH_SIZE; i++)
12700                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
12701                                        mc_filter[i]);
12702                 }
12703         }
12704
12705         bp->rx_mode = rx_mode;
12706         bnx2x_set_storm_rx_mode(bp);
12707 }
12708
12709 /* called with rtnl_lock */
12710 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12711 {
12712         struct sockaddr *addr = p;
12713         struct bnx2x *bp = netdev_priv(dev);
12714
12715         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
12716                 return -EINVAL;
12717
12718         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12719         if (netif_running(dev)) {
12720                 if (CHIP_IS_E1(bp))
12721                         bnx2x_set_eth_mac_addr_e1(bp, 1);
12722                 else
12723                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
12724         }
12725
12726         return 0;
12727 }
12728
12729 /* called with rtnl_lock */
12730 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12731                            int devad, u16 addr)
12732 {
12733         struct bnx2x *bp = netdev_priv(netdev);
12734         u16 value;
12735         int rc;
12736         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12737
12738         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12739            prtad, devad, addr);
12740
12741         if (prtad != bp->mdio.prtad) {
12742                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12743                    prtad, bp->mdio.prtad);
12744                 return -EINVAL;
12745         }
12746
12747         /* The HW expects different devad if CL22 is used */
12748         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12749
12750         bnx2x_acquire_phy_lock(bp);
12751         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12752                              devad, addr, &value);
12753         bnx2x_release_phy_lock(bp);
12754         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12755
12756         if (!rc)
12757                 rc = value;
12758         return rc;
12759 }
12760
12761 /* called with rtnl_lock */
12762 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12763                             u16 addr, u16 value)
12764 {
12765         struct bnx2x *bp = netdev_priv(netdev);
12766         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12767         int rc;
12768
12769         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12770                            " value 0x%x\n", prtad, devad, addr, value);
12771
12772         if (prtad != bp->mdio.prtad) {
12773                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12774                    prtad, bp->mdio.prtad);
12775                 return -EINVAL;
12776         }
12777
12778         /* The HW expects different devad if CL22 is used */
12779         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12780
12781         bnx2x_acquire_phy_lock(bp);
12782         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12783                               devad, addr, value);
12784         bnx2x_release_phy_lock(bp);
12785         return rc;
12786 }
12787
12788 /* called with rtnl_lock */
12789 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12790 {
12791         struct bnx2x *bp = netdev_priv(dev);
12792         struct mii_ioctl_data *mdio = if_mii(ifr);
12793
12794         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12795            mdio->phy_id, mdio->reg_num, mdio->val_in);
12796
12797         if (!netif_running(dev))
12798                 return -EAGAIN;
12799
12800         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12801 }
12802
12803 /* called with rtnl_lock */
12804 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12805 {
12806         struct bnx2x *bp = netdev_priv(dev);
12807         int rc = 0;
12808
12809         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12810                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12811                 return -EAGAIN;
12812         }
12813
12814         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12815             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12816                 return -EINVAL;
12817
12818         /* This does not race with packet allocation
12819          * because the actual alloc size is
12820          * only updated as part of load
12821          */
12822         dev->mtu = new_mtu;
12823
12824         if (netif_running(dev)) {
12825                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12826                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
12827         }
12828
12829         return rc;
12830 }
12831
12832 static void bnx2x_tx_timeout(struct net_device *dev)
12833 {
12834         struct bnx2x *bp = netdev_priv(dev);
12835
12836 #ifdef BNX2X_STOP_ON_ERROR
12837         if (!bp->panic)
12838                 bnx2x_panic();
12839 #endif
12840         /* This allows the netif to be shut down gracefully before resetting */
12841         schedule_delayed_work(&bp->reset_task, 0);
12842 }
12843
12844 #ifdef BCM_VLAN
12845 /* called with rtnl_lock */
12846 static void bnx2x_vlan_rx_register(struct net_device *dev,
12847                                    struct vlan_group *vlgrp)
12848 {
12849         struct bnx2x *bp = netdev_priv(dev);
12850
12851         bp->vlgrp = vlgrp;
12852
12853         /* Set flags according to the required capabilities */
12854         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12855
12856         if (dev->features & NETIF_F_HW_VLAN_TX)
12857                 bp->flags |= HW_VLAN_TX_FLAG;
12858
12859         if (dev->features & NETIF_F_HW_VLAN_RX)
12860                 bp->flags |= HW_VLAN_RX_FLAG;
12861
12862         if (netif_running(dev))
12863                 bnx2x_set_client_config(bp);
12864 }
12865
12866 #endif
12867
12868 #ifdef CONFIG_NET_POLL_CONTROLLER
12869 static void poll_bnx2x(struct net_device *dev)
12870 {
12871         struct bnx2x *bp = netdev_priv(dev);
12872
12873         disable_irq(bp->pdev->irq);
12874         bnx2x_interrupt(bp->pdev->irq, dev);
12875         enable_irq(bp->pdev->irq);
12876 }
12877 #endif
12878
12879 static const struct net_device_ops bnx2x_netdev_ops = {
12880         .ndo_open               = bnx2x_open,
12881         .ndo_stop               = bnx2x_close,
12882         .ndo_start_xmit         = bnx2x_start_xmit,
12883         .ndo_set_multicast_list = bnx2x_set_rx_mode,
12884         .ndo_set_mac_address    = bnx2x_change_mac_addr,
12885         .ndo_validate_addr      = eth_validate_addr,
12886         .ndo_do_ioctl           = bnx2x_ioctl,
12887         .ndo_change_mtu         = bnx2x_change_mtu,
12888         .ndo_tx_timeout         = bnx2x_tx_timeout,
12889 #ifdef BCM_VLAN
12890         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
12891 #endif
12892 #ifdef CONFIG_NET_POLL_CONTROLLER
12893         .ndo_poll_controller    = poll_bnx2x,
12894 #endif
12895 };
12896
12897 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12898                                     struct net_device *dev)
12899 {
12900         struct bnx2x *bp;
12901         int rc;
12902
12903         SET_NETDEV_DEV(dev, &pdev->dev);
12904         bp = netdev_priv(dev);
12905
12906         bp->dev = dev;
12907         bp->pdev = pdev;
12908         bp->flags = 0;
12909         bp->func = PCI_FUNC(pdev->devfn);
12910
12911         rc = pci_enable_device(pdev);
12912         if (rc) {
12913                 dev_err(&bp->pdev->dev,
12914                         "Cannot enable PCI device, aborting\n");
12915                 goto err_out;
12916         }
12917
12918         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12919                 dev_err(&bp->pdev->dev,
12920                         "Cannot find PCI device base address, aborting\n");
12921                 rc = -ENODEV;
12922                 goto err_out_disable;
12923         }
12924
12925         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12926                 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
12927                        " base address, aborting\n");
12928                 rc = -ENODEV;
12929                 goto err_out_disable;
12930         }
12931
12932         if (atomic_read(&pdev->enable_cnt) == 1) {
12933                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12934                 if (rc) {
12935                         dev_err(&bp->pdev->dev,
12936                                 "Cannot obtain PCI resources, aborting\n");
12937                         goto err_out_disable;
12938                 }
12939
12940                 pci_set_master(pdev);
12941                 pci_save_state(pdev);
12942         }
12943
12944         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12945         if (bp->pm_cap == 0) {
12946                 dev_err(&bp->pdev->dev,
12947                         "Cannot find power management capability, aborting\n");
12948                 rc = -EIO;
12949                 goto err_out_release;
12950         }
12951
12952         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12953         if (bp->pcie_cap == 0) {
12954                 dev_err(&bp->pdev->dev,
12955                         "Cannot find PCI Express capability, aborting\n");
12956                 rc = -EIO;
12957                 goto err_out_release;
12958         }
12959
12960         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
12961                 bp->flags |= USING_DAC_FLAG;
12962                 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
12963                         dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
12964                                " failed, aborting\n");
12965                         rc = -EIO;
12966                         goto err_out_release;
12967                 }
12968
12969         } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12970                 dev_err(&bp->pdev->dev,
12971                         "System does not support DMA, aborting\n");
12972                 rc = -EIO;
12973                 goto err_out_release;
12974         }
12975
12976         dev->mem_start = pci_resource_start(pdev, 0);
12977         dev->base_addr = dev->mem_start;
12978         dev->mem_end = pci_resource_end(pdev, 0);
12979
12980         dev->irq = pdev->irq;
12981
12982         bp->regview = pci_ioremap_bar(pdev, 0);
12983         if (!bp->regview) {
12984                 dev_err(&bp->pdev->dev,
12985                         "Cannot map register space, aborting\n");
12986                 rc = -ENOMEM;
12987                 goto err_out_release;
12988         }
12989
12990         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12991                                         min_t(u64, BNX2X_DB_SIZE,
12992                                               pci_resource_len(pdev, 2)));
12993         if (!bp->doorbells) {
12994                 dev_err(&bp->pdev->dev,
12995                         "Cannot map doorbell space, aborting\n");
12996                 rc = -ENOMEM;
12997                 goto err_out_unmap;
12998         }
12999
13000         bnx2x_set_power_state(bp, PCI_D0);
13001
13002         /* clean indirect addresses */
13003         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
13004                                PCICFG_VENDOR_ID_OFFSET);
13005         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
13006         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
13007         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
13008         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
13009
13010         /* Reset the load counter */
13011         bnx2x_clear_load_cnt(bp);
13012
13013         dev->watchdog_timeo = TX_TIMEOUT;
13014
13015         dev->netdev_ops = &bnx2x_netdev_ops;
13016         dev->ethtool_ops = &bnx2x_ethtool_ops;
13017         dev->features |= NETIF_F_SG;
13018         dev->features |= NETIF_F_HW_CSUM;
13019         if (bp->flags & USING_DAC_FLAG)
13020                 dev->features |= NETIF_F_HIGHDMA;
13021         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13022         dev->features |= NETIF_F_TSO6;
13023 #ifdef BCM_VLAN
13024         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
13025         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
13026
13027         dev->vlan_features |= NETIF_F_SG;
13028         dev->vlan_features |= NETIF_F_HW_CSUM;
13029         if (bp->flags & USING_DAC_FLAG)
13030                 dev->vlan_features |= NETIF_F_HIGHDMA;
13031         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13032         dev->vlan_features |= NETIF_F_TSO6;
13033 #endif
13034
13035         /* get_port_hwinfo() will set prtad and mmds properly */
13036         bp->mdio.prtad = MDIO_PRTAD_NONE;
13037         bp->mdio.mmds = 0;
13038         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13039         bp->mdio.dev = dev;
13040         bp->mdio.mdio_read = bnx2x_mdio_read;
13041         bp->mdio.mdio_write = bnx2x_mdio_write;
13042
13043         return 0;
13044
13045 err_out_unmap:
13046         if (bp->regview) {
13047                 iounmap(bp->regview);
13048                 bp->regview = NULL;
13049         }
13050         if (bp->doorbells) {
13051                 iounmap(bp->doorbells);
13052                 bp->doorbells = NULL;
13053         }
13054
13055 err_out_release:
13056         if (atomic_read(&pdev->enable_cnt) == 1)
13057                 pci_release_regions(pdev);
13058
13059 err_out_disable:
13060         pci_disable_device(pdev);
13061         pci_set_drvdata(pdev, NULL);
13062
13063 err_out:
13064         return rc;
13065 }
13066
13067 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
13068                                                  int *width, int *speed)
13069 {
13070         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
13071
13072         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
13073
13074         /* return value of 1=2.5GHz 2=5GHz */
13075         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
13076 }
13077
13078 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
13079 {
13080         const struct firmware *firmware = bp->firmware;
13081         struct bnx2x_fw_file_hdr *fw_hdr;
13082         struct bnx2x_fw_file_section *sections;
13083         u32 offset, len, num_ops;
13084         u16 *ops_offsets;
13085         int i;
13086         const u8 *fw_ver;
13087
13088         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
13089                 return -EINVAL;
13090
13091         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13092         sections = (struct bnx2x_fw_file_section *)fw_hdr;
13093
13094         /* Make sure none of the offsets and sizes make us read beyond
13095          * the end of the firmware data */
13096         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13097                 offset = be32_to_cpu(sections[i].offset);
13098                 len = be32_to_cpu(sections[i].len);
13099                 if (offset + len > firmware->size) {
13100                         dev_err(&bp->pdev->dev,
13101                                 "Section %d length is out of bounds\n", i);
13102                         return -EINVAL;
13103                 }
13104         }
13105
13106         /* Likewise for the init_ops offsets */
13107         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13108         ops_offsets = (u16 *)(firmware->data + offset);
13109         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13110
13111         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13112                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13113                         dev_err(&bp->pdev->dev,
13114                                 "Section offset %d is out of bounds\n", i);
13115                         return -EINVAL;
13116                 }
13117         }
13118
13119         /* Check FW version */
13120         offset = be32_to_cpu(fw_hdr->fw_version.offset);
13121         fw_ver = firmware->data + offset;
13122         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13123             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13124             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13125             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13126                 dev_err(&bp->pdev->dev,
13127                         "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13128                        fw_ver[0], fw_ver[1], fw_ver[2],
13129                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
13130                        BCM_5710_FW_MINOR_VERSION,
13131                        BCM_5710_FW_REVISION_VERSION,
13132                        BCM_5710_FW_ENGINEERING_VERSION);
13133                 return -EINVAL;
13134         }
13135
13136         return 0;
13137 }
13138
13139 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13140 {
13141         const __be32 *source = (const __be32 *)_source;
13142         u32 *target = (u32 *)_target;
13143         u32 i;
13144
13145         for (i = 0; i < n/4; i++)
13146                 target[i] = be32_to_cpu(source[i]);
13147 }
13148
13149 /*
13150    Ops array is stored in the following format:
13151    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13152  */
13153 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13154 {
13155         const __be32 *source = (const __be32 *)_source;
13156         struct raw_op *target = (struct raw_op *)_target;
13157         u32 i, j, tmp;
13158
13159         for (i = 0, j = 0; i < n/8; i++, j += 2) {
13160                 tmp = be32_to_cpu(source[j]);
13161                 target[i].op = (tmp >> 24) & 0xff;
13162                 target[i].offset = tmp & 0xffffff;
13163                 target[i].raw_data = be32_to_cpu(source[j + 1]);
13164         }
13165 }
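
/* Decode example (hypothetical input): the big-endian word pair
 * 0x02000010 0x00000005 becomes op = 0x02, offset = 0x000010 and
 * raw_data = 0x00000005.
 */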
13166
13167 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13168 {
13169         const __be16 *source = (const __be16 *)_source;
13170         u16 *target = (u16 *)_target;
13171         u32 i;
13172
13173         for (i = 0; i < n/2; i++)
13174                 target[i] = be16_to_cpu(source[i]);
13175 }
13176
13177 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
13178 do {                                                                    \
13179         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
13180         bp->arr = kmalloc(len, GFP_KERNEL);                             \
13181         if (!bp->arr) {                                                 \
13182                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
13183                 goto lbl;                                               \
13184         }                                                               \
13185         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
13186              (u8 *)bp->arr, len);                                       \
13187 } while (0)
13188
13189 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
13190 {
13191         const char *fw_file_name;
13192         struct bnx2x_fw_file_hdr *fw_hdr;
13193         int rc;
13194
13195         if (CHIP_IS_E1(bp))
13196                 fw_file_name = FW_FILE_NAME_E1;
13197         else if (CHIP_IS_E1H(bp))
13198                 fw_file_name = FW_FILE_NAME_E1H;
13199         else {
13200                 dev_err(dev, "Unsupported chip revision\n");
13201                 return -EINVAL;
13202         }
13203
13204         dev_info(dev, "Loading %s\n", fw_file_name);
13205
13206         rc = request_firmware(&bp->firmware, fw_file_name, dev);
13207         if (rc) {
13208                 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
13209                 goto request_firmware_exit;
13210         }
13211
13212         rc = bnx2x_check_firmware(bp);
13213         if (rc) {
13214                 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
13215                 goto request_firmware_exit;
13216         }
13217
13218         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13219
13220         /* Initialize the pointers to the init arrays */
13221         /* Blob */
13222         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13223
13224         /* Opcodes */
13225         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13226
13227         /* Offsets */
13228         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13229                             be16_to_cpu_n);
13230
13231         /* STORMs firmware */
13232         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13233                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13234         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
13235                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13236         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13237                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13238         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
13239                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
13240         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13241                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13242         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
13243                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13244         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13245                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13246         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
13247                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
13248
13249         return 0;
13250
13251 init_offsets_alloc_err:
13252         kfree(bp->init_ops);
13253 init_ops_alloc_err:
13254         kfree(bp->init_data);
13255 request_firmware_exit:
13256         release_firmware(bp->firmware);
13257
13258         return rc;
13259 }
13260
13261
13262 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
13263                                     const struct pci_device_id *ent)
13264 {
13265         struct net_device *dev = NULL;
13266         struct bnx2x *bp;
13267         int pcie_width, pcie_speed;
13268         int rc;
13269
13270         /* dev is zeroed in alloc_etherdev_mq() */
13271         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
13272         if (!dev) {
13273                 dev_err(&pdev->dev, "Cannot allocate net device\n");
13274                 return -ENOMEM;
13275         }
13276
13277         bp = netdev_priv(dev);
13278         bp->msg_enable = debug;
13279
13280         pci_set_drvdata(pdev, dev);
13281
13282         rc = bnx2x_init_dev(pdev, dev);
13283         if (rc < 0) {
13284                 free_netdev(dev);
13285                 return rc;
13286         }
13287
13288         rc = bnx2x_init_bp(bp);
13289         if (rc)
13290                 goto init_one_exit;
13291
13292         /* Set init arrays */
13293         rc = bnx2x_init_firmware(bp, &pdev->dev);
13294         if (rc) {
13295                 dev_err(&pdev->dev, "Error loading firmware\n");
13296                 goto init_one_exit;
13297         }
13298
13299         rc = register_netdev(dev);
13300         if (rc) {
13301                 dev_err(&pdev->dev, "Cannot register net device\n");
13302                 goto init_one_exit;
13303         }
13304
13305         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
13306         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
13307                " IRQ %d, ", board_info[ent->driver_data].name,
13308                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13309                pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
13310                dev->base_addr, bp->pdev->irq);
13311         pr_cont("node addr %pM\n", dev->dev_addr);
13312
13313         return 0;
13314
13315 init_one_exit:
13316         if (bp->regview)
13317                 iounmap(bp->regview);
13318
13319         if (bp->doorbells)
13320                 iounmap(bp->doorbells);
13321
13322         free_netdev(dev);
13323
13324         if (atomic_read(&pdev->enable_cnt) == 1)
13325                 pci_release_regions(pdev);
13326
13327         pci_disable_device(pdev);
13328         pci_set_drvdata(pdev, NULL);
13329
13330         return rc;
13331 }
13332
13333 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
13334 {
13335         struct net_device *dev = pci_get_drvdata(pdev);
13336         struct bnx2x *bp;
13337
13338         if (!dev) {
13339                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13340                 return;
13341         }
13342         bp = netdev_priv(dev);
13343
13344         unregister_netdev(dev);
13345
13346         /* Make sure RESET task is not scheduled before continuing */
13347         cancel_delayed_work_sync(&bp->reset_task);
13348
13349         kfree(bp->init_ops_offsets);
13350         kfree(bp->init_ops);
13351         kfree(bp->init_data);
13352         release_firmware(bp->firmware);
13353
13354         if (bp->regview)
13355                 iounmap(bp->regview);
13356
13357         if (bp->doorbells)
13358                 iounmap(bp->doorbells);
13359
13360         free_netdev(dev);
13361
13362         if (atomic_read(&pdev->enable_cnt) == 1)
13363                 pci_release_regions(pdev);
13364
13365         pci_disable_device(pdev);
13366         pci_set_drvdata(pdev, NULL);
13367 }
13368
13369 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13370 {
13371         struct net_device *dev = pci_get_drvdata(pdev);
13372         struct bnx2x *bp;
13373
13374         if (!dev) {
13375                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13376                 return -ENODEV;
13377         }
13378         bp = netdev_priv(dev);
13379
13380         rtnl_lock();
13381
13382         pci_save_state(pdev);
13383
13384         if (!netif_running(dev)) {
13385                 rtnl_unlock();
13386                 return 0;
13387         }
13388
13389         netif_device_detach(dev);
13390
13391         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
13392
13393         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
13394
13395         rtnl_unlock();
13396
13397         return 0;
13398 }
13399
13400 static int bnx2x_resume(struct pci_dev *pdev)
13401 {
13402         struct net_device *dev = pci_get_drvdata(pdev);
13403         struct bnx2x *bp;
13404         int rc;
13405
13406         if (!dev) {
13407                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13408                 return -ENODEV;
13409         }
13410         bp = netdev_priv(dev);
13411
13412         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13413                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13414                 return -EAGAIN;
13415         }
13416
13417         rtnl_lock();
13418
13419         pci_restore_state(pdev);
13420
13421         if (!netif_running(dev)) {
13422                 rtnl_unlock();
13423                 return 0;
13424         }
13425
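        /* Back to full power, re-attach and reload the NIC */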
13426         bnx2x_set_power_state(bp, PCI_D0);
13427         netif_device_attach(dev);
13428
13429         rc = bnx2x_nic_load(bp, LOAD_OPEN);
13430
13431         rtnl_unlock();
13432
13433         return rc;
13434 }
13435
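/*
 * Lightweight unload used on EEH errors: the device may no longer be
 * accessible, so skip the usual chip/MCP unload handshake and release
 * only host-side resources (IRQs, skbs, SGEs, driver memory).
 */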
13436 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13437 {
13438         int i;
13439
13440         bp->state = BNX2X_STATE_ERROR;
13441
13442         bp->rx_mode = BNX2X_RX_MODE_NONE;
13443
13444         bnx2x_netif_stop(bp, 0);
13445         netif_carrier_off(bp->dev);
13446
13447         del_timer_sync(&bp->timer);
13448         bp->stats_state = STATS_STATE_DISABLED;
13449         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
13450
13451         /* Release IRQs */
13452         bnx2x_free_irq(bp, false);
13453
13454         if (CHIP_IS_E1(bp)) {
13455                 struct mac_configuration_cmd *config =
13456                                                 bnx2x_sp(bp, mcast_config);
13457
13458                 for (i = 0; i < config->hdr.length; i++)
13459                         CAM_INVALIDATE(config->config_table[i]);
13460         }
13461
13462         /* Free SKBs, SGEs, TPA pool and driver internals */
13463         bnx2x_free_skbs(bp);
13464         for_each_queue(bp, i)
13465                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
13466         for_each_queue(bp, i)
13467                 netif_napi_del(&bnx2x_fp(bp, i, napi));
13468         bnx2x_free_mem(bp);
13469
13470         bp->state = BNX2X_STATE_CLOSED;
13471
13472         return 0;
13473 }
13474
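/*
 * After a slot reset, re-read the MCP shared memory base and validity
 * signature and resync the driver/firmware mailbox sequence number.
 */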
13475 static void bnx2x_eeh_recover(struct bnx2x *bp)
13476 {
13477         u32 val;
13478
13479         mutex_init(&bp->port.phy_mutex);
13480
13481         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
13482         bp->link_params.shmem_base = bp->common.shmem_base;
13483         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
13484
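        /* shmem must fall inside the 0xA0000-0xBFFFF window; anything else
         * means the MCP is not running.
         */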
13485         if (!bp->common.shmem_base ||
13486             (bp->common.shmem_base < 0xA0000) ||
13487             (bp->common.shmem_base >= 0xC0000)) {
13488                 BNX2X_DEV_INFO("MCP not active\n");
13489                 bp->flags |= NO_MCP_FLAG;
13490                 return;
13491         }
13492
13493         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
13494         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13495                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13496                 BNX2X_ERR("BAD MCP validity signature\n");
13497
13498         if (!BP_NOMCP(bp)) {
13499                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
13500                               & DRV_MSG_SEQ_NUMBER_MASK);
13501                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
13502         }
13503 }
13504
13505 /**
13506  * bnx2x_io_error_detected - called when a PCI error is detected
13507  * @pdev: Pointer to PCI device
13508  * @state: The current PCI connection state
13509  *
13510  * This function is called after a PCI bus error affecting
13511  * this device has been detected.
13512  */
13513 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
13514                                                 pci_channel_state_t state)
13515 {
13516         struct net_device *dev = pci_get_drvdata(pdev);
13517         struct bnx2x *bp = netdev_priv(dev);
13518
13519         rtnl_lock();
13520
13521         netif_device_detach(dev);
13522
13523         if (state == pci_channel_io_perm_failure) {
13524                 rtnl_unlock();
13525                 return PCI_ERS_RESULT_DISCONNECT;
13526         }
13527
13528         if (netif_running(dev))
13529                 bnx2x_eeh_nic_unload(bp);
13530
13531         pci_disable_device(pdev);
13532
13533         rtnl_unlock();
13534
13535         /* Request a slot reset */
13536         return PCI_ERS_RESULT_NEED_RESET;
13537 }
13538
13539 /**
13540  * bnx2x_io_slot_reset - called after the PCI bus has been reset
13541  * @pdev: Pointer to PCI device
13542  *
13543  * Restart the card from scratch, as if from a cold boot.
13544  */
13545 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
13546 {
13547         struct net_device *dev = pci_get_drvdata(pdev);
13548         struct bnx2x *bp = netdev_priv(dev);
13549
13550         rtnl_lock();
13551
13552         if (pci_enable_device(pdev)) {
13553                 dev_err(&pdev->dev,
13554                         "Cannot re-enable PCI device after reset\n");
13555                 rtnl_unlock();
13556                 return PCI_ERS_RESULT_DISCONNECT;
13557         }
13558
13559         pci_set_master(pdev);
13560         pci_restore_state(pdev);
13561
13562         if (netif_running(dev))
13563                 bnx2x_set_power_state(bp, PCI_D0);
13564
13565         rtnl_unlock();
13566
13567         return PCI_ERS_RESULT_RECOVERED;
13568 }
13569
13570 /**
13571  * bnx2x_io_resume - called when traffic can start flowing again
13572  * @pdev: Pointer to PCI device
13573  *
13574  * This callback is called when the error recovery driver tells us that
13575  * it's OK to resume normal operation.
13576  */
13577 static void bnx2x_io_resume(struct pci_dev *pdev)
13578 {
13579         struct net_device *dev = pci_get_drvdata(pdev);
13580         struct bnx2x *bp = netdev_priv(dev);
13581
13582         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13583                 netdev_err(dev, "Handling parity error recovery. Try again later\n");
13584                 return;
13585         }
13586
13587         rtnl_lock();
13588
13589         bnx2x_eeh_recover(bp);
13590
13591         if (netif_running(dev))
13592                 bnx2x_nic_load(bp, LOAD_NORMAL);
13593
13594         netif_device_attach(dev);
13595
13596         rtnl_unlock();
13597 }
13598
13599 static struct pci_error_handlers bnx2x_err_handler = {
13600         .error_detected = bnx2x_io_error_detected,
13601         .slot_reset     = bnx2x_io_slot_reset,
13602         .resume         = bnx2x_io_resume,
13603 };
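/* EEH recovery flow: error_detected -> slot_reset -> resume */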
13604
13605 static struct pci_driver bnx2x_pci_driver = {
13606         .name        = DRV_MODULE_NAME,
13607         .id_table    = bnx2x_pci_tbl,
13608         .probe       = bnx2x_init_one,
13609         .remove      = __devexit_p(bnx2x_remove_one),
13610         .suspend     = bnx2x_suspend,
13611         .resume      = bnx2x_resume,
13612         .err_handler = &bnx2x_err_handler,
13613 };
13614
13615 static int __init bnx2x_init(void)
13616 {
13617         int ret;
13618
13619         pr_info("%s", version);
13620
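        /* Single-threaded workqueue for slow-path work such as the reset
         * task canceled in bnx2x_remove_one.
         */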
13621         bnx2x_wq = create_singlethread_workqueue("bnx2x");
13622         if (bnx2x_wq == NULL) {
13623                 pr_err("Cannot create workqueue\n");
13624                 return -ENOMEM;
13625         }
13626
13627         ret = pci_register_driver(&bnx2x_pci_driver);
13628         if (ret) {
13629                 pr_err("Cannot register driver\n");
13630                 destroy_workqueue(bnx2x_wq);
13631         }
13632         return ret;
13633 }
13634
13635 static void __exit bnx2x_cleanup(void)
13636 {
13637         pci_unregister_driver(&bnx2x_pci_driver);
13638
13639         destroy_workqueue(bnx2x_wq);
13640 }
13641
13642 module_init(bnx2x_init);
13643 module_exit(bnx2x_cleanup);
13644
13645 #ifdef BCM_CNIC
13646
13647 /* count denotes the number of new completions we have seen */
13648 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13649 {
13650         struct eth_spe *spe;
13651
13652 #ifdef BNX2X_STOP_ON_ERROR
13653         if (unlikely(bp->panic))
13654                 return;
13655 #endif
13656
13657         spin_lock_bh(&bp->spq_lock);
13658         bp->cnic_spq_pending -= count;
13659
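        /* Refill the slow path queue from the CNIC kwqe ring until the SPQ
         * quota (max_kwqe_pending) is reached or the kwqe ring drains; the
         * ring wraps at cnic_kwq_last.
         */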
13660         for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
13661              bp->cnic_spq_pending++) {
13662
13663                 if (!bp->cnic_kwq_pending)
13664                         break;
13665
13666                 spe = bnx2x_sp_get_next(bp);
13667                 *spe = *bp->cnic_kwq_cons;
13668
13669                 bp->cnic_kwq_pending--;
13670
13671                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
13672                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
13673
13674                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
13675                         bp->cnic_kwq_cons = bp->cnic_kwq;
13676                 else
13677                         bp->cnic_kwq_cons++;
13678         }
13679         bnx2x_sp_prod_update(bp);
13680         spin_unlock_bh(&bp->spq_lock);
13681 }
13682
13683 static int bnx2x_cnic_sp_queue(struct net_device *dev,
13684                                struct kwqe_16 *kwqes[], u32 count)
13685 {
13686         struct bnx2x *bp = netdev_priv(dev);
13687         int i;
13688
13689 #ifdef BNX2X_STOP_ON_ERROR
13690         if (unlikely(bp->panic))
13691                 return -EIO;
13692 #endif
13693
13694         spin_lock_bh(&bp->spq_lock);
13695
13696         for (i = 0; i < count; i++) {
13697                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
13698
13699                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
13700                         break;
13701
13702                 *bp->cnic_kwq_prod = *spe;
13703
13704                 bp->cnic_kwq_pending++;
13705
13706                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
13707                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
13708                    spe->data.mac_config_addr.hi,
13709                    spe->data.mac_config_addr.lo,
13710                    bp->cnic_kwq_pending);
13711
13712                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
13713                         bp->cnic_kwq_prod = bp->cnic_kwq;
13714                 else
13715                         bp->cnic_kwq_prod++;
13716         }
13717
13718         spin_unlock_bh(&bp->spq_lock);
13719
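        /* Kick the SPQ with zero new completions to flush what we queued */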
13720         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
13721                 bnx2x_cnic_sp_post(bp, 0);
13722
13723         return i;
13724 }
13725
13726 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13727 {
13728         struct cnic_ops *c_ops;
13729         int rc = 0;
13730
13731         mutex_lock(&bp->cnic_mutex);
13732         c_ops = bp->cnic_ops;
13733         if (c_ops)
13734                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13735         mutex_unlock(&bp->cnic_mutex);
13736
13737         return rc;
13738 }
13739
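/* As bnx2x_cnic_ctl_send() but safe in BH context: RCU instead of the mutex */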
13740 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13741 {
13742         struct cnic_ops *c_ops;
13743         int rc = 0;
13744
13745         rcu_read_lock();
13746         c_ops = rcu_dereference(bp->cnic_ops);
13747         if (c_ops)
13748                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13749         rcu_read_unlock();
13750
13751         return rc;
13752 }
13753
13754 /*
13755  * for commands that have no data
13756  */
13757 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
13758 {
13759         struct cnic_ctl_info ctl = {0};
13760
13761         ctl.cmd = cmd;
13762
13763         return bnx2x_cnic_ctl_send(bp, &ctl);
13764 }
13765
13766 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
13767 {
13768         struct cnic_ctl_info ctl;
13769
13770         /* first we tell CNIC and only then we count this as a completion */
13771         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
13772         ctl.data.comp.cid = cid;
13773
13774         bnx2x_cnic_ctl_send_bh(bp, &ctl);
13775         bnx2x_cnic_sp_post(bp, 1);
13776 }
13777
13778 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13779 {
13780         struct bnx2x *bp = netdev_priv(dev);
13781         int rc = 0;
13782
13783         switch (ctl->cmd) {
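        /* CNIC asks us to program an ILT (context table) entry for it */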
13784         case DRV_CTL_CTXTBL_WR_CMD: {
13785                 u32 index = ctl->data.io.offset;
13786                 dma_addr_t addr = ctl->data.io.dma_addr;
13787
13788                 bnx2x_ilt_wr(bp, index, addr);
13789                 break;
13790         }
13791
13792         case DRV_CTL_COMPLETION_CMD: {
13793                 int count = ctl->data.comp.comp_count;
13794
13795                 bnx2x_cnic_sp_post(bp, count);
13796                 break;
13797         }
13798
13799         /* rtnl_lock is held.  */
13800         case DRV_CTL_START_L2_CMD: {
13801                 u32 cli = ctl->data.ring.client_id;
13802
13803                 bp->rx_mode_cl_mask |= (1 << cli);
13804                 bnx2x_set_storm_rx_mode(bp);
13805                 break;
13806         }
13807
13808         /* rtnl_lock is held.  */
13809         case DRV_CTL_STOP_L2_CMD: {
13810                 u32 cli = ctl->data.ring.client_id;
13811
13812                 bp->rx_mode_cl_mask &= ~(1 << cli);
13813                 bnx2x_set_storm_rx_mode(bp);
13814                 break;
13815         }
13816
13817         default:
13818                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
13819                 rc = -EINVAL;
13820         }
13821
13822         return rc;
13823 }
13824
13825 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
13826 {
13827         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13828
13829         if (bp->flags & USING_MSIX_FLAG) {
13830                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
13831                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
13832                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
13833         } else {
13834                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
13835                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
13836         }
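        /* Slot 0 carries the CNIC status block, slot 1 the default SB */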
13837         cp->irq_arr[0].status_blk = bp->cnic_sb;
13838         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
13839         cp->irq_arr[1].status_blk = bp->def_status_blk;
13840         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
13841
13842         cp->num_irq = 2;
13843 }
13844
13845 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
13846                                void *data)
13847 {
13848         struct bnx2x *bp = netdev_priv(dev);
13849         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13850
13851         if (ops == NULL)
13852                 return -EINVAL;
13853
13854         if (atomic_read(&bp->intr_sem) != 0)
13855                 return -EBUSY;
13856
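        /* A single page of eth_spe entries backs the kwqe ring; the
         * producer/consumer pointers below wrap at cnic_kwq_last.
         */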
13857         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
13858         if (!bp->cnic_kwq)
13859                 return -ENOMEM;
13860
13861         bp->cnic_kwq_cons = bp->cnic_kwq;
13862         bp->cnic_kwq_prod = bp->cnic_kwq;
13863         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
13864
13865         bp->cnic_spq_pending = 0;
13866         bp->cnic_kwq_pending = 0;
13867
13868         bp->cnic_data = data;
13869
13870         cp->num_irq = 0;
13871         cp->drv_state = CNIC_DRV_STATE_REGD;
13872
13873         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
13874
13875         bnx2x_setup_cnic_irq_info(bp);
13876         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
13877         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
13878         rcu_assign_pointer(bp->cnic_ops, ops);
13879
13880         return 0;
13881 }
13882
13883 static int bnx2x_unregister_cnic(struct net_device *dev)
13884 {
13885         struct bnx2x *bp = netdev_priv(dev);
13886         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13887
13888         mutex_lock(&bp->cnic_mutex);
13889         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
13890                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
13891                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
13892         }
13893         cp->drv_state = 0;
13894         rcu_assign_pointer(bp->cnic_ops, NULL);
13895         mutex_unlock(&bp->cnic_mutex);
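        /* Wait out in-flight RCU readers of cnic_ops before freeing the ring */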
13896         synchronize_rcu();
13897         kfree(bp->cnic_kwq);
13898         bp->cnic_kwq = NULL;
13899
13900         return 0;
13901 }
13902
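/*
 * Fill the cnic_eth_dev descriptor the cnic offload module consumes:
 * register windows, context table geometry and the drv_* callbacks above.
 */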
13903 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
13904 {
13905         struct bnx2x *bp = netdev_priv(dev);
13906         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13907
13908         cp->drv_owner = THIS_MODULE;
13909         cp->chip_id = CHIP_ID(bp);
13910         cp->pdev = bp->pdev;
13911         cp->io_base = bp->regview;
13912         cp->io_base2 = bp->doorbells;
13913         cp->max_kwqe_pending = 8;
13914         cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
13915         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
13916         cp->ctx_tbl_len = CNIC_ILT_LINES;
13917         cp->starting_cid = BCM_CNIC_CID_START;
13918         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
13919         cp->drv_ctl = bnx2x_drv_ctl;
13920         cp->drv_register_cnic = bnx2x_register_cnic;
13921         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
13922
13923         return cp;
13924 }
13925 EXPORT_SYMBOL(bnx2x_cnic_probe);
13926
13927 #endif /* BCM_CNIC */
13928