1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/pci.h>
28 #include <linux/init.h>
29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h>
31 #include <linux/skbuff.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/bitops.h>
34 #include <linux/irq.h>
35 #include <linux/delay.h>
36 #include <asm/byteorder.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if_vlan.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/crc32c.h>
48 #include <linux/prefetch.h>
49 #include <linux/zlib.h>
50 #include <linux/io.h>
51 #include <linux/stringify.h>
52
53 #define BNX2X_MAIN
54 #include "bnx2x.h"
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_cmn.h"
58 #include "bnx2x_dcb.h"
59
60 #include <linux/firmware.h>
61 #include "bnx2x_fw_file_hdr.h"
62 /* FW files */
63 #define FW_FILE_VERSION                                 \
64         __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
65         __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
66         __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
67         __stringify(BCM_5710_FW_ENGINEERING_VERSION)
68 #define FW_FILE_NAME_E1         "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
69 #define FW_FILE_NAME_E1H        "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
70 #define FW_FILE_NAME_E2         "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
71
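/*
 * For illustration: with BCM_5710_FW_MAJOR_VERSION == 6,
 * BCM_5710_FW_MINOR_VERSION == 2, BCM_5710_FW_REVISION_VERSION == 5 and
 * BCM_5710_FW_ENGINEERING_VERSION == 0 (hypothetical values -- the real
 * ones come from the firmware headers), the macros above expand to:
 *
 *	FW_FILE_VERSION  -> "6.2.5.0"
 *	FW_FILE_NAME_E1  -> "bnx2x/bnx2x-e1-6.2.5.0.fw"
 *
 * which is the path handed to the firmware loader, relative to the
 * firmware search path (typically /lib/firmware).
 */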
72 /* Time in jiffies before concluding the transmitter is hung */
73 #define TX_TIMEOUT              (5*HZ)
74
75 static char version[] __devinitdata =
76         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
77         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
78
79 MODULE_AUTHOR("Eliezer Tamir");
80 MODULE_DESCRIPTION("Broadcom NetXtreme II "
81                    "BCM57710/57711/57711E/57712/57712E Driver");
82 MODULE_LICENSE("GPL");
83 MODULE_VERSION(DRV_MODULE_VERSION);
84 MODULE_FIRMWARE(FW_FILE_NAME_E1);
85 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
86 MODULE_FIRMWARE(FW_FILE_NAME_E2);
87
88 static int multi_mode = 1;
89 module_param(multi_mode, int, 0);
90 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
91                              "(0 Disable; 1 Enable (default))");
92
93 int num_queues;
94 module_param(num_queues, int, 0);
95 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
96                                 " (default is the number of CPUs)");
97
98 static int disable_tpa;
99 module_param(disable_tpa, int, 0);
100 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
101
102 static int int_mode;
103 module_param(int_mode, int, 0);
104 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
105                                 "(1 INT#x; 2 MSI)");
106
107 static int dropless_fc;
108 module_param(dropless_fc, int, 0);
109 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
110
111 static int poll;
112 module_param(poll, int, 0);
113 MODULE_PARM_DESC(poll, " Use polling (for debug)");
114
115 static int mrrs = -1;
116 module_param(mrrs, int, 0);
117 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
118
119 static int debug;
120 module_param(debug, int, 0);
121 MODULE_PARM_DESC(debug, " Default debug msglevel");
122
123 static struct workqueue_struct *bnx2x_wq;
124
125 #ifdef BCM_CNIC
126 static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
127 #endif
128
129 enum bnx2x_board_type {
130         BCM57710 = 0,
131         BCM57711 = 1,
132         BCM57711E = 2,
133         BCM57712 = 3,
134         BCM57712E = 4
135 };
136
137 /* indexed by board_type, above */
138 static struct {
139         char *name;
140 } board_info[] __devinitdata = {
141         { "Broadcom NetXtreme II BCM57710 XGb" },
142         { "Broadcom NetXtreme II BCM57711 XGb" },
143         { "Broadcom NetXtreme II BCM57711E XGb" },
144         { "Broadcom NetXtreme II BCM57712 XGb" },
145         { "Broadcom NetXtreme II BCM57712E XGb" }
146 };
147
148 #ifndef PCI_DEVICE_ID_NX2_57712
149 #define PCI_DEVICE_ID_NX2_57712         0x1662
150 #endif
151 #ifndef PCI_DEVICE_ID_NX2_57712E
152 #define PCI_DEVICE_ID_NX2_57712E        0x1663
153 #endif
154
155 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
156         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
157         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
158         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
159         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
160         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
161         { 0 }
162 };
163
164 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
165
166 /****************************************************************************
167 * General service functions
168 ****************************************************************************/
169
170 static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
171                                        u32 addr, dma_addr_t mapping)
172 {
173         REG_WR(bp,  addr, U64_LO(mapping));
174         REG_WR(bp,  addr + 4, U64_HI(mapping));
175 }
176
177 static inline void __storm_memset_fill(struct bnx2x *bp,
178                                        u32 addr, size_t size, u32 val)
179 {
180         int i;
181         for (i = 0; i < size/4; i++)
182                 REG_WR(bp,  addr + (i * 4), val);
183 }
184
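/*
 * Illustrative call (not driver code): zeroing a 16-byte region at a
 * hypothetical STORM internal-memory offset writes four dwords:
 *
 *	__storm_memset_fill(bp, some_addr, 16, 0);
 *	// equivalent to REG_WR at some_addr + 0, + 4, + 8 and + 12
 *
 * The storm_memset_*_zero() helpers below use exactly this to clear
 * whole per-client statistics structures.
 */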
185 static inline void storm_memset_ustats_zero(struct bnx2x *bp,
186                                             u8 port, u16 stat_id)
187 {
188         size_t size = sizeof(struct ustorm_per_client_stats);
189
190         u32 addr = BAR_USTRORM_INTMEM +
191                         USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
192
193         __storm_memset_fill(bp, addr, size, 0);
194 }
195
196 static inline void storm_memset_tstats_zero(struct bnx2x *bp,
197                                             u8 port, u16 stat_id)
198 {
199         size_t size = sizeof(struct tstorm_per_client_stats);
200
201         u32 addr = BAR_TSTRORM_INTMEM +
202                         TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
203
204         __storm_memset_fill(bp, addr, size, 0);
205 }
206
207 static inline void storm_memset_xstats_zero(struct bnx2x *bp,
208                                             u8 port, u16 stat_id)
209 {
210         size_t size = sizeof(struct xstorm_per_client_stats);
211
212         u32 addr = BAR_XSTRORM_INTMEM +
213                         XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
214
215         __storm_memset_fill(bp, addr, size, 0);
216 }
217
218
219 static inline void storm_memset_spq_addr(struct bnx2x *bp,
220                                          dma_addr_t mapping, u16 abs_fid)
221 {
222         u32 addr = XSEM_REG_FAST_MEMORY +
223                         XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
224
225         __storm_memset_dma_mapping(bp, addr, mapping);
226 }
227
228 static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
229 {
230         REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
231 }
232
233 static inline void storm_memset_func_cfg(struct bnx2x *bp,
234                                 struct tstorm_eth_function_common_config *tcfg,
235                                 u16 abs_fid)
236 {
237         size_t size = sizeof(struct tstorm_eth_function_common_config);
238
239         u32 addr = BAR_TSTRORM_INTMEM +
240                         TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
241
242         __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
243 }
244
245 static inline void storm_memset_xstats_flags(struct bnx2x *bp,
246                                 struct stats_indication_flags *flags,
247                                 u16 abs_fid)
248 {
249         size_t size = sizeof(struct stats_indication_flags);
250
251         u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
252
253         __storm_memset_struct(bp, addr, size, (u32 *)flags);
254 }
255
256 static inline void storm_memset_tstats_flags(struct bnx2x *bp,
257                                 struct stats_indication_flags *flags,
258                                 u16 abs_fid)
259 {
260         size_t size = sizeof(struct stats_indication_flags);
261
262         u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
263
264         __storm_memset_struct(bp, addr, size, (u32 *)flags);
265 }
266
267 static inline void storm_memset_ustats_flags(struct bnx2x *bp,
268                                 struct stats_indication_flags *flags,
269                                 u16 abs_fid)
270 {
271         size_t size = sizeof(struct stats_indication_flags);
272
273         u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
274
275         __storm_memset_struct(bp, addr, size, (u32 *)flags);
276 }
277
278 static inline void storm_memset_cstats_flags(struct bnx2x *bp,
279                                 struct stats_indication_flags *flags,
280                                 u16 abs_fid)
281 {
282         size_t size = sizeof(struct stats_indication_flags);
283
284         u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
285
286         __storm_memset_struct(bp, addr, size, (u32 *)flags);
287 }
288
289 static inline void storm_memset_xstats_addr(struct bnx2x *bp,
290                                            dma_addr_t mapping, u16 abs_fid)
291 {
292         u32 addr = BAR_XSTRORM_INTMEM +
293                 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
294
295         __storm_memset_dma_mapping(bp, addr, mapping);
296 }
297
298 static inline void storm_memset_tstats_addr(struct bnx2x *bp,
299                                            dma_addr_t mapping, u16 abs_fid)
300 {
301         u32 addr = BAR_TSTRORM_INTMEM +
302                 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
303
304         __storm_memset_dma_mapping(bp, addr, mapping);
305 }
306
307 static inline void storm_memset_ustats_addr(struct bnx2x *bp,
308                                            dma_addr_t mapping, u16 abs_fid)
309 {
310         u32 addr = BAR_USTRORM_INTMEM +
311                 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
312
313         __storm_memset_dma_mapping(bp, addr, mapping);
314 }
315
316 static inline void storm_memset_cstats_addr(struct bnx2x *bp,
317                                            dma_addr_t mapping, u16 abs_fid)
318 {
319         u32 addr = BAR_CSTRORM_INTMEM +
320                 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
321
322         __storm_memset_dma_mapping(bp, addr, mapping);
323 }
324
325 static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
326                                          u16 pf_id)
327 {
328         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
329                 pf_id);
330         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
331                 pf_id);
332         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
333                 pf_id);
334         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
335                 pf_id);
336 }
337
338 static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
339                                         u8 enable)
340 {
341         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
342                 enable);
343         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
344                 enable);
345         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
346                 enable);
347         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
348                 enable);
349 }
350
351 static inline void storm_memset_eq_data(struct bnx2x *bp,
352                                 struct event_ring_data *eq_data,
353                                 u16 pfid)
354 {
355         size_t size = sizeof(struct event_ring_data);
356
357         u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
358
359         __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
360 }
361
362 static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
363                                         u16 pfid)
364 {
365         u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
366         REG_WR16(bp, addr, eq_prod);
367 }
368
369 static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
370                                              u16 fw_sb_id, u8 sb_index,
371                                              u8 ticks)
372 {
373
374         int index_offset = CHIP_IS_E2(bp) ?
375                 offsetof(struct hc_status_block_data_e2, index_data) :
376                 offsetof(struct hc_status_block_data_e1x, index_data);
377         u32 addr = BAR_CSTRORM_INTMEM +
378                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
379                         index_offset +
380                         sizeof(struct hc_index_data)*sb_index +
381                         offsetof(struct hc_index_data, timeout);
382         REG_WR8(bp, addr, ticks);
383         DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
384                           port, fw_sb_id, sb_index, ticks);
385 }
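/*
 * Example (illustrative values): writing a 12-tick timeout for
 * status-block index 3 of fw_sb_id 5 on port 0:
 *
 *	storm_memset_hc_timeout(bp, 0, 5, 3, 12);
 *
 * The helper locates the hc_index_data entry for that index inside the
 * E1x/E2 status block data and patches only its timeout byte; the port
 * argument is used for the debug print only.
 */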
386 static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
387                                              u16 fw_sb_id, u8 sb_index,
388                                              u8 disable)
389 {
390         u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
391         int index_offset = CHIP_IS_E2(bp) ?
392                 offsetof(struct hc_status_block_data_e2, index_data) :
393                 offsetof(struct hc_status_block_data_e1x, index_data);
394         u32 addr = BAR_CSTRORM_INTMEM +
395                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
396                         index_offset +
397                         sizeof(struct hc_index_data)*sb_index +
398                         offsetof(struct hc_index_data, flags);
399         u16 flags = REG_RD16(bp, addr);
400         /* clear and set */
401         flags &= ~HC_INDEX_DATA_HC_ENABLED;
402         flags |= enable_flag;
403         REG_WR16(bp, addr, flags);
404         DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
405                           port, fw_sb_id, sb_index, disable);
406 }
407
408 /* used only at init
409  * locking is done by mcp
410  */
411 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
412 {
413         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
414         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
415         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
416                                PCICFG_VENDOR_ID_OFFSET);
417 }
418
419 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
420 {
421         u32 val;
422
423         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
424         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
425         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
426                                PCICFG_VENDOR_ID_OFFSET);
427
428         return val;
429 }
430
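/*
 * Usage sketch (illustrative only): the pair above tunnels GRC accesses
 * through the PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA window in PCI config
 * space, for use before the BAR-based REG_RD/REG_WR path is available.
 * A hypothetical read-modify-write of a GRC register would look like:
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, some_grc_reg); // some_grc_reg and
 *	val |= some_bit;                              // some_bit are
 *	bnx2x_reg_wr_ind(bp, some_grc_reg, val);      // made-up names
 *
 * Both helpers restore PCICFG_GRC_ADDRESS to PCICFG_VENDOR_ID_OFFSET
 * afterwards so a stray config-space read does not hit the GRC window.
 */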
431 #define DMAE_DP_SRC_GRC         "grc src_addr [%08x]"
432 #define DMAE_DP_SRC_PCI         "pci src_addr [%x:%08x]"
433 #define DMAE_DP_DST_GRC         "grc dst_addr [%08x]"
434 #define DMAE_DP_DST_PCI         "pci dst_addr [%x:%08x]"
435 #define DMAE_DP_DST_NONE        "dst_addr [none]"
436
437 static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
438                           int msglvl)
439 {
440         u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
441
442         switch (dmae->opcode & DMAE_COMMAND_DST) {
443         case DMAE_CMD_DST_PCI:
444                 if (src_type == DMAE_CMD_SRC_PCI)
445                         DP(msglvl, "DMAE: opcode 0x%08x\n"
446                            "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
447                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
448                            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
449                            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
450                            dmae->comp_addr_hi, dmae->comp_addr_lo,
451                            dmae->comp_val);
452                 else
453                         DP(msglvl, "DMAE: opcode 0x%08x\n"
454                            "src [%08x], len [%d*4], dst [%x:%08x]\n"
455                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
456                            dmae->opcode, dmae->src_addr_lo >> 2,
457                            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
458                            dmae->comp_addr_hi, dmae->comp_addr_lo,
459                            dmae->comp_val);
460                 break;
461         case DMAE_CMD_DST_GRC:
462                 if (src_type == DMAE_CMD_SRC_PCI)
463                         DP(msglvl, "DMAE: opcode 0x%08x\n"
464                            "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
465                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
466                            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
467                            dmae->len, dmae->dst_addr_lo >> 2,
468                            dmae->comp_addr_hi, dmae->comp_addr_lo,
469                            dmae->comp_val);
470                 else
471                         DP(msglvl, "DMAE: opcode 0x%08x\n"
472                            "src [%08x], len [%d*4], dst [%08x]\n"
473                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
474                            dmae->opcode, dmae->src_addr_lo >> 2,
475                            dmae->len, dmae->dst_addr_lo >> 2,
476                            dmae->comp_addr_hi, dmae->comp_addr_lo,
477                            dmae->comp_val);
478                 break;
479         default:
480                 if (src_type == DMAE_CMD_SRC_PCI)
481                         DP(msglvl, "DMAE: opcode 0x%08x\n"
482                            DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
483                                     "dst_addr [none]\n"
484                            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
485                            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
486                            dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
487                            dmae->comp_val);
488                 else
489                         DP(msglvl, "DMAE: opcode 0x%08x\n"
490                            DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
491                                     "dst_addr [none]\n"
492                            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
493                            dmae->opcode, dmae->src_addr_lo >> 2,
494                            dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
495                            dmae->comp_val);
496                 break;
497         }
498
499 }
500
501 const u32 dmae_reg_go_c[] = {
502         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
503         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
504         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
505         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
506 };
507
508 /* copy command into DMAE command memory and set DMAE command go */
509 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
510 {
511         u32 cmd_offset;
512         int i;
513
514         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
515         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
516                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
517
518                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
519                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
520         }
521         REG_WR(bp, dmae_reg_go_c[idx], 1);
522 }
523
524 u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
525 {
526         return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
527                            DMAE_CMD_C_ENABLE);
528 }
529
530 u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
531 {
532         return opcode & ~DMAE_CMD_SRC_RESET;
533 }
534
535 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
536                              bool with_comp, u8 comp_type)
537 {
538         u32 opcode = 0;
539
540         opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
541                    (dst_type << DMAE_COMMAND_DST_SHIFT));
542
543         opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
544
545         opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
546         opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
547                    (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
548         opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
549
550 #ifdef __BIG_ENDIAN
551         opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
552 #else
553         opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
554 #endif
555         if (with_comp)
556                 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
557         return opcode;
558 }
559
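/*
 * A minimal sketch of how the opcode helpers compose (this mirrors what
 * bnx2x_prep_dmae_with_comp() below does for a host-to-GRC copy):
 *
 *	u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 *				       true, DMAE_COMP_PCI);
 *
 * The result describes a command that copies host memory to GRC and, on
 * completion, DMA-writes DMAE_COMP_VAL to the address programmed into
 * comp_addr_{lo,hi}, which bnx2x_issue_dmae_with_comp() then polls for.
 */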
560 static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
561                                       struct dmae_command *dmae,
562                                       u8 src_type, u8 dst_type)
563 {
564         memset(dmae, 0, sizeof(struct dmae_command));
565
566         /* set the opcode */
567         dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
568                                          true, DMAE_COMP_PCI);
569
570         /* fill in the completion parameters */
571         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
572         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
573         dmae->comp_val = DMAE_COMP_VAL;
574 }
575
576 /* issue a dmae command over the init channel and wait for completion */
577 static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
578                                       struct dmae_command *dmae)
579 {
580         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
581         int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
582         int rc = 0;
583
584         DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
585            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
586            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
587
588         /* lock the dmae channel */
589         mutex_lock(&bp->dmae_mutex);
590
591         /* reset completion */
592         *wb_comp = 0;
593
594         /* post the command on the channel used for initializations */
595         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
596
597         /* wait for completion */
598         udelay(5);
599         while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
600                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
601
602                 if (!cnt) {
603                         BNX2X_ERR("DMAE timeout!\n");
604                         rc = DMAE_TIMEOUT;
605                         goto unlock;
606                 }
607                 cnt--;
608                 udelay(50);
609         }
610         if (*wb_comp & DMAE_PCI_ERR_FLAG) {
611                 BNX2X_ERR("DMAE PCI error!\n");
612                 rc = DMAE_PCI_ERROR;
613         }
614
615         DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
616            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
617            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
618
619 unlock:
620         mutex_unlock(&bp->dmae_mutex);
621         return rc;
622 }
623
624 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
625                       u32 len32)
626 {
627         struct dmae_command dmae;
628
629         if (!bp->dmae_ready) {
630                 u32 *data = bnx2x_sp(bp, wb_data[0]);
631
632                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
633                    "  using indirect\n", dst_addr, len32);
634                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
635                 return;
636         }
637
638         /* set opcode and fixed command fields */
639         bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
640
641         /* fill in addresses and len */
642         dmae.src_addr_lo = U64_LO(dma_addr);
643         dmae.src_addr_hi = U64_HI(dma_addr);
644         dmae.dst_addr_lo = dst_addr >> 2;
645         dmae.dst_addr_hi = 0;
646         dmae.len = len32;
647
648         bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
649
650         /* issue the command and wait for completion */
651         bnx2x_issue_dmae_with_comp(bp, &dmae);
652 }
653
654 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
655 {
656         struct dmae_command dmae;
657
658         if (!bp->dmae_ready) {
659                 u32 *data = bnx2x_sp(bp, wb_data[0]);
660                 int i;
661
662                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
663                    "  using indirect\n", src_addr, len32);
664                 for (i = 0; i < len32; i++)
665                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
666                 return;
667         }
668
669         /* set opcode and fixed command fields */
670         bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
671
672         /* fill in addresses and len */
673         dmae.src_addr_lo = src_addr >> 2;
674         dmae.src_addr_hi = 0;
675         dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
676         dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
677         dmae.len = len32;
678
679         bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
680
681         /* issue the command and wait for completion */
682         bnx2x_issue_dmae_with_comp(bp, &dmae);
683 }
684
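/*
 * Usage sketch (illustrative, not driver code): copying a 64-byte block
 * of slowpath memory into GRC and reading two dwords back.  Lengths are
 * in 32-bit words, GRC offsets in bytes; some_grc_addr is a made-up
 * offset:
 *
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
 *			 some_grc_addr, 64 / 4);
 *	bnx2x_read_dmae(bp, some_grc_addr, 2);
 *	// read results land in bp->slowpath->wb_data[0..1]
 *
 * Both calls fall back to indirect register accesses while
 * bp->dmae_ready is false (early init, before the DMAE block is set up).
 */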
685 static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
686                                       u32 addr, u32 len)
687 {
688         int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
689         int offset = 0;
690
691         while (len > dmae_wr_max) {
692                 bnx2x_write_dmae(bp, phys_addr + offset,
693                                  addr + offset, dmae_wr_max);
694                 offset += dmae_wr_max * 4;
695                 len -= dmae_wr_max;
696         }
697
698         bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
699 }
700
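/*
 * Worked example of the chunking above (numbers are illustrative): with
 * DMAE_LEN32_WR_MAX(bp) == 1024 dwords and len == 2500 dwords, the loop
 * issues two full 1024-dword commands, advancing the byte offset by
 * 1024 * 4 each time, and the final call covers the remaining 452
 * dwords.  len counts dwords while offset counts bytes, hence the "* 4".
 */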
701 /* used only for slowpath so not inlined */
702 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
703 {
704         u32 wb_write[2];
705
706         wb_write[0] = val_hi;
707         wb_write[1] = val_lo;
708         REG_WR_DMAE(bp, reg, wb_write, 2);
709 }
710
711 #ifdef USE_WB_RD
712 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
713 {
714         u32 wb_data[2];
715
716         REG_RD_DMAE(bp, reg, wb_data, 2);
717
718         return HILO_U64(wb_data[0], wb_data[1]);
719 }
720 #endif
721
722 static int bnx2x_mc_assert(struct bnx2x *bp)
723 {
724         char last_idx;
725         int i, rc = 0;
726         u32 row0, row1, row2, row3;
727
728         /* XSTORM */
729         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
730                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
731         if (last_idx)
732                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
733
734         /* print the asserts */
735         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
736
737                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
738                               XSTORM_ASSERT_LIST_OFFSET(i));
739                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
740                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
741                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
742                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
743                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
744                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
745
746                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
747                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
748                                   " 0x%08x 0x%08x 0x%08x\n",
749                                   i, row3, row2, row1, row0);
750                         rc++;
751                 } else {
752                         break;
753                 }
754         }
755
756         /* TSTORM */
757         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
758                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
759         if (last_idx)
760                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
761
762         /* print the asserts */
763         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
764
765                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
766                               TSTORM_ASSERT_LIST_OFFSET(i));
767                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
768                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
769                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
770                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
771                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
772                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
773
774                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
775                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
776                                   " 0x%08x 0x%08x 0x%08x\n",
777                                   i, row3, row2, row1, row0);
778                         rc++;
779                 } else {
780                         break;
781                 }
782         }
783
784         /* CSTORM */
785         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
786                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
787         if (last_idx)
788                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
789
790         /* print the asserts */
791         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
792
793                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
794                               CSTORM_ASSERT_LIST_OFFSET(i));
795                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
796                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
797                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
798                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
799                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
800                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
801
802                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
803                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
804                                   " 0x%08x 0x%08x 0x%08x\n",
805                                   i, row3, row2, row1, row0);
806                         rc++;
807                 } else {
808                         break;
809                 }
810         }
811
812         /* USTORM */
813         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
814                            USTORM_ASSERT_LIST_INDEX_OFFSET);
815         if (last_idx)
816                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
817
818         /* print the asserts */
819         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
820
821                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
822                               USTORM_ASSERT_LIST_OFFSET(i));
823                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
824                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
825                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
826                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
827                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
828                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
829
830                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
831                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
832                                   " 0x%08x 0x%08x 0x%08x\n",
833                                   i, row3, row2, row1, row0);
834                         rc++;
835                 } else {
836                         break;
837                 }
838         }
839
840         return rc;
841 }
842
843 static void bnx2x_fw_dump(struct bnx2x *bp)
844 {
845         u32 addr;
846         u32 mark, offset;
847         __be32 data[9];
848         int word;
849         u32 trace_shmem_base;
850         if (BP_NOMCP(bp)) {
851                 BNX2X_ERR("NO MCP - can not dump\n");
852                 return;
853         }
854
855         if (BP_PATH(bp) == 0)
856                 trace_shmem_base = bp->common.shmem_base;
857         else
858                 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
859         addr = trace_shmem_base - 0x0800 + 4;
860         mark = REG_RD(bp, addr);
861         mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
862                         + ((mark + 0x3) & ~0x3) - 0x08000000;
863         pr_err("begin fw dump (mark 0x%x)\n", mark);
864
865         pr_err("");
866         for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
867                 for (word = 0; word < 8; word++)
868                         data[word] = htonl(REG_RD(bp, offset + 4*word));
869                 data[8] = 0x0;
870                 pr_cont("%s", (char *)data);
871         }
872         for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
873                 for (word = 0; word < 8; word++)
874                         data[word] = htonl(REG_RD(bp, offset + 4*word));
875                 data[8] = 0x0;
876                 pr_cont("%s", (char *)data);
877         }
878         pr_err("end of fw dump\n");
879 }
880
881 void bnx2x_panic_dump(struct bnx2x *bp)
882 {
883         int i;
884         u16 j;
885         struct hc_sp_status_block_data sp_sb_data;
886         int func = BP_FUNC(bp);
887 #ifdef BNX2X_STOP_ON_ERROR
888         u16 start = 0, end = 0;
889 #endif
890
891         bp->stats_state = STATS_STATE_DISABLED;
892         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
893
894         BNX2X_ERR("begin crash dump -----------------\n");
895
896         /* Indices */
897         /* Common */
898         BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
899                   "  spq_prod_idx(0x%x)\n",
900                   bp->def_idx, bp->def_att_idx,
901                   bp->attn_state, bp->spq_prod_idx);
902         BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
903                   bp->def_status_blk->atten_status_block.attn_bits,
904                   bp->def_status_blk->atten_status_block.attn_bits_ack,
905                   bp->def_status_blk->atten_status_block.status_block_id,
906                   bp->def_status_blk->atten_status_block.attn_bits_index);
907         BNX2X_ERR("     def (");
908         for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
909                 pr_cont("0x%x%s",
910                        bp->def_status_blk->sp_sb.index_values[i],
911                        (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");
912
913         for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
914                 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
915                         CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
916                         i*sizeof(u32));
917
918         pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
919                          "pf_id(0x%x)  vnic_id(0x%x)  "
920                          "vf_id(0x%x)  vf_valid (0x%x)\n",
921                sp_sb_data.igu_sb_id,
922                sp_sb_data.igu_seg_id,
923                sp_sb_data.p_func.pf_id,
924                sp_sb_data.p_func.vnic_id,
925                sp_sb_data.p_func.vf_id,
926                sp_sb_data.p_func.vf_valid);
927
928
929         for_each_eth_queue(bp, i) {
930                 struct bnx2x_fastpath *fp = &bp->fp[i];
931                 int loop;
932                 struct hc_status_block_data_e2 sb_data_e2;
933                 struct hc_status_block_data_e1x sb_data_e1x;
934                 struct hc_status_block_sm  *hc_sm_p =
935                         CHIP_IS_E2(bp) ?
936                         sb_data_e2.common.state_machine :
937                         sb_data_e1x.common.state_machine;
938                 struct hc_index_data *hc_index_p =
939                         CHIP_IS_E2(bp) ?
940                         sb_data_e2.index_data :
941                         sb_data_e1x.index_data;
942                 int data_size;
943                 u32 *sb_data_p;
944
945                 /* Rx */
946                 BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
947                           "  rx_comp_prod(0x%x)"
948                           "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
949                           i, fp->rx_bd_prod, fp->rx_bd_cons,
950                           fp->rx_comp_prod,
951                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
952                 BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
953                           "  fp_hc_idx(0x%x)\n",
954                           fp->rx_sge_prod, fp->last_max_sge,
955                           le16_to_cpu(fp->fp_hc_idx));
956
957                 /* Tx */
958                 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
959                           "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
960                           "  *tx_cons_sb(0x%x)\n",
961                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
962                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
963
964                 loop = CHIP_IS_E2(bp) ?
965                         HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
966
967                 /* host sb data */
968
969 #ifdef BCM_CNIC
970                 if (IS_FCOE_FP(fp))
971                         continue;
972 #endif
973                 BNX2X_ERR("     run indexes (");
974                 for (j = 0; j < HC_SB_MAX_SM; j++)
975                         pr_cont("0x%x%s",
976                                fp->sb_running_index[j],
977                                (j == HC_SB_MAX_SM - 1) ? ")" : " ");
978
979                 BNX2X_ERR("     indexes (");
980                 for (j = 0; j < loop; j++)
981                         pr_cont("0x%x%s",
982                                fp->sb_index_values[j],
983                                (j == loop - 1) ? ")" : " ");
984                 /* fw sb data */
985                 data_size = CHIP_IS_E2(bp) ?
986                         sizeof(struct hc_status_block_data_e2) :
987                         sizeof(struct hc_status_block_data_e1x);
988                 data_size /= sizeof(u32);
989                 sb_data_p = CHIP_IS_E2(bp) ?
990                         (u32 *)&sb_data_e2 :
991                         (u32 *)&sb_data_e1x;
992                 /* copy sb data in here */
993                 for (j = 0; j < data_size; j++)
994                         *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
995                                 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
996                                 j * sizeof(u32));
997
998                 if (CHIP_IS_E2(bp)) {
999                         pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
1000                                 "vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
1001                                 sb_data_e2.common.p_func.pf_id,
1002                                 sb_data_e2.common.p_func.vf_id,
1003                                 sb_data_e2.common.p_func.vf_valid,
1004                                 sb_data_e2.common.p_func.vnic_id,
1005                                 sb_data_e2.common.same_igu_sb_1b);
1006                 } else {
1007                         pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
1008                                 "vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
1009                                 sb_data_e1x.common.p_func.pf_id,
1010                                 sb_data_e1x.common.p_func.vf_id,
1011                                 sb_data_e1x.common.p_func.vf_valid,
1012                                 sb_data_e1x.common.p_func.vnic_id,
1013                                 sb_data_e1x.common.same_igu_sb_1b);
1014                 }
1015
1016                 /* SB_SMs data */
1017                 for (j = 0; j < HC_SB_MAX_SM; j++) {
1018                         pr_cont("SM[%d] __flags (0x%x) "
1019                                "igu_sb_id (0x%x)  igu_seg_id(0x%x) "
1020                                "time_to_expire (0x%x) "
1021                                "timer_value(0x%x)\n", j,
1022                                hc_sm_p[j].__flags,
1023                                hc_sm_p[j].igu_sb_id,
1024                                hc_sm_p[j].igu_seg_id,
1025                                hc_sm_p[j].time_to_expire,
1026                                hc_sm_p[j].timer_value);
1027                 }
1028
1029                 /* Indices data */
1030                 for (j = 0; j < loop; j++) {
1031                         pr_cont("INDEX[%d] flags (0x%x) "
1032                                          "timeout (0x%x)\n", j,
1033                                hc_index_p[j].flags,
1034                                hc_index_p[j].timeout);
1035                 }
1036         }
1037
1038 #ifdef BNX2X_STOP_ON_ERROR
1039         /* Rings */
1040         /* Rx */
1041         for_each_rx_queue(bp, i) {
1042                 struct bnx2x_fastpath *fp = &bp->fp[i];
1043
1044                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1045                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
1046                 for (j = start; j != end; j = RX_BD(j + 1)) {
1047                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
1048                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
1049
1050                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
1051                                   i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
1052                 }
1053
1054                 start = RX_SGE(fp->rx_sge_prod);
1055                 end = RX_SGE(fp->last_max_sge);
1056                 for (j = start; j != end; j = RX_SGE(j + 1)) {
1057                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
1058                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
1059
1060                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
1061                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
1062                 }
1063
1064                 start = RCQ_BD(fp->rx_comp_cons - 10);
1065                 end = RCQ_BD(fp->rx_comp_cons + 503);
1066                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
1067                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
1068
1069                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1070                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
1071                 }
1072         }
1073
1074         /* Tx */
1075         for_each_tx_queue(bp, i) {
1076                 struct bnx2x_fastpath *fp = &bp->fp[i];
1077
1078                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
1079                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
1080                 for (j = start; j != end; j = TX_BD(j + 1)) {
1081                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
1082
1083                         BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
1084                                   i, j, sw_bd->skb, sw_bd->first_bd);
1085                 }
1086
1087                 start = TX_BD(fp->tx_bd_cons - 10);
1088                 end = TX_BD(fp->tx_bd_cons + 254);
1089                 for (j = start; j != end; j = TX_BD(j + 1)) {
1090                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
1091
1092                         BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
1093                                   i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
1094                 }
1095         }
1096 #endif
1097         bnx2x_fw_dump(bp);
1098         bnx2x_mc_assert(bp);
1099         BNX2X_ERR("end crash dump -----------------\n");
1100 }
1101
1102 static void bnx2x_hc_int_enable(struct bnx2x *bp)
1103 {
1104         int port = BP_PORT(bp);
1105         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1106         u32 val = REG_RD(bp, addr);
1107         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1108         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1109
1110         if (msix) {
1111                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1112                          HC_CONFIG_0_REG_INT_LINE_EN_0);
1113                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1114                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1115         } else if (msi) {
1116                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1117                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1118                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1119                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1120         } else {
1121                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1122                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1123                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
1124                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1125
1126                 if (!CHIP_IS_E1(bp)) {
1127                         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1128                            val, port, addr);
1129
1130                         REG_WR(bp, addr, val);
1131
1132                         val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1133                 }
1134         }
1135
1136         if (CHIP_IS_E1(bp))
1137                 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1138
1139         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
1140            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1141
1142         REG_WR(bp, addr, val);
1143         /*
1144          * Ensure that HC_CONFIG is written before leading/trailing edge config
1145          */
1146         mmiowb();
1147         barrier();
1148
1149         if (!CHIP_IS_E1(bp)) {
1150                 /* init leading/trailing edge */
1151                 if (IS_MF(bp)) {
1152                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1153                         if (bp->port.pmf)
1154                                 /* enable nig and gpio3 attention */
1155                                 val |= 0x1100;
1156                 } else
1157                         val = 0xffff;
1158
1159                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1160                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1161         }
1162
1163         /* Make sure that interrupts are indeed enabled from here on */
1164         mmiowb();
1165 }
1166
1167 static void bnx2x_igu_int_enable(struct bnx2x *bp)
1168 {
1169         u32 val;
1170         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1171         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1172
1173         val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1174
1175         if (msix) {
1176                 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1177                          IGU_PF_CONF_SINGLE_ISR_EN);
1178                 val |= (IGU_PF_CONF_FUNC_EN |
1179                         IGU_PF_CONF_MSI_MSIX_EN |
1180                         IGU_PF_CONF_ATTN_BIT_EN);
1181         } else if (msi) {
1182                 val &= ~IGU_PF_CONF_INT_LINE_EN;
1183                 val |= (IGU_PF_CONF_FUNC_EN |
1184                         IGU_PF_CONF_MSI_MSIX_EN |
1185                         IGU_PF_CONF_ATTN_BIT_EN |
1186                         IGU_PF_CONF_SINGLE_ISR_EN);
1187         } else {
1188                 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1189                 val |= (IGU_PF_CONF_FUNC_EN |
1190                         IGU_PF_CONF_INT_LINE_EN |
1191                         IGU_PF_CONF_ATTN_BIT_EN |
1192                         IGU_PF_CONF_SINGLE_ISR_EN);
1193         }
1194
1195         DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
1196            val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1197
1198         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1199
1200         barrier();
1201
1202         /* init leading/trailing edge */
1203         if (IS_MF(bp)) {
1204                 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1205                 if (bp->port.pmf)
1206                         /* enable nig and gpio3 attention */
1207                         val |= 0x1100;
1208         } else
1209                 val = 0xffff;
1210
1211         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1212         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1213
1214         /* Make sure that interrupts are indeed enabled from here on */
1215         mmiowb();
1216 }
1217
1218 void bnx2x_int_enable(struct bnx2x *bp)
1219 {
1220         if (bp->common.int_block == INT_BLOCK_HC)
1221                 bnx2x_hc_int_enable(bp);
1222         else
1223                 bnx2x_igu_int_enable(bp);
1224 }
1225
1226 static void bnx2x_hc_int_disable(struct bnx2x *bp)
1227 {
1228         int port = BP_PORT(bp);
1229         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1230         u32 val = REG_RD(bp, addr);
1231
1232         /*
1233          * In E1 we must use only PCI configuration space to disable the
1234          * MSI/MSI-X capability.
1235          * It is forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block.
1236          */
1237         if (CHIP_IS_E1(bp)) {
1238                 /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
1239                  * use the mask register to prevent the HC from sending
1240                  * interrupts after we exit this function
1241                  */
1242                 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
1243
1244                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1245                          HC_CONFIG_0_REG_INT_LINE_EN_0 |
1246                          HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1247         } else
1248                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1249                          HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1250                          HC_CONFIG_0_REG_INT_LINE_EN_0 |
1251                          HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1252
1253         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1254            val, port, addr);
1255
1256         /* flush all outstanding writes */
1257         mmiowb();
1258
1259         REG_WR(bp, addr, val);
1260         if (REG_RD(bp, addr) != val)
1261                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1262 }
1263
1264 static void bnx2x_igu_int_disable(struct bnx2x *bp)
1265 {
1266         u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1267
1268         val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
1269                  IGU_PF_CONF_INT_LINE_EN |
1270                  IGU_PF_CONF_ATTN_BIT_EN);
1271
1272         DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
1273
1274         /* flush all outstanding writes */
1275         mmiowb();
1276
1277         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1278         if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
1279                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1280 }
1281
1282 static void bnx2x_int_disable(struct bnx2x *bp)
1283 {
1284         if (bp->common.int_block == INT_BLOCK_HC)
1285                 bnx2x_hc_int_disable(bp);
1286         else
1287                 bnx2x_igu_int_disable(bp);
1288 }
1289
1290 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1291 {
1292         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1293         int i, offset;
1294
1295         /* disable interrupt handling */
1296         atomic_inc(&bp->intr_sem);
1297         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1298
1299         if (disable_hw)
1300                 /* prevent the HW from sending interrupts */
1301                 bnx2x_int_disable(bp);
1302
1303         /* make sure all ISRs are done */
1304         if (msix) {
1305                 synchronize_irq(bp->msix_table[0].vector);
1306                 offset = 1;
1307 #ifdef BCM_CNIC
1308                 offset++;
1309 #endif
1310                 for_each_eth_queue(bp, i)
1311                         synchronize_irq(bp->msix_table[i + offset].vector);
1312         } else
1313                 synchronize_irq(bp->pdev->irq);
1314
1315         /* make sure sp_task is not running */
1316         cancel_delayed_work(&bp->sp_task);
1317         flush_workqueue(bnx2x_wq);
1318 }
1319
1320 /* fast path */
1321
1322 /*
1323  * General service functions
1324  */
1325
1326 /* Return true if succeeded to acquire the lock */
1327 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1328 {
1329         u32 lock_status;
1330         u32 resource_bit = (1 << resource);
1331         int func = BP_FUNC(bp);
1332         u32 hw_lock_control_reg;
1333
1334         DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1335
1336         /* Validating that the resource is within range */
1337         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1338                 DP(NETIF_MSG_HW,
1339                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1340                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1341                 return false;
1342         }
1343
1344         if (func <= 5)
1345                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1346         else
1347                 hw_lock_control_reg =
1348                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1349
1350         /* Try to acquire the lock */
1351         REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1352         lock_status = REG_RD(bp, hw_lock_control_reg);
1353         if (lock_status & resource_bit)
1354                 return true;
1355
1356         DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1357         return false;
1358 }
1359
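/*
 * Usage sketch (illustrative): the try-lock is non-blocking, so callers
 * that cannot sleep or spin simply skip the protected work on failure:
 *
 *	if (bnx2x_trylock_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08)) {
 *		// ... touch the shared hardware resource ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
 *	}
 *
 * HW_LOCK_RESOURCE_RESERVED_08 is just a stand-in resource id here;
 * bnx2x_release_hw_lock() is the matching release helper.
 */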
1360 #ifdef BCM_CNIC
1361 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1362 #endif
1363
1364 void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1365                            union eth_rx_cqe *rr_cqe)
1366 {
1367         struct bnx2x *bp = fp->bp;
1368         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1369         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1370
1371         DP(BNX2X_MSG_SP,
1372            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
1373            fp->index, cid, command, bp->state,
1374            rr_cqe->ramrod_cqe.ramrod_type);
1375
1376         switch (command | fp->state) {
1377         case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1378                 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1379                 fp->state = BNX2X_FP_STATE_OPEN;
1380                 break;
1381
1382         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1383                 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
1384                 fp->state = BNX2X_FP_STATE_HALTED;
1385                 break;
1386
1387         case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1388                 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
1389                 fp->state = BNX2X_FP_STATE_TERMINATED;
1390                 break;
1391
1392         default:
1393                 BNX2X_ERR("unexpected MC reply (%d)  "
1394                           "fp[%d] state is %x\n",
1395                           command, fp->index, fp->state);
1396                 break;
1397         }
1398
1399         smp_mb__before_atomic_inc();
1400         atomic_inc(&bp->spq_left);
1401         /* push the change in fp->state towards the memory */
1402         smp_wmb();
1403
1404         return;
1405 }
1406
1407 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1408 {
1409         struct bnx2x *bp = netdev_priv(dev_instance);
1410         u16 status = bnx2x_ack_int(bp);
1411         u16 mask;
1412         int i;
1413
1414         /* Return here if interrupt is shared and it's not for us */
1415         if (unlikely(status == 0)) {
1416                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1417                 return IRQ_NONE;
1418         }
1419         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1420
1421         /* Return here if interrupt is disabled */
1422         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1423                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1424                 return IRQ_HANDLED;
1425         }
1426
1427 #ifdef BNX2X_STOP_ON_ERROR
1428         if (unlikely(bp->panic))
1429                 return IRQ_HANDLED;
1430 #endif
1431
1432         for_each_eth_queue(bp, i) {
1433                 struct bnx2x_fastpath *fp = &bp->fp[i];
1434
1435                 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
1436                 if (status & mask) {
1437                         /* Handle Rx and Tx according to SB id */
1438                         prefetch(fp->rx_cons_sb);
1439                         prefetch(fp->tx_cons_sb);
1440                         prefetch(&fp->sb_running_index[SM_RX_ID]);
1441                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1442                         status &= ~mask;
1443                 }
1444         }
1445
1446 #ifdef BCM_CNIC
1447         mask = 0x2;
1448         if (status & (mask | 0x1)) {
1449                 struct cnic_ops *c_ops = NULL;
1450
1451                 rcu_read_lock();
1452                 c_ops = rcu_dereference(bp->cnic_ops);
1453                 if (c_ops)
1454                         c_ops->cnic_handler(bp->cnic_data, NULL);
1455                 rcu_read_unlock();
1456
1457                 status &= ~mask;
1458         }
1459 #endif
1460
1461         if (unlikely(status & 0x1)) {
1462                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1463
1464                 status &= ~0x1;
1465                 if (!status)
1466                         return IRQ_HANDLED;
1467         }
1468
1469         if (unlikely(status))
1470                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1471                    status);
1472
1473         return IRQ_HANDLED;
1474 }
1475
1476 /* end of fast path */
1477
1478
1479 /* Link */
1480
1481 /*
1482  * General service functions
1483  */
1484
1485 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1486 {
1487         u32 lock_status;
1488         u32 resource_bit = (1 << resource);
1489         int func = BP_FUNC(bp);
1490         u32 hw_lock_control_reg;
1491         int cnt;
1492
1493         /* Validating that the resource is within range */
1494         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1495                 DP(NETIF_MSG_HW,
1496                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1497                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1498                 return -EINVAL;
1499         }
1500
1501         if (func <= 5) {
1502                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1503         } else {
1504                 hw_lock_control_reg =
1505                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1506         }
1507
1508         /* Validating that the resource is not already taken */
1509         lock_status = REG_RD(bp, hw_lock_control_reg);
1510         if (lock_status & resource_bit) {
1511                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1512                    lock_status, resource_bit);
1513                 return -EEXIST;
1514         }
1515
1516         /* Try for 5 seconds every 5ms */
1517         for (cnt = 0; cnt < 1000; cnt++) {
1518                 /* Try to acquire the lock */
1519                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1520                 lock_status = REG_RD(bp, hw_lock_control_reg);
1521                 if (lock_status & resource_bit)
1522                         return 0;
1523
1524                 msleep(5);
1525         }
1526         DP(NETIF_MSG_HW, "Timeout\n");
1527         return -EAGAIN;
1528 }
1529
1530 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1531 {
1532         u32 lock_status;
1533         u32 resource_bit = (1 << resource);
1534         int func = BP_FUNC(bp);
1535         u32 hw_lock_control_reg;
1536
1537         DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1538
1539         /* Validating that the resource is within range */
1540         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1541                 DP(NETIF_MSG_HW,
1542                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1543                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1544                 return -EINVAL;
1545         }
1546
1547         if (func <= 5) {
1548                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1549         } else {
1550                 hw_lock_control_reg =
1551                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1552         }
1553
1554         /* Validating that the resource is currently taken */
1555         lock_status = REG_RD(bp, hw_lock_control_reg);
1556         if (!(lock_status & resource_bit)) {
1557                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1558                    lock_status, resource_bit);
1559                 return -EFAULT;
1560         }
1561
1562         REG_WR(bp, hw_lock_control_reg, resource_bit);
1563         return 0;
1564 }
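
/*
 * Illustrative sketch (not from the original file): the blocking
 * acquire/release pattern used by callers such as bnx2x_set_gpio() below.
 * The resource id, register and value are placeholders.
 */
#if 0
static int bnx2x_example_locked_wr(struct bnx2x *bp, u32 resource,
                                   u32 reg, u32 val)
{
        int rc = bnx2x_acquire_hw_lock(bp, resource);

        if (rc)                 /* -EINVAL, -EEXIST or -EAGAIN */
                return rc;
        REG_WR(bp, reg, val);
        return bnx2x_release_hw_lock(bp, resource);
}
#endif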
1565
1566
1567 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1568 {
1569         /* The GPIO should be swapped if swap register is set and active */
1570         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1571                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1572         int gpio_shift = gpio_num +
1573                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1574         u32 gpio_mask = (1 << gpio_shift);
1575         u32 gpio_reg;
1576         int value;
1577
1578         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1579                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1580                 return -EINVAL;
1581         }
1582
1583         /* read GPIO value */
1584         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1585
1586         /* get the requested pin value */
1587         if ((gpio_reg & gpio_mask) == gpio_mask)
1588                 value = 1;
1589         else
1590                 value = 0;
1591
1592         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1593
1594         return value;
1595 }
1596
1597 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1598 {
1599         /* The GPIO should be swapped if swap register is set and active */
1600         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1601                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1602         int gpio_shift = gpio_num +
1603                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1604         u32 gpio_mask = (1 << gpio_shift);
1605         u32 gpio_reg;
1606
1607         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1608                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1609                 return -EINVAL;
1610         }
1611
1612         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1613         /* read GPIO and mask all but the float bits */
1614         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1615
1616         switch (mode) {
1617         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1618                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1619                    gpio_num, gpio_shift);
1620                 /* clear FLOAT and set CLR */
1621                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1622                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1623                 break;
1624
1625         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1626                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1627                    gpio_num, gpio_shift);
1628                 /* clear FLOAT and set SET */
1629                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1630                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1631                 break;
1632
1633         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1634                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1635                    gpio_num, gpio_shift);
1636                 /* set FLOAT */
1637                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1638                 break;
1639
1640         default:
1641                 break;
1642         }
1643
1644         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1645         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1646
1647         return 0;
1648 }
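
/*
 * Illustrative sketch (not from the original file): pulsing a GPIO pin low
 * and then floating it again with the helper above.  The pin choice and
 * the 100ms delay are arbitrary for the example.
 */
#if 0
static void bnx2x_example_gpio_pulse(struct bnx2x *bp, u8 port)
{
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
                       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
        msleep(100);
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
                       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
}
#endif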
1649
1650 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1651 {
1652         /* The GPIO should be swapped if swap register is set and active */
1653         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1654                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1655         int gpio_shift = gpio_num +
1656                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1657         u32 gpio_mask = (1 << gpio_shift);
1658         u32 gpio_reg;
1659
1660         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1661                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1662                 return -EINVAL;
1663         }
1664
1665         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1666         /* read GPIO int */
1667         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1668
1669         switch (mode) {
1670         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1671                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1672                                    "output low\n", gpio_num, gpio_shift);
1673                 /* clear SET and set CLR */
1674                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1675                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1676                 break;
1677
1678         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1679                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1680                                    "output high\n", gpio_num, gpio_shift);
1681                 /* clear CLR and set SET */
1682                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1683                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1684                 break;
1685
1686         default:
1687                 break;
1688         }
1689
1690         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1691         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1692
1693         return 0;
1694 }
1695
1696 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1697 {
1698         u32 spio_mask = (1 << spio_num);
1699         u32 spio_reg;
1700
1701         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1702             (spio_num > MISC_REGISTERS_SPIO_7)) {
1703                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1704                 return -EINVAL;
1705         }
1706
1707         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1708         /* read SPIO and mask except the float bits */
1709         /* read SPIO and mask all but the float bits */
1710
1711         switch (mode) {
1712         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1713                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1714                 /* clear FLOAT and set CLR */
1715                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1716                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1717                 break;
1718
1719         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1720                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1721                 /* clear FLOAT and set SET */
1722                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1723                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1724                 break;
1725
1726         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1727                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1728                 /* set FLOAT */
1729                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1730                 break;
1731
1732         default:
1733                 break;
1734         }
1735
1736         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1737         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1738
1739         return 0;
1740 }
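
/*
 * Note: unlike the GPIO helpers above, bnx2x_set_spio() takes no port
 * argument and applies no port-swap logic, which suggests SPIO pins are
 * shared by both ports; only the SPIO_4..SPIO_7 range is accepted here.
 */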
1741
1742 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1743 {
1744         u32 sel_phy_idx = 0;
1745         if (bp->link_vars.link_up) {
1746                 sel_phy_idx = EXT_PHY1;
1747                 /* If the link is SERDES, check whether EXT_PHY2 is the active one */
1748                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1749                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1750                         sel_phy_idx = EXT_PHY2;
1751         } else {
1752
1753                 switch (bnx2x_phy_selection(&bp->link_params)) {
1754                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1755                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1756                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1757                        sel_phy_idx = EXT_PHY1;
1758                        break;
1759                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1760                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1761                        sel_phy_idx = EXT_PHY2;
1762                        break;
1763                 }
1764         }
1765         /*
1766          * The selected active PHY is always determined after swapping (in
1767          * case PHY swapping is enabled), so when swapping is enabled we
1768          * need to reverse the configuration.
1769          */
1770
1771         if (bp->link_params.multi_phy_config &
1772             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1773                 if (sel_phy_idx == EXT_PHY1)
1774                         sel_phy_idx = EXT_PHY2;
1775                 else if (sel_phy_idx == EXT_PHY2)
1776                         sel_phy_idx = EXT_PHY1;
1777         }
1778         return LINK_CONFIG_IDX(sel_phy_idx);
1779 }
1780
1781 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1782 {
1783         u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
1784         switch (bp->link_vars.ieee_fc &
1785                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1786         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1787                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1788                                                    ADVERTISED_Pause);
1789                 break;
1790
1791         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1792                 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
1793                                                   ADVERTISED_Pause);
1794                 break;
1795
1796         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1797                 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
1798                 break;
1799
1800         default:
1801                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1802                                                    ADVERTISED_Pause);
1803                 break;
1804         }
1805 }
1806
1807 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1808 {
1809         if (!BP_NOMCP(bp)) {
1810                 u8 rc;
1811                 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1812                 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
1813                 /* Initialize link parameters structure variables */
1814                 /* It is recommended to turn off RX FC for jumbo frames
1815                    for better performance */
1816                 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
1817                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1818                 else
1819                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1820
1821                 bnx2x_acquire_phy_lock(bp);
1822
1823                 if (load_mode == LOAD_DIAG) {
1824                         bp->link_params.loopback_mode = LOOPBACK_XGXS;
1825                         bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1826                 }
1827
1828                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1829
1830                 bnx2x_release_phy_lock(bp);
1831
1832                 bnx2x_calc_fc_adv(bp);
1833
1834                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1835                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1836                         bnx2x_link_report(bp);
1837                 }
1838                 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
1839                 return rc;
1840         }
1841         BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
1842         return -EINVAL;
1843 }
1844
1845 void bnx2x_link_set(struct bnx2x *bp)
1846 {
1847         if (!BP_NOMCP(bp)) {
1848                 bnx2x_acquire_phy_lock(bp);
1849                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1850                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1851                 bnx2x_release_phy_lock(bp);
1852
1853                 bnx2x_calc_fc_adv(bp);
1854         } else
1855                 BNX2X_ERR("Bootcode is missing - cannot set link\n");
1856 }
1857
1858 static void bnx2x__link_reset(struct bnx2x *bp)
1859 {
1860         if (!BP_NOMCP(bp)) {
1861                 bnx2x_acquire_phy_lock(bp);
1862                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1863                 bnx2x_release_phy_lock(bp);
1864         } else
1865                 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
1866 }
1867
1868 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
1869 {
1870         u8 rc = 0;
1871
1872         if (!BP_NOMCP(bp)) {
1873                 bnx2x_acquire_phy_lock(bp);
1874                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1875                                      is_serdes);
1876                 bnx2x_release_phy_lock(bp);
1877         } else
1878                 BNX2X_ERR("Bootcode is missing - cannot test link\n");
1879
1880         return rc;
1881 }
1882
1883 static void bnx2x_init_port_minmax(struct bnx2x *bp)
1884 {
1885         u32 r_param = bp->link_vars.line_speed / 8;
1886         u32 fair_periodic_timeout_usec;
1887         u32 t_fair;
1888
1889         memset(&(bp->cmng.rs_vars), 0,
1890                sizeof(struct rate_shaping_vars_per_port));
1891         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
1892
1893         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1894         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1895
1896         /* this is the threshold below which no timer arming will occur.
1897            The 1.25 coefficient makes the threshold a little bigger than
1898            the real time, to compensate for timer inaccuracy */
1899         bp->cmng.rs_vars.rs_threshold =
1900                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1901
1902         /* resolution of fairness timer */
1903         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1904         /* for 10G it is 1000usec. for 1G it is 10000usec. */
1905         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
1906
1907         /* this is the threshold below which we won't arm the timer anymore */
1908         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1909
1910         /* we multiply by 1e3/8 to get bytes/msec.
1911            We don't want the credits to exceed a credit
1912            of t_fair*FAIR_MEM (the algorithm resolution) */
1913         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1914         /* since each tick is 4 usec */
1915         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
1916 }
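
/*
 * Worked example for the math above (assuming RS_PERIODIC_TIMEOUT_USEC is
 * 100, as the "100 usec" comment implies): at 10G, r_param = 10000/8 =
 * 1250, so rs_threshold = 100 * 1250 * 5/4 = 156250, and t_fair =
 * T_FAIR_COEF/10000, i.e. the 1000usec mentioned in the comment.
 */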
1917
1918 /* Calculates the sum of vn_min_rates.
1919    It's needed for further normalizing of the min_rates.
1920    Returns:
1921      sum of vn_min_rates.
1922        or
1923      0 - if all the min_rates are 0.
1924      In the latter case the fairness algorithm should be deactivated.
1925      If not all min_rates are zero then those that are zero will be set to 1.
1926  */
1927 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1928 {
1929         int all_zero = 1;
1930         int vn;
1931
1932         bp->vn_weight_sum = 0;
1933         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1934                 u32 vn_cfg = bp->mf_config[vn];
1935                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1936                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1937
1938                 /* Skip hidden vns */
1939                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1940                         continue;
1941
1942                 /* If min rate is zero - set it to 1 */
1943                 if (!vn_min_rate)
1944                         vn_min_rate = DEF_MIN_RATE;
1945                 else
1946                         all_zero = 0;
1947
1948                 bp->vn_weight_sum += vn_min_rate;
1949         }
1950
1951         /* ... only if all min rates are zeros - disable fairness */
1952         if (all_zero) {
1953                 bp->cmng.flags.cmng_enables &=
1954                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1955                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1956                    "  fairness will be disabled\n");
1957         } else
1958                 bp->cmng.flags.cmng_enables |=
1959                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1960 }
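
/*
 * Example of the normalization above: if the MIN_BW fields of two visible
 * vns hold 30 and 70, the vn_min_rates become 3000 and 7000 and
 * vn_weight_sum is 10000; if all fields were 0, fairness would be
 * disabled instead.
 */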
1961
1962 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1963 {
1964         struct rate_shaping_vars_per_vn m_rs_vn;
1965         struct fairness_vars_per_vn m_fair_vn;
1966         u32 vn_cfg = bp->mf_config[vn];
1967         int func = 2*vn + BP_PORT(bp);
1968         u16 vn_min_rate, vn_max_rate;
1969         int i;
1970
1971         /* If function is hidden - set min and max to zeroes */
1972         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1973                 vn_min_rate = 0;
1974                 vn_max_rate = 0;
1975
1976         } else {
1977                 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
1978
1979                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1980                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1981                 /* If fairness is enabled (not all min rates are zeroes) and
1982                    if current min rate is zero - set it to 1.
1983                    This is a requirement of the algorithm. */
1984                 if (bp->vn_weight_sum && (vn_min_rate == 0))
1985                         vn_min_rate = DEF_MIN_RATE;
1986
1987                 if (IS_MF_SI(bp))
1988                         /* maxCfg in percents of linkspeed */
1989                         vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
1990                 else
1991                         /* maxCfg is absolute in 100Mb units */
1992                         vn_max_rate = maxCfg * 100;
1993         }
1994
1995         DP(NETIF_MSG_IFUP,
1996            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
1997            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1998
1999         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2000         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2001
2002         /* global vn counter - maximal Mbps for this vn */
2003         m_rs_vn.vn_counter.rate = vn_max_rate;
2004
2005         /* quota - number of bytes transmitted in this period */
2006         m_rs_vn.vn_counter.quota =
2007                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2008
2009         if (bp->vn_weight_sum) {
2010                 /* credit for each period of the fairness algorithm:
2011                    number of bytes in T_FAIR (the vn share the port rate).
2012                    vn_weight_sum should not be larger than 10000, thus
2013                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2014                    than zero */
2015                 m_fair_vn.vn_credit_delta =
2016                         max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2017                                                    (8 * bp->vn_weight_sum))),
2018                               (bp->cmng.fair_vars.fair_threshold * 2));
2019                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2020                    m_fair_vn.vn_credit_delta);
2021         }
2022
2023         /* Store it to internal memory */
2024         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2025                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2026                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2027                        ((u32 *)(&m_rs_vn))[i]);
2028
2029         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2030                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2031                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2032                        ((u32 *)(&m_fair_vn))[i]);
2033 }
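
/*
 * Worked example for the quota above (assuming RS_PERIODIC_TIMEOUT_USEC is
 * 100): a vn capped at vn_max_rate = 2500 (Mbps) gets a per-period quota
 * of 2500 * 100 / 8 = 31250 bytes.
 */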
2034
2035 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2036 {
2037         if (CHIP_REV_IS_SLOW(bp))
2038                 return CMNG_FNS_NONE;
2039         if (IS_MF(bp))
2040                 return CMNG_FNS_MINMAX;
2041
2042         return CMNG_FNS_NONE;
2043 }
2044
2045 static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2046 {
2047         int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2048
2049         if (BP_NOMCP(bp))
2050                 return; /* what should be the default value in this case? */
2051
2052         /* For 2 port configuration the absolute function number formula
2053          * is:
2054          *      abs_func = 2 * vn + BP_PORT + BP_PATH
2055          *
2056          *      and there are 4 functions per port
2057          *
2058          * For 4 port configuration it is
2059          *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2060          *
2061          *      and there are 2 functions per port
2062          */
2063         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2064                 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2065
2066                 if (func >= E1H_FUNC_MAX)
2067                         break;
2068
2069                 bp->mf_config[vn] =
2070                         MF_CFG_RD(bp, func_mf_config[func].config);
2071         }
2072 }
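
/*
 * Example of the abs_func formula above: in 2-port mode (n == 1), vn 1 on
 * port 0, path 0 maps to func 1*(2*1 + 0) + 0 = 2; in 4-port mode
 * (n == 2) the same vn on port 1, path 0 maps to func 2*(2*1 + 1) + 0 = 6.
 */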
2073
2074 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2075 {
2076
2077         if (cmng_type == CMNG_FNS_MINMAX) {
2078                 int vn;
2079
2080                 /* clear cmng_enables */
2081                 bp->cmng.flags.cmng_enables = 0;
2082
2083                 /* read mf conf from shmem */
2084                 if (read_cfg)
2085                         bnx2x_read_mf_cfg(bp);
2086
2087                 /* Init rate shaping and fairness contexts */
2088                 bnx2x_init_port_minmax(bp);
2089
2090                 /* vn_weight_sum and enable fairness if not 0 */
2091                 bnx2x_calc_vn_weight_sum(bp);
2092
2093                 /* calculate and set min-max rate for each vn */
2094                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2095                         bnx2x_init_vn_minmax(bp, vn);
2096
2097                 /* always enable rate shaping and fairness */
2098                 bp->cmng.flags.cmng_enables |=
2099                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2100                 if (!bp->vn_weight_sum)
2101                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2102                                    "  fairness will be disabled\n");
2103                 return;
2104         }
2105
2106         /* rate shaping and fairness are disabled */
2107         DP(NETIF_MSG_IFUP,
2108            "rate shaping and fairness are disabled\n");
2109 }
2110
2111 static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2112 {
2113         int port = BP_PORT(bp);
2114         int func;
2115         int vn;
2116
2117         /* Set the attention towards other drivers on the same port */
2118         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2119                 if (vn == BP_E1HVN(bp))
2120                         continue;
2121
2122                 func = ((vn << 1) | port);
2123                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2124                        (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2125         }
2126 }
2127
2128 /* This function is called upon link interrupt */
2129 static void bnx2x_link_attn(struct bnx2x *bp)
2130 {
2131         u32 prev_link_status = bp->link_vars.link_status;
2132         /* Make sure that we are synced with the current statistics */
2133         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2134
2135         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2136
2137         if (bp->link_vars.link_up) {
2138
2139                 /* dropless flow control */
2140                 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
2141                         int port = BP_PORT(bp);
2142                         u32 pause_enabled = 0;
2143
2144                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2145                                 pause_enabled = 1;
2146
2147                         REG_WR(bp, BAR_USTRORM_INTMEM +
2148                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2149                                pause_enabled);
2150                 }
2151
2152                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2153                         struct host_port_stats *pstats;
2154
2155                         pstats = bnx2x_sp(bp, port_stats);
2156                         /* reset old bmac stats */
2157                         memset(&(pstats->mac_stx[0]), 0,
2158                                sizeof(struct mac_stx));
2159                 }
2160                 if (bp->state == BNX2X_STATE_OPEN)
2161                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2162         }
2163
2164         /* indicate link status only if link status actually changed */
2165         if (prev_link_status != bp->link_vars.link_status)
2166                 bnx2x_link_report(bp);
2167
2168         if (IS_MF(bp))
2169                 bnx2x_link_sync_notify(bp);
2170
2171         if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2172                 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2173
2174                 if (cmng_fns != CMNG_FNS_NONE) {
2175                         bnx2x_cmng_fns_init(bp, false, cmng_fns);
2176                         storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2177                 } else
2178                         /* rate shaping and fairness are disabled */
2179                         DP(NETIF_MSG_IFUP,
2180                            "single function mode without fairness\n");
2181         }
2182 }
2183
2184 void bnx2x__link_status_update(struct bnx2x *bp)
2185 {
2186         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2187                 return;
2188
2189         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2190
2191         if (bp->link_vars.link_up)
2192                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2193         else
2194                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2195
2196         /* the link status update could be the result of a DCC event,
2197            hence re-read the shmem mf configuration */
2198         bnx2x_read_mf_cfg(bp);
2199
2200         /* indicate link status */
2201         bnx2x_link_report(bp);
2202 }
2203
2204 static void bnx2x_pmf_update(struct bnx2x *bp)
2205 {
2206         int port = BP_PORT(bp);
2207         u32 val;
2208
2209         bp->port.pmf = 1;
2210         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2211
2212         /* enable nig attention */
2213         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2214         if (bp->common.int_block == INT_BLOCK_HC) {
2215                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2216                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2217         } else if (CHIP_IS_E2(bp)) {
2218                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2219                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2220         }
2221
2222         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2223 }
2224
2225 /* end of Link */
2226
2227 /* slow path */
2228
2229 /*
2230  * General service functions
2231  */
2232
2233 /* send the MCP a request, block until there is a reply */
2234 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2235 {
2236         int mb_idx = BP_FW_MB_IDX(bp);
2237         u32 seq = ++bp->fw_seq;
2238         u32 rc = 0;
2239         u32 cnt = 1;
2240         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2241
2242         mutex_lock(&bp->fw_mb_mutex);
2243         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2244         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2245
2246         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2247
2248         do {
2249                 /* let the FW do its magic ... */
2250                 msleep(delay);
2251
2252                 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2253
2254                 /* Give the FW up to 5 seconds (500*10ms) */
2255         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2256
2257         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2258            cnt*delay, rc, seq);
2259
2260         /* is this a reply to our command? */
2261         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2262                 rc &= FW_MSG_CODE_MASK;
2263         else {
2264                 /* FW BUG! */
2265                 BNX2X_ERR("FW failed to respond!\n");
2266                 bnx2x_fw_dump(bp);
2267                 rc = 0;
2268         }
2269         mutex_unlock(&bp->fw_mb_mutex);
2270
2271         return rc;
2272 }
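
/*
 * Illustrative sketch (not from the original file): a caller checks the
 * masked return code of bnx2x_fw_command(); 0 means the MCP did not
 * answer within the timeout, as with the DCC acknowledgements sent later
 * in this file.
 */
#if 0
static void bnx2x_example_fw_cmd(struct bnx2x *bp)
{
        u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);

        if (!rc)
                BNX2X_ERR("MCP did not respond to DCC_OK\n");
}
#endif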
2273
2274 static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2275 {
2276 #ifdef BCM_CNIC
2277         if (IS_FCOE_FP(fp) && IS_MF(bp))
2278                 return false;
2279 #endif
2280         return true;
2281 }
2282
2283 /* must be called under rtnl_lock */
2284 static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2285 {
2286         u32 mask = (1 << cl_id);
2287
2288         /* initial setting is BNX2X_ACCEPT_NONE */
2289         u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2290         u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2291         u8 unmatched_unicast = 0;
2292
2293         if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2294                 unmatched_unicast = 1;
2295
2296         if (filters & BNX2X_PROMISCUOUS_MODE) {
2297                 /* promiscuous - accept all, drop none */
2298                 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2299                 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2300                 if (IS_MF_SI(bp)) {
2301                         /*
2302                          * SI mode defines to accept in promiscuos mode
2303                          * In SI mode, promiscuous mode accepts
2304                          */
2305                         unmatched_unicast = 1;
2306                         accp_all_ucast = 0;
2307                 }
2308         }
2309         if (filters & BNX2X_ACCEPT_UNICAST) {
2310                 /* accept matched ucast */
2311                 drop_all_ucast = 0;
2312         }
2313         if (filters & BNX2X_ACCEPT_MULTICAST)
2314                 /* accept matched mcast */
2315                 drop_all_mcast = 0;
2316
2317         if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2318                 /* accept all ucast */
2319                 drop_all_ucast = 0;
2320                 accp_all_ucast = 1;
2321         }
2322         if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2323                 /* accept all mcast */
2324                 drop_all_mcast = 0;
2325                 accp_all_mcast = 1;
2326         }
2327         if (filters & BNX2X_ACCEPT_BROADCAST) {
2328                 /* accept (all) bcast */
2329                 drop_all_bcast = 0;
2330                 accp_all_bcast = 1;
2331         }
2332
2333         bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2334                 bp->mac_filters.ucast_drop_all | mask :
2335                 bp->mac_filters.ucast_drop_all & ~mask;
2336
2337         bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2338                 bp->mac_filters.mcast_drop_all | mask :
2339                 bp->mac_filters.mcast_drop_all & ~mask;
2340
2341         bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2342                 bp->mac_filters.bcast_drop_all | mask :
2343                 bp->mac_filters.bcast_drop_all & ~mask;
2344
2345         bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2346                 bp->mac_filters.ucast_accept_all | mask :
2347                 bp->mac_filters.ucast_accept_all & ~mask;
2348
2349         bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2350                 bp->mac_filters.mcast_accept_all | mask :
2351                 bp->mac_filters.mcast_accept_all & ~mask;
2352
2353         bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2354                 bp->mac_filters.bcast_accept_all | mask :
2355                 bp->mac_filters.bcast_accept_all & ~mask;
2356
2357         bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2358                 bp->mac_filters.unmatched_unicast | mask :
2359                 bp->mac_filters.unmatched_unicast & ~mask;
2360 }
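
/*
 * Example of the per-client bookkeeping above: calling it with filters =
 * BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST clears cl_id's bit in
 * ucast_drop_all and bcast_drop_all (and sets it in bcast_accept_all)
 * while leaving the client's mcast_drop_all bit set.
 */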
2361
2362 static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2363 {
2364         struct tstorm_eth_function_common_config tcfg = {0};
2365         u16 rss_flgs;
2366
2367         /* tpa */
2368         if (p->func_flgs & FUNC_FLG_TPA)
2369                 tcfg.config_flags |=
2370                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2371
2372         /* set rss flags */
2373         rss_flgs = (p->rss->mode <<
2374                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2375
2376         if (p->rss->cap & RSS_IPV4_CAP)
2377                 rss_flgs |= RSS_IPV4_CAP_MASK;
2378         if (p->rss->cap & RSS_IPV4_TCP_CAP)
2379                 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2380         if (p->rss->cap & RSS_IPV6_CAP)
2381                 rss_flgs |= RSS_IPV6_CAP_MASK;
2382         if (p->rss->cap & RSS_IPV6_TCP_CAP)
2383                 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2384
2385         tcfg.config_flags |= rss_flgs;
2386         tcfg.rss_result_mask = p->rss->result_mask;
2387
2388         storm_memset_func_cfg(bp, &tcfg, p->func_id);
2389
2390         /* Enable the function in the FW */
2391         storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2392         storm_memset_func_en(bp, p->func_id, 1);
2393
2394         /* statistics */
2395         if (p->func_flgs & FUNC_FLG_STATS) {
2396                 struct stats_indication_flags stats_flags = {0};
2397                 stats_flags.collect_eth = 1;
2398
2399                 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2400                 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2401
2402                 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2403                 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2404
2405                 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2406                 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2407
2408                 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2409                 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2410         }
2411
2412         /* spq */
2413         if (p->func_flgs & FUNC_FLG_SPQ) {
2414                 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2415                 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2416                        XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2417         }
2418 }
2419
2420 static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2421                                      struct bnx2x_fastpath *fp)
2422 {
2423         u16 flags = 0;
2424
2425         /* calculate queue flags */
2426         flags |= QUEUE_FLG_CACHE_ALIGN;
2427         flags |= QUEUE_FLG_HC;
2428         flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
2429
2430         flags |= QUEUE_FLG_VLAN;
2431         DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2432
2433         if (!fp->disable_tpa)
2434                 flags |= QUEUE_FLG_TPA;
2435
2436         flags = stat_counter_valid(bp, fp) ?
2437                         (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
2438
2439         return flags;
2440 }
2441
2442 static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2443         struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2444         struct bnx2x_rxq_init_params *rxq_init)
2445 {
2446         u16 max_sge = 0;
2447         u16 sge_sz = 0;
2448         u16 tpa_agg_size = 0;
2449
2450         /* calculate queue flags */
2451         u16 flags = bnx2x_get_cl_flags(bp, fp);
2452
2453         if (!fp->disable_tpa) {
2454                 pause->sge_th_hi = 250;
2455                 pause->sge_th_lo = 150;
2456                 tpa_agg_size = min_t(u32,
2457                         (min_t(u32, 8, MAX_SKB_FRAGS) *
2458                         SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2459                 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2460                         SGE_PAGE_SHIFT;
2461                 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2462                           (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2463                 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2464                                     0xffff);
2465         }
2466
2467         /* pause - not for e1 */
2468         if (!CHIP_IS_E1(bp)) {
2469                 pause->bd_th_hi = 350;
2470                 pause->bd_th_lo = 250;
2471                 pause->rcq_th_hi = 350;
2472                 pause->rcq_th_lo = 250;
2473                 pause->sge_th_hi = 0;
2474                 pause->sge_th_lo = 0;
2475                 pause->pri_map = 1;
2476         }
2477
2478         /* rxq setup */
2479         rxq_init->flags = flags;
2480         rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2481         rxq_init->dscr_map = fp->rx_desc_mapping;
2482         rxq_init->sge_map = fp->rx_sge_mapping;
2483         rxq_init->rcq_map = fp->rx_comp_mapping;
2484         rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2485         rxq_init->mtu = bp->dev->mtu;
2486         rxq_init->buf_sz = bp->rx_buf_size;
2487         rxq_init->cl_qzone_id = fp->cl_qzone_id;
2488         rxq_init->cl_id = fp->cl_id;
2489         rxq_init->spcl_id = fp->cl_id;
2490         rxq_init->stat_id = fp->cl_id;
2491         rxq_init->tpa_agg_sz = tpa_agg_size;
2492         rxq_init->sge_buf_sz = sge_sz;
2493         rxq_init->max_sges_pkt = max_sge;
2494         rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2495         rxq_init->fw_sb_id = fp->fw_sb_id;
2496
2497         if (IS_FCOE_FP(fp))
2498                 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2499         else
2500                 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2501
2502         rxq_init->cid = HW_CID(bp, fp->cid);
2503
2504         rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2505 }
2506
2507 static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2508         struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2509 {
2510         u16 flags = bnx2x_get_cl_flags(bp, fp);
2511
2512         txq_init->flags = flags;
2513         txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2514         txq_init->dscr_map = fp->tx_desc_mapping;
2515         txq_init->stat_id = fp->cl_id;
2516         txq_init->cid = HW_CID(bp, fp->cid);
2517         txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2518         txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2519         txq_init->fw_sb_id = fp->fw_sb_id;
2520
2521         if (IS_FCOE_FP(fp)) {
2522                 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2523                 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2524         }
2525
2526         txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2527 }
2528
2529 static void bnx2x_pf_init(struct bnx2x *bp)
2530 {
2531         struct bnx2x_func_init_params func_init = {0};
2532         struct bnx2x_rss_params rss = {0};
2533         struct event_ring_data eq_data = { {0} };
2534         u16 flags;
2535
2536         /* pf specific setups */
2537         if (!CHIP_IS_E1(bp))
2538                 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
2539
2540         if (CHIP_IS_E2(bp)) {
2541                 /* reset IGU PF statistics: MSIX + ATTN */
2542                 /* PF */
2543                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2544                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2545                            (CHIP_MODE_IS_4_PORT(bp) ?
2546                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2547                 /* ATTN */
2548                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2549                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2550                            BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2551                            (CHIP_MODE_IS_4_PORT(bp) ?
2552                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2553         }
2554
2555         /* function setup flags */
2556         flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2557
2558         if (CHIP_IS_E1x(bp))
2559                 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2560         else
2561                 flags |= FUNC_FLG_TPA;
2562
2563         /* function setup */
2564
2565         /**
2566          * Although RSS is meaningless when there is a single HW queue, we
2567          * still need it enabled in order to have HW Rx hash generated.
2568          */
2569         rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2570                    RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2571         rss.mode = bp->multi_mode;
2572         rss.result_mask = MULTI_MASK;
2573         func_init.rss = &rss;
2574
2575         func_init.func_flgs = flags;
2576         func_init.pf_id = BP_FUNC(bp);
2577         func_init.func_id = BP_FUNC(bp);
2578         func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2579         func_init.spq_map = bp->spq_mapping;
2580         func_init.spq_prod = bp->spq_prod_idx;
2581
2582         bnx2x_func_init(bp, &func_init);
2583
2584         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2585
2586         /*
2587          * Congestion management values depend on the link rate.
2588          * There is no active link yet, so the initial link rate is set
2589          * to 10 Gbps. When the link comes up, the congestion management
2590          * values are re-calculated according to the actual link rate.
2591          */
2592         bp->link_vars.line_speed = SPEED_10000;
2593         bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2594
2595         /* Only the PMF sets the HW */
2596         if (bp->port.pmf)
2597                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2598
2599         /* no rx until link is up */
2600         bp->rx_mode = BNX2X_RX_MODE_NONE;
2601         bnx2x_set_storm_rx_mode(bp);
2602
2603         /* init Event Queue */
2604         eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2605         eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2606         eq_data.producer = bp->eq_prod;
2607         eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2608         eq_data.sb_id = DEF_SB_ID;
2609         storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2610 }
2611
2612
2613 static void bnx2x_e1h_disable(struct bnx2x *bp)
2614 {
2615         int port = BP_PORT(bp);
2616
2617         netif_tx_disable(bp->dev);
2618
2619         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2620
2621         netif_carrier_off(bp->dev);
2622 }
2623
2624 static void bnx2x_e1h_enable(struct bnx2x *bp)
2625 {
2626         int port = BP_PORT(bp);
2627
2628         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2629
2630         /* Tx queues should only be re-enabled */
2631         netif_tx_wake_all_queues(bp->dev);
2632
2633         /*
2634          * Should not call netif_carrier_on since it will be called if the link
2635          * is up when checking for link state
2636          */
2637 }
2638
2639 /* called due to MCP event (on pmf):
2640  *      reread new bandwidth configuration
2641  *      configure FW
2642  *      notify other functions about the change
2643  */
2644 static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
2645 {
2646         if (bp->link_vars.link_up) {
2647                 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2648                 bnx2x_link_sync_notify(bp);
2649         }
2650         storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2651 }
2652
2653 static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
2654 {
2655         bnx2x_config_mf_bw(bp);
2656         bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
2657 }
2658
2659 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2660 {
2661         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2662
2663         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2664
2665                 /*
2666                  * This is the only place besides the function initialization
2667                  * where the bp->flags can change so it is done without any
2668                  * locks
2669                  */
2670                 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2671                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2672                         bp->flags |= MF_FUNC_DIS;
2673
2674                         bnx2x_e1h_disable(bp);
2675                 } else {
2676                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2677                         bp->flags &= ~MF_FUNC_DIS;
2678
2679                         bnx2x_e1h_enable(bp);
2680                 }
2681                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2682         }
2683         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2684                 bnx2x_config_mf_bw(bp);
2685                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2686         }
2687
2688         /* Report results to MCP */
2689         if (dcc_event)
2690                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2691         else
2692                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2693 }
2694
2695 /* must be called under the spq lock */
2696 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2697 {
2698         struct eth_spe *next_spe = bp->spq_prod_bd;
2699
2700         if (bp->spq_prod_bd == bp->spq_last_bd) {
2701                 bp->spq_prod_bd = bp->spq;
2702                 bp->spq_prod_idx = 0;
2703                 DP(NETIF_MSG_TIMER, "end of spq\n");
2704         } else {
2705                 bp->spq_prod_bd++;
2706                 bp->spq_prod_idx++;
2707         }
2708         return next_spe;
2709 }
2710
2711 /* must be called under the spq lock */
2712 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2713 {
2714         int func = BP_FUNC(bp);
2715
2716         /* Make sure that BD data is updated before writing the producer */
2717         wmb();
2718
2719         REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2720                  bp->spq_prod_idx);
2721         mmiowb();
2722 }
2723
2724 /* the slow path queue is odd since completions arrive on the fastpath ring */
2725 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2726                   u32 data_hi, u32 data_lo, int common)
2727 {
2728         struct eth_spe *spe;
2729         u16 type;
2730
2731 #ifdef BNX2X_STOP_ON_ERROR
2732         if (unlikely(bp->panic))
2733                 return -EIO;
2734 #endif
2735
2736         spin_lock_bh(&bp->spq_lock);
2737
2738         if (!atomic_read(&bp->spq_left)) {
2739                 BNX2X_ERR("BUG! SPQ ring full!\n");
2740                 spin_unlock_bh(&bp->spq_lock);
2741                 bnx2x_panic();
2742                 return -EBUSY;
2743         }
2744
2745         spe = bnx2x_sp_get_next(bp);
2746
2747         /* CID needs port number to be encoded in it */
2748         spe->hdr.conn_and_cmd_data =
2749                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2750                                     HW_CID(bp, cid));
2751
2752         if (common)
2753                 /* Common ramrods:
2754                  *      FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2755                  *      TRAFFIC_STOP, TRAFFIC_START
2756                  */
2757                 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2758                         & SPE_HDR_CONN_TYPE;
2759         else
2760                 /* ETH ramrods: SETUP, HALT */
2761                 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2762                         & SPE_HDR_CONN_TYPE;
2763
2764         type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2765                  SPE_HDR_FUNCTION_ID);
2766
2767         spe->hdr.type = cpu_to_le16(type);
2768
2769         spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2770         spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2771
2772         /* stats ramrod has its own slot on the spq */
2773         if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2774                 /* It's ok if the actual decrement is issued towards the memory
2775                  * somewhere between the spin_lock and spin_unlock. Thus no
2776                  * more explicit memory barrier is needed.
2777                  */
2778                 atomic_dec(&bp->spq_left);
2779
2780         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2781            "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x) "
2782            "type(0x%x) left %x\n",
2783            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2784            (u32)(U64_LO(bp->spq_mapping) +
2785            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2786            HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
2787
2788         bnx2x_sp_prod_update(bp);
2789         spin_unlock_bh(&bp->spq_lock);
2790         return 0;
2791 }
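
/*
 * Illustrative sketch (not from the original file): posting an ETH HALT
 * ramrod for a fastpath client; the completion later arrives in
 * bnx2x_sp_event() above, which moves fp->state from HALTING to HALTED.
 * The data arguments are placeholders and the real teardown path does
 * more than this.
 */
#if 0
static int bnx2x_example_halt_client(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp)
{
        fp->state = BNX2X_FP_STATE_HALTING;
        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, fp->cid,
                             0, fp->cl_id, 0);
}
#endif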
2792
2793 /* acquire split MCP access lock register */
2794 static int bnx2x_acquire_alr(struct bnx2x *bp)
2795 {
2796         u32 j, val;
2797         int rc = 0;
2798
2799         might_sleep();
2800         for (j = 0; j < 1000; j++) {
2801                 val = (1UL << 31);
2802                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2803                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2804                 if (val & (1L << 31))
2805                         break;
2806
2807                 msleep(5);
2808         }
2809         if (!(val & (1L << 31))) {
2810                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2811                 rc = -EBUSY;
2812         }
2813
2814         return rc;
2815 }
2816
2817 /* release split MCP access lock register */
2818 static void bnx2x_release_alr(struct bnx2x *bp)
2819 {
2820         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2821 }
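
/* The two helpers above are used bracket-style; condensed from
 * bnx2x_attn_int_deasserted() further down:
 *
 *      bnx2x_acquire_alr(bp);
 *      ... read the AEU after-invert registers and dispatch attentions ...
 *      bnx2x_release_alr(bp);
 */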
2822
2823 #define BNX2X_DEF_SB_ATT_IDX    0x0001
2824 #define BNX2X_DEF_SB_IDX        0x0002
2825
2826 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2827 {
2828         struct host_sp_status_block *def_sb = bp->def_status_blk;
2829         u16 rc = 0;
2830
2831         barrier(); /* status block is written to by the chip */
2832         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2833                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2834                 rc |= BNX2X_DEF_SB_ATT_IDX;
2835         }
2836
2837         if (bp->def_idx != def_sb->sp_sb.running_index) {
2838                 bp->def_idx = def_sb->sp_sb.running_index;
2839                 rc |= BNX2X_DEF_SB_IDX;
2840         }
2841
2842         /* Do not reorder: reading the indices should complete before handling */
2843         barrier();
2844         return rc;
2845 }
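
/* The returned flag word is consumed in bnx2x_sp_task() further down:
 * BNX2X_DEF_SB_ATT_IDX triggers bnx2x_attn_int() and BNX2X_DEF_SB_IDX
 * triggers bnx2x_eq_int().
 */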
2846
2847 /*
2848  * slow path service functions
2849  */
2850
2851 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2852 {
2853         int port = BP_PORT(bp);
2854         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2855                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2856         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2857                                        NIG_REG_MASK_INTERRUPT_PORT0;
2858         u32 aeu_mask;
2859         u32 nig_mask = 0;
2860         u32 reg_addr;
2861
2862         if (bp->attn_state & asserted)
2863                 BNX2X_ERR("IGU ERROR\n");
2864
2865         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2866         aeu_mask = REG_RD(bp, aeu_addr);
2867
2868         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2869            aeu_mask, asserted);
2870         aeu_mask &= ~(asserted & 0x3ff);
2871         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2872
2873         REG_WR(bp, aeu_addr, aeu_mask);
2874         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2875
2876         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2877         bp->attn_state |= asserted;
2878         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2879
2880         if (asserted & ATTN_HARD_WIRED_MASK) {
2881                 if (asserted & ATTN_NIG_FOR_FUNC) {
2882
2883                         bnx2x_acquire_phy_lock(bp);
2884
2885                         /* save nig interrupt mask */
2886                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2887                         REG_WR(bp, nig_int_mask_addr, 0);
2888
2889                         bnx2x_link_attn(bp);
2890
2891                         /* handle unicore attn? */
2892                 }
2893                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2894                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2895
2896                 if (asserted & GPIO_2_FUNC)
2897                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2898
2899                 if (asserted & GPIO_3_FUNC)
2900                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2901
2902                 if (asserted & GPIO_4_FUNC)
2903                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2904
2905                 if (port == 0) {
2906                         if (asserted & ATTN_GENERAL_ATTN_1) {
2907                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2908                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2909                         }
2910                         if (asserted & ATTN_GENERAL_ATTN_2) {
2911                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2912                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2913                         }
2914                         if (asserted & ATTN_GENERAL_ATTN_3) {
2915                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2916                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2917                         }
2918                 } else {
2919                         if (asserted & ATTN_GENERAL_ATTN_4) {
2920                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2921                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2922                         }
2923                         if (asserted & ATTN_GENERAL_ATTN_5) {
2924                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2925                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2926                         }
2927                         if (asserted & ATTN_GENERAL_ATTN_6) {
2928                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2929                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2930                         }
2931                 }
2932
2933         } /* if hardwired */
2934
2935         if (bp->common.int_block == INT_BLOCK_HC)
2936                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2937                             COMMAND_REG_ATTN_BITS_SET);
2938         else
2939                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2940
2941         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2942            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2943         REG_WR(bp, reg_addr, asserted);
2944
2945         /* now set back the mask */
2946         if (asserted & ATTN_NIG_FOR_FUNC) {
2947                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2948                 bnx2x_release_phy_lock(bp);
2949         }
2950 }
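
/* Ordering note for the function above: the AEU lines are masked before the
 * attention "set" command is written, and the saved NIG mask is restored only
 * after bnx2x_link_attn() completes, so a link flap cannot re-raise the NIG
 * attention while it is being serviced.
 */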
2951
2952 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2953 {
2954         int port = BP_PORT(bp);
2955         u32 ext_phy_config;
2956         /* mark the failure */
2957         ext_phy_config =
2958                 SHMEM_RD(bp,
2959                          dev_info.port_hw_config[port].external_phy_config);
2960
2961         ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2962         ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2963         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2964                  ext_phy_config);
2965
2966         /* log the failure */
2967         netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2968                " the driver to shut down the card to prevent permanent"
2969                " damage.  Please contact OEM Support for assistance\n");
2970 }
2971
2972 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2973 {
2974         int port = BP_PORT(bp);
2975         int reg_offset;
2976         u32 val;
2977
2978         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2979                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2980
2981         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2982
2983                 val = REG_RD(bp, reg_offset);
2984                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2985                 REG_WR(bp, reg_offset, val);
2986
2987                 BNX2X_ERR("SPIO5 hw attention\n");
2988
2989                 /* Fan failure attention */
2990                 bnx2x_hw_reset_phy(&bp->link_params);
2991                 bnx2x_fan_failure(bp);
2992         }
2993
2994         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2995                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2996                 bnx2x_acquire_phy_lock(bp);
2997                 bnx2x_handle_module_detect_int(&bp->link_params);
2998                 bnx2x_release_phy_lock(bp);
2999         }
3000
3001         if (attn & HW_INTERRUT_ASSERT_SET_0) {
3002
3003                 val = REG_RD(bp, reg_offset);
3004                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3005                 REG_WR(bp, reg_offset, val);
3006
3007                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3008                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3009                 bnx2x_panic();
3010         }
3011 }
3012
3013 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3014 {
3015         u32 val;
3016
3017         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3018
3019                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3020                 BNX2X_ERR("DB hw attention 0x%x\n", val);
3021                 /* DORQ discard attention */
3022                 if (val & 0x2)
3023                         BNX2X_ERR("FATAL error from DORQ\n");
3024         }
3025
3026         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3027
3028                 int port = BP_PORT(bp);
3029                 int reg_offset;
3030
3031                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3032                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3033
3034                 val = REG_RD(bp, reg_offset);
3035                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3036                 REG_WR(bp, reg_offset, val);
3037
3038                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3039                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3040                 bnx2x_panic();
3041         }
3042 }
3043
3044 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3045 {
3046         u32 val;
3047
3048         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3049
3050                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3051                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3052                 /* CFC error attention */
3053                 if (val & 0x2)
3054                         BNX2X_ERR("FATAL error from CFC\n");
3055         }
3056
3057         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3058
3059                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3060                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3061                 /* RQ_USDMDP_FIFO_OVERFLOW */
3062                 if (val & 0x18000)
3063                         BNX2X_ERR("FATAL error from PXP\n");
3064                 if (CHIP_IS_E2(bp)) {
3065                         val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3066                         BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3067                 }
3068         }
3069
3070         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3071
3072                 int port = BP_PORT(bp);
3073                 int reg_offset;
3074
3075                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3076                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3077
3078                 val = REG_RD(bp, reg_offset);
3079                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3080                 REG_WR(bp, reg_offset, val);
3081
3082                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3083                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3084                 bnx2x_panic();
3085         }
3086 }
3087
3088 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3089 {
3090         u32 val;
3091
3092         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3093
3094                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3095                         int func = BP_FUNC(bp);
3096
3097                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3098                         bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3099                                         func_mf_config[BP_ABS_FUNC(bp)].config);
3100                         val = SHMEM_RD(bp,
3101                                        func_mb[BP_FW_MB_IDX(bp)].drv_status);
3102                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3103                                 bnx2x_dcc_event(bp,
3104                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3105
3106                         if (val & DRV_STATUS_SET_MF_BW)
3107                                 bnx2x_set_mf_bw(bp);
3108
3109                         bnx2x__link_status_update(bp);
3110                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3111                                 bnx2x_pmf_update(bp);
3112
3113                         if (bp->port.pmf &&
3114                             (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3115                                 bp->dcbx_enabled > 0)
3116                                 /* start dcbx state machine */
3117                                 bnx2x_dcbx_set_params(bp,
3118                                         BNX2X_DCBX_STATE_NEG_RECEIVED);
3119                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3120
3121                         BNX2X_ERR("MC assert!\n");
3122                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3123                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3124                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3125                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3126                         bnx2x_panic();
3127
3128                 } else if (attn & BNX2X_MCP_ASSERT) {
3129
3130                         BNX2X_ERR("MCP assert!\n");
3131                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3132                         bnx2x_fw_dump(bp);
3133
3134                 } else
3135                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3136         }
3137
3138         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3139                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3140                 if (attn & BNX2X_GRC_TIMEOUT) {
3141                         val = CHIP_IS_E1(bp) ? 0 :
3142                                         REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
3143                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3144                 }
3145                 if (attn & BNX2X_GRC_RSV) {
3146                         val = CHIP_IS_E1(bp) ? 0 :
3147                                         REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
3148                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3149                 }
3150                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3151         }
3152 }
3153
3154 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
3155 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
3156 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3157 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
3158 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
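
/* Resulting layout of BNX2X_MISC_GEN_REG as used by the recovery flow below:
 *
 *      bits [15:0] - load counter, incremented/decremented per function load
 *      bit  16     - "reset in progress" flag; zero means reset is done
 *
 * e.g. a value of 0x00010002 reads as "reset in progress, load count 2".
 */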
3159
3160 /*
3161  * should be run under rtnl lock
3162  */
3163 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3164 {
3165         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3166         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3167         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3168         barrier();
3169         mmiowb();
3170 }
3171
3172 /*
3173  * should be run under rtnl lock
3174  */
3175 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3176 {
3177         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3178         val |= (1 << RESET_DONE_FLAG_SHIFT);
3179         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3180         barrier();
3181         mmiowb();
3182 }
3183
3184 /*
3185  * should be run under rtnl lock
3186  */
3187 bool bnx2x_reset_is_done(struct bnx2x *bp)
3188 {
3189         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3190         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3191         return (val & RESET_DONE_FLAG_MASK) ? false : true;
3192 }
3193
3194 /*
3195  * should be run under rtnl lock
3196  */
3197 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3198 {
3199         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3200
3201         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3202
3203         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3204         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3205         barrier();
3206         mmiowb();
3207 }
3208
3209 /*
3210  * should be run under rtnl lock
3211  */
3212 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3213 {
3214         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3215
3216         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3217
3218         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3219         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3220         barrier();
3221         mmiowb();
3222
3223         return val1;
3224 }
3225
3226 /*
3227  * should be run under rtnl lock
3228  */
3229 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3230 {
3231         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3232 }
3233
3234 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3235 {
3236         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3237         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3238 }
3239
3240 static inline void _print_next_block(int idx, const char *blk)
3241 {
3242         if (idx)
3243                 pr_cont(", ");
3244         pr_cont("%s", blk);
3245 }
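
/* Together with the bnx2x_print_blocks_with_parity*() helpers below, this
 * emits one comma-separated line per parity event, e.g.:
 *
 *      eth0: Parity errors detected in blocks: BRB, QM, CFC
 */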
3246
3247 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3248 {
3249         int i = 0;
3250         u32 cur_bit = 0;
3251         for (i = 0; sig; i++) {
3252                 cur_bit = ((u32)0x1 << i);
3253                 if (sig & cur_bit) {
3254                         switch (cur_bit) {
3255                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3256                                 _print_next_block(par_num++, "BRB");
3257                                 break;
3258                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3259                                 _print_next_block(par_num++, "PARSER");
3260                                 break;
3261                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3262                                 _print_next_block(par_num++, "TSDM");
3263                                 break;
3264                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3265                                 _print_next_block(par_num++, "SEARCHER");
3266                                 break;
3267                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3268                                 _print_next_block(par_num++, "TSEMI");
3269                                 break;
3270                         }
3271
3272                         /* Clear the bit */
3273                         sig &= ~cur_bit;
3274                 }
3275         }
3276
3277         return par_num;
3278 }
3279
3280 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3281 {
3282         int i = 0;
3283         u32 cur_bit = 0;
3284         for (i = 0; sig; i++) {
3285                 cur_bit = ((u32)0x1 << i);
3286                 if (sig & cur_bit) {
3287                         switch (cur_bit) {
3288                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3289                                 _print_next_block(par_num++, "PBCLIENT");
3290                                 break;
3291                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3292                                 _print_next_block(par_num++, "QM");
3293                                 break;
3294                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3295                                 _print_next_block(par_num++, "XSDM");
3296                                 break;
3297                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3298                                 _print_next_block(par_num++, "XSEMI");
3299                                 break;
3300                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3301                                 _print_next_block(par_num++, "DOORBELLQ");
3302                                 break;
3303                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3304                                 _print_next_block(par_num++, "VAUX PCI CORE");
3305                                 break;
3306                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3307                                 _print_next_block(par_num++, "DEBUG");
3308                                 break;
3309                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3310                                 _print_next_block(par_num++, "USDM");
3311                                 break;
3312                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3313                                 _print_next_block(par_num++, "USEMI");
3314                                 break;
3315                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3316                                 _print_next_block(par_num++, "UPB");
3317                                 break;
3318                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3319                                 _print_next_block(par_num++, "CSDM");
3320                                 break;
3321                         }
3322
3323                         /* Clear the bit */
3324                         sig &= ~cur_bit;
3325                 }
3326         }
3327
3328         return par_num;
3329 }
3330
3331 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3332 {
3333         int i = 0;
3334         u32 cur_bit = 0;
3335         for (i = 0; sig; i++) {
3336                 cur_bit = ((u32)0x1 << i);
3337                 if (sig & cur_bit) {
3338                         switch (cur_bit) {
3339                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3340                                 _print_next_block(par_num++, "CSEMI");
3341                                 break;
3342                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3343                                 _print_next_block(par_num++, "PXP");
3344                                 break;
3345                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3346                                 _print_next_block(par_num++,
3347                                         "PXPPCICLOCKCLIENT");
3348                                 break;
3349                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3350                                 _print_next_block(par_num++, "CFC");
3351                                 break;
3352                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3353                                 _print_next_block(par_num++, "CDU");
3354                                 break;
3355                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3356                                 _print_next_block(par_num++, "IGU");
3357                                 break;
3358                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3359                                 _print_next_block(par_num++, "MISC");
3360                                 break;
3361                         }
3362
3363                         /* Clear the bit */
3364                         sig &= ~cur_bit;
3365                 }
3366         }
3367
3368         return par_num;
3369 }
3370
3371 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3372 {
3373         int i = 0;
3374         u32 cur_bit = 0;
3375         for (i = 0; sig; i++) {
3376                 cur_bit = ((u32)0x1 << i);
3377                 if (sig & cur_bit) {
3378                         switch (cur_bit) {
3379                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3380                                 _print_next_block(par_num++, "MCP ROM");
3381                                 break;
3382                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3383                                 _print_next_block(par_num++, "MCP UMP RX");
3384                                 break;
3385                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3386                                 _print_next_block(par_num++, "MCP UMP TX");
3387                                 break;
3388                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3389                                 _print_next_block(par_num++, "MCP SCPAD");
3390                                 break;
3391                         }
3392
3393                         /* Clear the bit */
3394                         sig &= ~cur_bit;
3395                 }
3396         }
3397
3398         return par_num;
3399 }
3400
3401 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3402                                      u32 sig2, u32 sig3)
3403 {
3404         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3405             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3406                 int par_num = 0;
3407                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
3408                         "[0]:0x%08x [1]:0x%08x "
3409                         "[2]:0x%08x [3]:0x%08x\n",
3410                           sig0 & HW_PRTY_ASSERT_SET_0,
3411                           sig1 & HW_PRTY_ASSERT_SET_1,
3412                           sig2 & HW_PRTY_ASSERT_SET_2,
3413                           sig3 & HW_PRTY_ASSERT_SET_3);
3414                 printk(KERN_ERR "%s: Parity errors detected in blocks: ",
3415                        bp->dev->name);
3416                 par_num = bnx2x_print_blocks_with_parity0(
3417                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3418                 par_num = bnx2x_print_blocks_with_parity1(
3419                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3420                 par_num = bnx2x_print_blocks_with_parity2(
3421                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3422                 par_num = bnx2x_print_blocks_with_parity3(
3423                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3424                 pr_cont("\n");
3425                 return true;
3426         } else
3427                 return false;
3428 }
3429
3430 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3431 {
3432         struct attn_route attn;
3433         int port = BP_PORT(bp);
3434
3435         attn.sig[0] = REG_RD(bp,
3436                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3437                              port*4);
3438         attn.sig[1] = REG_RD(bp,
3439                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3440                              port*4);
3441         attn.sig[2] = REG_RD(bp,
3442                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3443                              port*4);
3444         attn.sig[3] = REG_RD(bp,
3445                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3446                              port*4);
3447
3448         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3449                                         attn.sig[3]);
3450 }
3451
3452
3453 static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3454 {
3455         u32 val;
3456         if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3457
3458                 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3459                 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3460                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3461                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3462                                   "ADDRESS_ERROR\n");
3463                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3464                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3465                                   "INCORRECT_RCV_BEHAVIOR\n");
3466                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3467                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3468                                   "WAS_ERROR_ATTN\n");
3469                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3470                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3471                                   "VF_LENGTH_VIOLATION_ATTN\n");
3472                 if (val &
3473                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3474                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3475                                   "VF_GRC_SPACE_VIOLATION_ATTN\n");
3476                 if (val &
3477                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3478                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3479                                   "VF_MSIX_BAR_VIOLATION_ATTN\n");
3480                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3481                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3482                                   "TCPL_ERROR_ATTN\n");
3483                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3484                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3485                                   "TCPL_IN_TWO_RCBS_ATTN\n");
3486                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3487                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3488                                   "CSSNOOP_FIFO_OVERFLOW\n");
3489         }
3490         if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3491                 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3492                 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3493                 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3494                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3495                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3496                         BNX2X_ERR("ATC_ATC_INT_STS_REG"
3497                                   "_ATC_TCPL_TO_NOT_PEND\n");
3498                 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3499                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3500                                   "ATC_GPA_MULTIPLE_HITS\n");
3501                 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3502                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3503                                   "ATC_RCPL_TO_EMPTY_CNT\n");
3504                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3505                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3506                 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3507                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3508                                   "ATC_IREQ_LESS_THAN_STU\n");
3509         }
3510
3511         if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3512                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3513                 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3514                 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3515                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3516         }
3517
3518 }
3519
3520 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3521 {
3522         struct attn_route attn, *group_mask;
3523         int port = BP_PORT(bp);
3524         int index;
3525         u32 reg_addr;
3526         u32 val;
3527         u32 aeu_mask;
3528
3529         /* need to take HW lock because the MCP or the other port might also
3530            try to handle this event */
3531         bnx2x_acquire_alr(bp);
3532
3533         if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
3534                 bp->recovery_state = BNX2X_RECOVERY_INIT;
3535                 bnx2x_set_reset_in_progress(bp);
3536                 schedule_delayed_work(&bp->reset_task, 0);
3537                 /* Disable HW interrupts */
3538                 bnx2x_int_disable(bp);
3539                 bnx2x_release_alr(bp);
3540                 /* In case of parity errors don't handle attentions so that
3541                  * other functions can also "see" the parity errors.
3542                  */
3543                 return;
3544         }
3545
3546         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3547         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3548         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3549         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3550         if (CHIP_IS_E2(bp))
3551                 attn.sig[4] =
3552                       REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3553         else
3554                 attn.sig[4] = 0;
3555
3556         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3557            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
3558
3559         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3560                 if (deasserted & (1 << index)) {
3561                         group_mask = &bp->attn_group[index];
3562
3563                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3564                                          "%08x %08x %08x\n",
3565                            index,
3566                            group_mask->sig[0], group_mask->sig[1],
3567                            group_mask->sig[2], group_mask->sig[3],
3568                            group_mask->sig[4]);
3569
3570                         bnx2x_attn_int_deasserted4(bp,
3571                                         attn.sig[4] & group_mask->sig[4]);
3572                         bnx2x_attn_int_deasserted3(bp,
3573                                         attn.sig[3] & group_mask->sig[3]);
3574                         bnx2x_attn_int_deasserted1(bp,
3575                                         attn.sig[1] & group_mask->sig[1]);
3576                         bnx2x_attn_int_deasserted2(bp,
3577                                         attn.sig[2] & group_mask->sig[2]);
3578                         bnx2x_attn_int_deasserted0(bp,
3579                                         attn.sig[0] & group_mask->sig[0]);
3580                 }
3581         }
3582
3583         bnx2x_release_alr(bp);
3584
3585         if (bp->common.int_block == INT_BLOCK_HC)
3586                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3587                             COMMAND_REG_ATTN_BITS_CLR);
3588         else
3589                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
3590
3591         val = ~deasserted;
3592         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3593            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3594         REG_WR(bp, reg_addr, val);
3595
3596         if (~bp->attn_state & deasserted)
3597                 BNX2X_ERR("IGU ERROR\n");
3598
3599         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3600                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3601
3602         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3603         aeu_mask = REG_RD(bp, reg_addr);
3604
3605         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3606            aeu_mask, deasserted);
3607         aeu_mask |= (deasserted & 0x3ff);
3608         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3609
3610         REG_WR(bp, reg_addr, aeu_mask);
3611         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3612
3613         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3614         bp->attn_state &= ~deasserted;
3615         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3616 }
3617
3618 static void bnx2x_attn_int(struct bnx2x *bp)
3619 {
3620         /* read local copy of bits */
3621         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3622                                                                 attn_bits);
3623         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3624                                                                 attn_bits_ack);
3625         u32 attn_state = bp->attn_state;
3626
3627         /* look for changed bits */
3628         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3629         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3630
3631         DP(NETIF_MSG_HW,
3632            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3633            attn_bits, attn_ack, asserted, deasserted);
3634
3635         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3636                 BNX2X_ERR("BAD attention state\n");
3637
3638         /* handle bits that were raised */
3639         if (asserted)
3640                 bnx2x_attn_int_asserted(bp, asserted);
3641
3642         if (deasserted)
3643                 bnx2x_attn_int_deasserted(bp, deasserted);
3644 }
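
/* Worked example for the derivation above, with bit 2 newly raised and
 * bit 0 just dropped:
 *
 *      attn_bits  = 100b, attn_ack = 001b, attn_state = 001b
 *      asserted   =  attn_bits & ~attn_ack & ~attn_state = 100b
 *      deasserted = ~attn_bits &  attn_ack &  attn_state = 001b
 */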
3645
3646 static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3647 {
3648         /* No memory barriers */
3649         storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3650         mmiowb(); /* keep prod updates ordered */
3651 }
3652
3653 #ifdef BCM_CNIC
3654 static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3655                                       union event_ring_elem *elem)
3656 {
3657         if (!bp->cnic_eth_dev.starting_cid  ||
3658             cid < bp->cnic_eth_dev.starting_cid)
3659                 return 1;
3660
3661         DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3662
3663         if (unlikely(elem->message.data.cfc_del_event.error)) {
3664                 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3665                           cid);
3666                 bnx2x_panic_dump(bp);
3667         }
3668         bnx2x_cnic_cfc_comp(bp, cid);
3669         return 0;
3670 }
3671 #endif
3672
3673 static void bnx2x_eq_int(struct bnx2x *bp)
3674 {
3675         u16 hw_cons, sw_cons, sw_prod;
3676         union event_ring_elem *elem;
3677         u32 cid;
3678         u8 opcode;
3679         int spqe_cnt = 0;
3680
3681         hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3682
3683         /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3684          * When we get to the next-page we need to adjust so the loop
3685          * condition below will be met. The next element is the size of a
3686          * regular element and hence we increment by 1.
3687          */
3688         if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3689                 hw_cons++;
3690
3691         /* This function may never run in parallel with itself for a
3692          * specific bp, thus there is no need in "paired" read memory
3693          * barrier here.
3694          */
3695         sw_cons = bp->eq_cons;
3696         sw_prod = bp->eq_prod;
3697
3698         DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->spq_left %u\n",
3699                         hw_cons, sw_cons, atomic_read(&bp->spq_left));
3700
3701         for (; sw_cons != hw_cons;
3702               sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3703
3704
3705                 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3706
3707                 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3708                 opcode = elem->message.opcode;
3709
3710
3711                 /* handle eq element */
3712                 switch (opcode) {
3713                 case EVENT_RING_OPCODE_STAT_QUERY:
3714                         DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3715                         /* nothing to do with stats comp */
3716                         continue;
3717
3718                 case EVENT_RING_OPCODE_CFC_DEL:
3719                         /* handle according to cid range */
3720                         /*
3721                          * we may want to verify here that the bp state is
3722                          * HALTING
3723                          */
3724                         DP(NETIF_MSG_IFDOWN,
3725                            "got delete ramrod for MULTI[%d]\n", cid);
3726 #ifdef BCM_CNIC
3727                         if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3728                                 goto next_spqe;
3729                         if (cid == BNX2X_FCOE_ETH_CID)
3730                                 bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
3731                         else
3732 #endif
3733                                 bnx2x_fp(bp, cid, state) =
3734                                                 BNX2X_FP_STATE_CLOSED;
3735
3736                         goto next_spqe;
3737
3738                 case EVENT_RING_OPCODE_STOP_TRAFFIC:
3739                         DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
3740                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
3741                         goto next_spqe;
3742                 case EVENT_RING_OPCODE_START_TRAFFIC:
3743                         DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
3744                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
3745                         goto next_spqe;
3746                 }
3747
3748                 switch (opcode | bp->state) {
3749                 case (EVENT_RING_OPCODE_FUNCTION_START |
3750                       BNX2X_STATE_OPENING_WAIT4_PORT):
3751                         DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3752                         bp->state = BNX2X_STATE_FUNC_STARTED;
3753                         break;
3754
3755                 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3756                       BNX2X_STATE_CLOSING_WAIT4_HALT):
3757                         DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3758                         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3759                         break;
3760
3761                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3762                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3763                         DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3764                         bp->set_mac_pending = 0;
3765                         break;
3766
3767                 case (EVENT_RING_OPCODE_SET_MAC |
3768                       BNX2X_STATE_CLOSING_WAIT4_HALT):
3769                         DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3770                         bp->set_mac_pending = 0;
3771                         break;
3772                 default:
3773                         /* unknown event - log an error and continue */
3774                         BNX2X_ERR("Unknown EQ event %d\n",
3775                                   elem->message.opcode);
3776                 }
3777 next_spqe:
3778                 spqe_cnt++;
3779         } /* for */
3780
3781         smp_mb__before_atomic_inc();
3782         atomic_add(spqe_cnt, &bp->spq_left);
3783
3784         bp->eq_cons = sw_cons;
3785         bp->eq_prod = sw_prod;
3786         /* Make sure that the above memory writes were issued towards the memory */
3787         smp_wmb();
3788
3789         /* update producer */
3790         bnx2x_update_eq_prod(bp, bp->eq_prod);
3791 }
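
/* Flow-control note: each element consumed above returns one credit via
 * atomic_add(spqe_cnt, &bp->spq_left), pairing with the atomic_dec() in
 * bnx2x_sp_post().  Statistics completions "continue" before the next_spqe
 * label and thus return no credit, matching the fact that posting a
 * STAT_QUERY never consumed one.
 */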
3792
3793 static void bnx2x_sp_task(struct work_struct *work)
3794 {
3795         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3796         u16 status;
3797
3798         /* Return here if interrupt is disabled */
3799         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3800                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3801                 return;
3802         }
3803
3804         status = bnx2x_update_dsb_idx(bp);
3805 /*      if (status == 0)                                     */
3806 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3807
3808         DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3809
3810         /* HW attentions */
3811         if (status & BNX2X_DEF_SB_ATT_IDX) {
3812                 bnx2x_attn_int(bp);
3813                 status &= ~BNX2X_DEF_SB_ATT_IDX;
3814         }
3815
3816         /* SP events: STAT_QUERY and others */
3817         if (status & BNX2X_DEF_SB_IDX) {
3818 #ifdef BCM_CNIC
3819                 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
3820
3821                 if ((!NO_FCOE(bp)) &&
3822                         (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
3823                         napi_schedule(&bnx2x_fcoe(bp, napi));
3824 #endif
3825                 /* Handle EQ completions */
3826                 bnx2x_eq_int(bp);
3827
3828                 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3829                         le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3830
3831                 status &= ~BNX2X_DEF_SB_IDX;
3832         }
3833
3834         if (unlikely(status))
3835                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3836                    status);
3837
3838         bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3839              le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
3840 }
3841
3842 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3843 {
3844         struct net_device *dev = dev_instance;
3845         struct bnx2x *bp = netdev_priv(dev);
3846
3847         /* Return here if interrupt is disabled */
3848         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3849                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3850                 return IRQ_HANDLED;
3851         }
3852
3853         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3854                      IGU_INT_DISABLE, 0);
3855
3856 #ifdef BNX2X_STOP_ON_ERROR
3857         if (unlikely(bp->panic))
3858                 return IRQ_HANDLED;
3859 #endif
3860
3861 #ifdef BCM_CNIC
3862         {
3863                 struct cnic_ops *c_ops;
3864
3865                 rcu_read_lock();
3866                 c_ops = rcu_dereference(bp->cnic_ops);
3867                 if (c_ops)
3868                         c_ops->cnic_handler(bp->cnic_data, NULL);
3869                 rcu_read_unlock();
3870         }
3871 #endif
3872         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3873
3874         return IRQ_HANDLED;
3875 }
3876
3877 /* end of slow path */
3878
3879 static void bnx2x_timer(unsigned long data)
3880 {
3881         struct bnx2x *bp = (struct bnx2x *) data;
3882
3883         if (!netif_running(bp->dev))
3884                 return;
3885
3886         if (atomic_read(&bp->intr_sem) != 0)
3887                 goto timer_restart;
3888
3889         if (poll) {
3890                 struct bnx2x_fastpath *fp = &bp->fp[0];
3891                 int rc;
3892
3893                 bnx2x_tx_int(fp);
3894                 rc = bnx2x_rx_int(fp, 1000);
3895         }
3896
3897         if (!BP_NOMCP(bp)) {
3898                 int mb_idx = BP_FW_MB_IDX(bp);
3899                 u32 drv_pulse;
3900                 u32 mcp_pulse;
3901
3902                 ++bp->fw_drv_pulse_wr_seq;
3903                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3904                 /* TBD - add SYSTEM_TIME */
3905                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3906                 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
3907
3908                 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
3909                              MCP_PULSE_SEQ_MASK);
3910                 /* The delta between driver pulse and mcp response
3911                  * should be 1 (before mcp response) or 0 (after mcp response)
3912                  */
3913                 if ((drv_pulse != mcp_pulse) &&
3914                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3915                         /* someone lost a heartbeat... */
3916                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3917                                   drv_pulse, mcp_pulse);
3918                 }
3919         }
3920
3921         if (bp->state == BNX2X_STATE_OPEN)
3922                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3923
3924 timer_restart:
3925         mod_timer(&bp->timer, jiffies + bp->current_interval);
3926 }
3927
3928 /* end of Statistics */
3929
3930 /* nic init */
3931
3932 /*
3933  * nic init service functions
3934  */
3935
3936 static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
3937 {
3938         u32 i;
3939         if (!(len%4) && !(addr%4))
3940                 for (i = 0; i < len; i += 4)
3941                         REG_WR(bp, addr + i, fill);
3942         else
3943                 for (i = 0; i < len; i++)
3944                         REG_WR8(bp, addr + i, fill);
3945
3946 }
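
/* e.g. bnx2x_fill(bp, addr, 0, 64) on a 4-byte aligned addr issues sixteen
 * 32-bit writes; any misaligned address or length falls back to byte writes.
 */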
3947
3948 /* helper: writes FP SB data to FW - data_size in dwords */
3949 static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3950                                        int fw_sb_id,
3951                                        u32 *sb_data_p,
3952                                        u32 data_size)
3953 {
3954         int index;
3955         for (index = 0; index < data_size; index++)
3956                 REG_WR(bp, BAR_CSTRORM_INTMEM +
3957                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3958                         sizeof(u32)*index,
3959                         *(sb_data_p + index));
3960 }
3961
3962 static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3963 {
3964         u32 *sb_data_p;
3965         u32 data_size = 0;
3966         struct hc_status_block_data_e2 sb_data_e2;
3967         struct hc_status_block_data_e1x sb_data_e1x;
3968
3969         /* disable the function first */
3970         if (CHIP_IS_E2(bp)) {
3971                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3972                 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3973                 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3974                 sb_data_e2.common.p_func.vf_valid = false;
3975                 sb_data_p = (u32 *)&sb_data_e2;
3976                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3977         } else {
3978                 memset(&sb_data_e1x, 0,
3979                        sizeof(struct hc_status_block_data_e1x));
3980                 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3981                 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3982                 sb_data_e1x.common.p_func.vf_valid = false;
3983                 sb_data_p = (u32 *)&sb_data_e1x;
3984                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3985         }
3986         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3987
3988         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3989                         CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3990                         CSTORM_STATUS_BLOCK_SIZE);
3991         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3992                         CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3993                         CSTORM_SYNC_BLOCK_SIZE);
3994 }
3995
3996 /* helper: writes SP SB data to FW */
3997 static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3998                 struct hc_sp_status_block_data *sp_sb_data)
3999 {
4000         int func = BP_FUNC(bp);
4001         int i;
4002         for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
4003                 REG_WR(bp, BAR_CSTRORM_INTMEM +
4004                         CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
4005                         i*sizeof(u32),
4006                         *((u32 *)sp_sb_data + i));
4007 }
4008
4009 static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
4010 {
4011         int func = BP_FUNC(bp);
4012         struct hc_sp_status_block_data sp_sb_data;
4013         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4014
4015         sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
4016         sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
4017         sp_sb_data.p_func.vf_valid = false;
4018
4019         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4020
4021         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4022                         CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
4023                         CSTORM_SP_STATUS_BLOCK_SIZE);
4024         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4025                         CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
4026                         CSTORM_SP_SYNC_BLOCK_SIZE);
4027
4028 }
4029
4030
4031 static inline
4032 void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4033                                            int igu_sb_id, int igu_seg_id)
4034 {
4035         hc_sm->igu_sb_id = igu_sb_id;
4036         hc_sm->igu_seg_id = igu_seg_id;
4037         hc_sm->timer_value = 0xFF;
4038         hc_sm->time_to_expire = 0xFFFFFFFF;
4039 }
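
/* Assumption, not spelled out here: the all-ones timer_value/time_to_expire
 * above park the state machine's timeout at "never expires" until
 * bnx2x_update_coalesce_sb_index() programs real tick values.
 */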
4040
4041 static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4042                           u8 vf_valid, int fw_sb_id, int igu_sb_id)
4043 {
4044         int igu_seg_id;
4045
4046         struct hc_status_block_data_e2 sb_data_e2;
4047         struct hc_status_block_data_e1x sb_data_e1x;
4048         struct hc_status_block_sm  *hc_sm_p;
4049         struct hc_index_data *hc_index_p;
4050         int data_size;
4051         u32 *sb_data_p;
4052
4053         if (CHIP_INT_MODE_IS_BC(bp))
4054                 igu_seg_id = HC_SEG_ACCESS_NORM;
4055         else
4056                 igu_seg_id = IGU_SEG_ACCESS_NORM;
4057
4058         bnx2x_zero_fp_sb(bp, fw_sb_id);
4059
4060         if (CHIP_IS_E2(bp)) {
4061                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
4062                 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
4063                 sb_data_e2.common.p_func.vf_id = vfid;
4064                 sb_data_e2.common.p_func.vf_valid = vf_valid;
4065                 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
4066                 sb_data_e2.common.same_igu_sb_1b = true;
4067                 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
4068                 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
4069                 hc_sm_p = sb_data_e2.common.state_machine;
4070                 hc_index_p = sb_data_e2.index_data;
4071                 sb_data_p = (u32 *)&sb_data_e2;
4072                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
4073         } else {
4074                 memset(&sb_data_e1x, 0,
4075                        sizeof(struct hc_status_block_data_e1x));
4076                 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
4077                 sb_data_e1x.common.p_func.vf_id = 0xff;
4078                 sb_data_e1x.common.p_func.vf_valid = false;
4079                 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
4080                 sb_data_e1x.common.same_igu_sb_1b = true;
4081                 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
4082                 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
4083                 hc_sm_p = sb_data_e1x.common.state_machine;
4084                 hc_index_p = sb_data_e1x.index_data;
4085                 sb_data_p = (u32 *)&sb_data_e1x;
4086                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4087         }
4088
4089         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
4090                                        igu_sb_id, igu_seg_id);
4091         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
4092                                        igu_sb_id, igu_seg_id);
4093
4094         DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
4095
4096         /* write indices to HW */
4097         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
4098 }
4099
4100 static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
4101                                         u8 sb_index, u8 disable, u16 usec)
4102 {
4103         int port = BP_PORT(bp);
4104         u8 ticks = usec / BNX2X_BTR;
4105
4106         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4107
4108         disable = disable ? 1 : (usec ? 0 : 1);
4109         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4110 }
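
/* Conversion sketch: assuming BNX2X_BTR is 4, usec = 100 arms the index with
 * 25 ticks; the index ends up disabled either on explicit request or when
 * usec is 0 (that is what "disable ? 1 : (usec ? 0 : 1)" encodes).
 */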
4111
4112 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4113                                      u16 tx_usec, u16 rx_usec)
4114 {
4115         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4116                                     false, rx_usec);
4117         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4118                                     false, tx_usec);
4119 }
4120
4121 static void bnx2x_init_def_sb(struct bnx2x *bp)
4122 {
4123         struct host_sp_status_block *def_sb = bp->def_status_blk;
4124         dma_addr_t mapping = bp->def_status_blk_mapping;
4125         int igu_sp_sb_index;
4126         int igu_seg_id;
4127         int port = BP_PORT(bp);
4128         int func = BP_FUNC(bp);
4129         int reg_offset;
4130         u64 section;
4131         int index;
4132         struct hc_sp_status_block_data sp_sb_data;
4133         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4134
4135         if (CHIP_INT_MODE_IS_BC(bp)) {
4136                 igu_sp_sb_index = DEF_SB_IGU_ID;
4137                 igu_seg_id = HC_SEG_ACCESS_DEF;
4138         } else {
4139                 igu_sp_sb_index = bp->igu_dsb_id;
4140                 igu_seg_id = IGU_SEG_ACCESS_DEF;
4141         }
4142
4143         /* ATTN */
4144         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4145                                             atten_status_block);
4146         def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
4147
4148         bp->attn_state = 0;
4149
4150         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4151                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4152         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4153                 int sindex;
4154                 /* take care of sig[0]..sig[4] */
4155                 for (sindex = 0; sindex < 4; sindex++)
4156                         bp->attn_group[index].sig[sindex] =
4157                            REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
4158
4159                 if (CHIP_IS_E2(bp))
4160                         /*
4161                          * enable5 is separate from the rest of the registers,
4162                          * and therefore the address skip is 4
4163                          * and not 16 between the different groups
4164                          */
4165                         bp->attn_group[index].sig[4] = REG_RD(bp,
4166                                         reg_offset + 0x10 + 0x4*index);
4167                 else
4168                         bp->attn_group[index].sig[4] = 0;
4169         }
4170
4171         if (bp->common.int_block == INT_BLOCK_HC) {
4172                 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4173                                      HC_REG_ATTN_MSG0_ADDR_L);
4174
4175                 REG_WR(bp, reg_offset, U64_LO(section));
4176                 REG_WR(bp, reg_offset + 4, U64_HI(section));
4177         } else if (CHIP_IS_E2(bp)) {
4178                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4179                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4180         }
4181
4182         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4183                                             sp_sb);
4184
4185         bnx2x_zero_sp_sb(bp);
4186
4187         sp_sb_data.host_sb_addr.lo      = U64_LO(section);
4188         sp_sb_data.host_sb_addr.hi      = U64_HI(section);
4189         sp_sb_data.igu_sb_id            = igu_sp_sb_index;
4190         sp_sb_data.igu_seg_id           = igu_seg_id;
4191         sp_sb_data.p_func.pf_id         = func;
4192         sp_sb_data.p_func.vnic_id       = BP_VN(bp);
4193         sp_sb_data.p_func.vf_id         = 0xff;
4194
4195         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4196
4197         bp->stats_pending = 0;
4198         bp->set_mac_pending = 0;
4199
4200         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
4201 }
4202
4203 void bnx2x_update_coalesce(struct bnx2x *bp)
4204 {
4205         int i;
4206
4207         for_each_eth_queue(bp, i)
4208                 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4209                                          bp->rx_ticks, bp->tx_ticks);
4210 }
4211
4212 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4213 {
4214         spin_lock_init(&bp->spq_lock);
4215         atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
4216
4217         bp->spq_prod_idx = 0;
4218         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4219         bp->spq_prod_bd = bp->spq;
4220         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4221 }
4222
4223 static void bnx2x_init_eq_ring(struct bnx2x *bp)
4224 {
4225         int i;
4226         for (i = 1; i <= NUM_EQ_PAGES; i++) {
4227                 union event_ring_elem *elem =
4228                         &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
4229
4230                 elem->next_page.addr.hi =
4231                         cpu_to_le32(U64_HI(bp->eq_mapping +
4232                                    BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4233                 elem->next_page.addr.lo =
4234                         cpu_to_le32(U64_LO(bp->eq_mapping +
4235                                    BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
4236         }
4237         bp->eq_cons = 0;
4238         bp->eq_prod = NUM_EQ_DESC;
4239         bp->eq_cons_sb = BNX2X_EQ_INDEX;
4240 }
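
/*
 * A sketch of the resulting layout: with NUM_EQ_PAGES = 2 the loop above
 * makes the last element of page 0 point at the start of page 1, and the
 * last element of page 1 wrap back to page 0 (the i % NUM_EQ_PAGES term),
 * so the event queue behaves as one circular ring even though it spans
 * several physical pages.
 */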
4241
4242 static void bnx2x_init_ind_table(struct bnx2x *bp)
4243 {
4244         int func = BP_FUNC(bp);
4245         int i;
4246
4247         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4248                 return;
4249
4250         DP(NETIF_MSG_IFUP,
4251            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4252         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4253                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4254                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4255                         bp->fp->cl_id + (i % (bp->num_queues -
4256                                 NONE_ETH_CONTEXT_USE)));
4257 }
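
/*
 * Example of the fill above (a sketch): with 4 ethernet queues and a
 * base client id C, the table entries become C, C+1, C+2, C+3, C, ...
 * so RSS hash buckets are spread round-robin over the eth clients only
 * (NONE_ETH_CONTEXT_USE discounts the non-eth contexts such as FCoE).
 */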
4258
4259 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4260 {
4261         int mode = bp->rx_mode;
4262         int port = BP_PORT(bp);
4263         u16 cl_id;
4264         u32 def_q_filters = 0;
4265
4266         /* All but management unicast packets should pass to the host as well */
4267         u32 llh_mask =
4268                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4269                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4270                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4271                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
4272
4273         switch (mode) {
4274         case BNX2X_RX_MODE_NONE: /* no Rx */
4275                 def_q_filters = BNX2X_ACCEPT_NONE;
4276 #ifdef BCM_CNIC
4277                 if (!NO_FCOE(bp)) {
4278                         cl_id = bnx2x_fcoe(bp, cl_id);
4279                         bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4280                 }
4281 #endif
4282                 break;
4283
4284         case BNX2X_RX_MODE_NORMAL:
4285                 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4286                                 BNX2X_ACCEPT_MULTICAST;
4287 #ifdef BCM_CNIC
4288                 if (!NO_FCOE(bp)) {
4289                         cl_id = bnx2x_fcoe(bp, cl_id);
4290                         bnx2x_rxq_set_mac_filters(bp, cl_id,
4291                                                   BNX2X_ACCEPT_UNICAST |
4292                                                   BNX2X_ACCEPT_MULTICAST);
4293                 }
4294 #endif
4295                 break;
4296
4297         case BNX2X_RX_MODE_ALLMULTI:
4298                 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4299                                 BNX2X_ACCEPT_ALL_MULTICAST;
4300 #ifdef BCM_CNIC
4301                 /*
4302                  *  Prevent duplication of multicast packets by configuring FCoE
4303                  *  L2 Client to receive only matched unicast frames.
4304                  */
4305                 if (!NO_FCOE(bp)) {
4306                         cl_id = bnx2x_fcoe(bp, cl_id);
4307                         bnx2x_rxq_set_mac_filters(bp, cl_id,
4308                                                   BNX2X_ACCEPT_UNICAST);
4309                 }
4310 #endif
4311                 break;
4312
4313         case BNX2X_RX_MODE_PROMISC:
4314                 def_q_filters |= BNX2X_PROMISCUOUS_MODE;
4315 #ifdef BCM_CNIC
4316                 /*
4317                  *  Prevent packet duplication by configuring DROP_ALL for FCoE
4318                  *  L2 Client.
4319                  */
4320                 if (!NO_FCOE(bp)) {
4321                         cl_id = bnx2x_fcoe(bp, cl_id);
4322                         bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4323                 }
4324 #endif
4325                 /* pass management unicast packets as well */
4326                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
4327                 break;
4328
4329         default:
4330                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4331                 break;
4332         }
4333
4334         cl_id = BP_L_ID(bp);
4335         bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
4336
4337         REG_WR(bp,
4338                (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
4339                        NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
4340
4341         DP(NETIF_MSG_IFUP, "rx mode %d\n"
4342                 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4343                 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
4344                 "unmatched_ucast 0x%x\n", mode,
4345                 bp->mac_filters.ucast_drop_all,
4346                 bp->mac_filters.mcast_drop_all,
4347                 bp->mac_filters.bcast_drop_all,
4348                 bp->mac_filters.ucast_accept_all,
4349                 bp->mac_filters.mcast_accept_all,
4350                 bp->mac_filters.bcast_accept_all,
4351                 bp->mac_filters.unmatched_unicast
4352         );
4353
4354         storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
4355 }
4356
4357 static void bnx2x_init_internal_common(struct bnx2x *bp)
4358 {
4359         int i;
4360
4361         if (!CHIP_IS_E1(bp)) {
4362
4363                 /* Xstorm needs to know whether to add ovlan to packets
4364                  * or not; in switch-independent mode we'll write 0 here... */
4365                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4366                         bp->mf_mode);
4367                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4368                         bp->mf_mode);
4369                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4370                         bp->mf_mode);
4371                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4372                         bp->mf_mode);
4373         }
4374
4375         if (IS_MF_SI(bp))
4376                 /*
4377                  * In switch independent mode, the TSTORM needs to accept
4378                  * packets that failed classification, since approximate match
4379                  * mac addresses aren't written to NIG LLH
4380                  */
4381                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4382                             TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
4383
4384         /* Zero this manually as its initialization is
4385            currently missing in the initTool */
4386         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4387                 REG_WR(bp, BAR_USTRORM_INTMEM +
4388                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4389         if (CHIP_IS_E2(bp)) {
4390                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4391                         CHIP_INT_MODE_IS_BC(bp) ?
4392                         HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4393         }
4394 }
4395
4396 static void bnx2x_init_internal_port(struct bnx2x *bp)
4397 {
4398         /* port */
4399         bnx2x_dcb_init_intmem_pfc(bp);
4400 }
4401
4402 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4403 {
4404         switch (load_code) {
4405         case FW_MSG_CODE_DRV_LOAD_COMMON:
4406         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4407                 bnx2x_init_internal_common(bp);
4408                 /* no break */
4409
4410         case FW_MSG_CODE_DRV_LOAD_PORT:
4411                 bnx2x_init_internal_port(bp);
4412                 /* no break */
4413
4414         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4415                 /* internal memory per function is
4416                    initialized inside bnx2x_pf_init */
4417                 break;
4418
4419         default:
4420                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4421                 break;
4422         }
4423 }
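
/*
 * Note: the missing breaks above are intentional - a COMMON load code
 * falls through and runs the port stage too, while the per-function
 * internal memory is initialized in bnx2x_pf_init() for every load
 * type.
 */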
4424
4425 static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4426 {
4427         struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4428
4429         fp->state = BNX2X_FP_STATE_CLOSED;
4430
4431         fp->index = fp->cid = fp_idx;
4432         fp->cl_id = BP_L_ID(bp) + fp_idx;
4433         fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4434         fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4435         /* the qZone id equals the FW (per-path) client id */
4436         fp->cl_qzone_id  = fp->cl_id +
4437                            BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4438                                 ETH_MAX_RX_CLIENTS_E1H);
4439         /* init shortcut */
4440         fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4441                             USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
4442                             USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4443         /* Setup SB indices */
4444         fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4445         fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4446
4447         DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  "
4448                                    "cl_id %d  fw_sb %d  igu_sb %d\n",
4449                    fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4450                    fp->igu_sb_id);
4451         bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4452                       fp->fw_sb_id, fp->igu_sb_id);
4453
4454         bnx2x_update_fpsb_idx(fp);
4455 }
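
/*
 * A sketch of the id layout above, assuming BP_L_ID(bp) = 0,
 * base_fw_ndsb = 0, igu_base_sb = 0 and CNIC_CONTEXT_USE = 1: queue 2
 * gets cl_id = 2 and fw_sb_id = igu_sb_id = 3, i.e. the eth status
 * blocks sit after the ones reserved for CNIC, while cl_qzone_id adds
 * a per-port offset so the two ports never share a qZone.
 */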
4456
4457 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4458 {
4459         int i;
4460
4461         for_each_eth_queue(bp, i)
4462                 bnx2x_init_fp_sb(bp, i);
4463 #ifdef BCM_CNIC
4464         if (!NO_FCOE(bp))
4465                 bnx2x_init_fcoe_fp(bp);
4466
4467         bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4468                       BNX2X_VF_ID_INVALID, false,
4469                       CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4470
4471 #endif
4472
4473         /* ensure status block indices were read */
4474         rmb();
4475
4476         bnx2x_init_def_sb(bp);
4477         bnx2x_update_dsb_idx(bp);
4478         bnx2x_init_rx_rings(bp);
4479         bnx2x_init_tx_rings(bp);
4480         bnx2x_init_sp_ring(bp);
4481         bnx2x_init_eq_ring(bp);
4482         bnx2x_init_internal(bp, load_code);
4483         bnx2x_pf_init(bp);
4484         bnx2x_init_ind_table(bp);
4485         bnx2x_stats_init(bp);
4486
4487         /* At this point, we are ready for interrupts */
4488         atomic_set(&bp->intr_sem, 0);
4489
4490         /* flush all before enabling interrupts */
4491         mb();
4492         mmiowb();
4493
4494         bnx2x_int_enable(bp);
4495
4496         /* Check for SPIO5 */
4497         bnx2x_attn_int_deasserted0(bp,
4498                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4499                                    AEU_INPUTS_ATTN_BITS_SPIO5);
4500 }
4501
4502 /* end of nic init */
4503
4504 /*
4505  * gzip service functions
4506  */
4507
4508 static int bnx2x_gunzip_init(struct bnx2x *bp)
4509 {
4510         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4511                                             &bp->gunzip_mapping, GFP_KERNEL);
4512         if (bp->gunzip_buf  == NULL)
4513                 goto gunzip_nomem1;
4514
4515         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4516         if (bp->strm  == NULL)
4517                 goto gunzip_nomem2;
4518
4519         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4520                                       GFP_KERNEL);
4521         if (bp->strm->workspace == NULL)
4522                 goto gunzip_nomem3;
4523
4524         return 0;
4525
4526 gunzip_nomem3:
4527         kfree(bp->strm);
4528         bp->strm = NULL;
4529
4530 gunzip_nomem2:
4531         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4532                           bp->gunzip_mapping);
4533         bp->gunzip_buf = NULL;
4534
4535 gunzip_nomem1:
4536         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4537                " decompression\n");
4538         return -ENOMEM;
4539 }
4540
4541 static void bnx2x_gunzip_end(struct bnx2x *bp)
4542 {
4543         kfree(bp->strm->workspace);
4544         kfree(bp->strm);
4545         bp->strm = NULL;
4546
4547         if (bp->gunzip_buf) {
4548                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4549                                   bp->gunzip_mapping);
4550                 bp->gunzip_buf = NULL;
4551         }
4552 }
4553
4554 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
4555 {
4556         int n, rc;
4557
4558         /* check gzip header */
4559         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4560                 BNX2X_ERR("Bad gzip header\n");
4561                 return -EINVAL;
4562         }
4563
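        /*
         * Per RFC 1952 the fixed gzip header is 10 bytes: magic (2),
         * compression method (1), flags (1), mtime (4), XFL (1), OS (1).
         * If the FNAME flag is set a NUL-terminated file name follows and
         * is skipped below; since the header is consumed manually,
         * inflateInit2() is called with -MAX_WBITS for a raw deflate
         * stream.
         */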
4564         n = 10;
4565
4566 #define FNAME                           0x8
4567
4568         if (zbuf[3] & FNAME)
4569                 while ((zbuf[n++] != 0) && (n < len));
4570
4571         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
4572         bp->strm->avail_in = len - n;
4573         bp->strm->next_out = bp->gunzip_buf;
4574         bp->strm->avail_out = FW_BUF_SIZE;
4575
4576         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4577         if (rc != Z_OK)
4578                 return rc;
4579
4580         rc = zlib_inflate(bp->strm, Z_FINISH);
4581         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4582                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4583                            bp->strm->msg);
4584
4585         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4586         if (bp->gunzip_outlen & 0x3)
4587                 netdev_err(bp->dev, "Firmware decompression error:"
4588                                     " gunzip_outlen (%d) not aligned\n",
4589                                 bp->gunzip_outlen);
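        /* firmware init data is consumed as u32s - convert bytes to dwords */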
4590         bp->gunzip_outlen >>= 2;
4591
4592         zlib_inflateEnd(bp->strm);
4593
4594         if (rc == Z_STREAM_END)
4595                 return 0;
4596
4597         return rc;
4598 }
4599
4600 /* nic load/unload */
4601
4602 /*
4603  * General service functions
4604  */
4605
4606 /* send a NIG loopback debug packet */
4607 static void bnx2x_lb_pckt(struct bnx2x *bp)
4608 {
4609         u32 wb_write[3];
4610
4611         /* Ethernet source and destination addresses */
4612         wb_write[0] = 0x55555555;
4613         wb_write[1] = 0x55555555;
4614         wb_write[2] = 0x20;             /* SOP */
4615         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4616
4617         /* NON-IP protocol */
4618         wb_write[0] = 0x09000000;
4619         wb_write[1] = 0x55555555;
4620         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4621         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4622 }
4623
4624 /* Some of the internal memories
4625  * are not directly readable from the driver;
4626  * to test them we send debug packets.
4627  */
4628 static int bnx2x_int_mem_test(struct bnx2x *bp)
4629 {
4630         int factor;
4631         int count, i;
4632         u32 val = 0;
4633
4634         if (CHIP_REV_IS_FPGA(bp))
4635                 factor = 120;
4636         else if (CHIP_REV_IS_EMUL(bp))
4637                 factor = 200;
4638         else
4639                 factor = 1;
4640
4641         /* Disable inputs of parser neighbor blocks */
4642         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4643         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4644         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4645         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4646
4647         /* Write 0 to parser credits for CFC search request */
4648         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4649
4650         /* send Ethernet packet */
4651         bnx2x_lb_pckt(bp);
4652
4653         /* TODO: do I reset the NIG statistics? */
4654         /* Wait until NIG register shows 1 packet of size 0x10 */
4655         count = 1000 * factor;
4656         while (count) {
4657
4658                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4659                 val = *bnx2x_sp(bp, wb_data[0]);
4660                 if (val == 0x10)
4661                         break;
4662
4663                 msleep(10);
4664                 count--;
4665         }
4666         if (val != 0x10) {
4667                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4668                 return -1;
4669         }
4670
4671         /* Wait until PRS register shows 1 packet */
4672         count = 1000 * factor;
4673         while (count) {
4674                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4675                 if (val == 1)
4676                         break;
4677
4678                 msleep(10);
4679                 count--;
4680         }
4681         if (val != 0x1) {
4682                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4683                 return -2;
4684         }
4685
4686         /* Reset and init BRB, PRS */
4687         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4688         msleep(50);
4689         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4690         msleep(50);
4691         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4692         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4693
4694         DP(NETIF_MSG_HW, "part2\n");
4695
4696         /* Disable inputs of parser neighbor blocks */
4697         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4698         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4699         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4700         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4701
4702         /* Write 0 to parser credits for CFC search request */
4703         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4704
4705         /* send 10 Ethernet packets */
4706         for (i = 0; i < 10; i++)
4707                 bnx2x_lb_pckt(bp);
4708
4709         /* Wait until NIG register shows 10 + 1
4710            packets of size 11*0x10 = 0xb0 */
4711         count = 1000 * factor;
4712         while (count) {
4713
4714                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4715                 val = *bnx2x_sp(bp, wb_data[0]);
4716                 if (val == 0xb0)
4717                         break;
4718
4719                 msleep(10);
4720                 count--;
4721         }
4722         if (val != 0xb0) {
4723                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4724                 return -3;
4725         }
4726
4727         /* Wait until PRS register shows 2 packets */
4728         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4729         if (val != 2)
4730                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
4731
4732         /* Write 1 to parser credits for CFC search request */
4733         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4734
4735         /* Wait until PRS register shows 3 packets */
4736         msleep(10 * factor);
4737         /* re-read the PRS packet counter */
4738         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4739         if (val != 3)
4740                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
4741
4742         /* clear NIG EOP FIFO */
4743         for (i = 0; i < 11; i++)
4744                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4745         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4746         if (val != 1) {
4747                 BNX2X_ERR("clear of NIG failed\n");
4748                 return -4;
4749         }
4750
4751         /* Reset and init BRB, PRS, NIG */
4752         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4753         msleep(50);
4754         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4755         msleep(50);
4756         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4757         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4758 #ifndef BCM_CNIC
4759         /* set NIC mode */
4760         REG_WR(bp, PRS_REG_NIC_MODE, 1);
4761 #endif
4762
4763         /* Enable inputs of parser neighbor blocks */
4764         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4765         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4766         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
4767         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
4768
4769         DP(NETIF_MSG_HW, "done\n");
4770
4771         return 0; /* OK */
4772 }
4773
4774 static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
4775 {
4776         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4777         if (CHIP_IS_E2(bp))
4778                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4779         else
4780                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
4781         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4782         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4783         /*
4784          * Mask read length error interrupts in BRB for the parser
4785          * (parsing unit and 'checksum and crc' unit);
4786          * these errors are legal (the PU reads a fixed length and the CAC
4787          * can cause a read length error on truncated packets).
4788          */
4789         REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
4790         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4791         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4792         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4793         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4794         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
4795 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4796 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
4797         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4798         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4799         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
4800 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4801 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
4802         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4803         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4804         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4805         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
4806 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4807 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4808
4809         if (CHIP_REV_IS_FPGA(bp))
4810                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4811         else if (CHIP_IS_E2(bp))
4812                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4813                            (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4814                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4815                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4816                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4817                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
4818         else
4819                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
4820         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4821         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4822         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
4823 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4824 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
4825         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4826         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
4827 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4828         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bit 3,4 masked */
4829 }
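
/*
 * A note on the writes above: in these INT_MASK registers a set bit
 * masks (silences) the corresponding attention source, so 0 enables
 * every interrupt of a block while values such as 0xFC00 (BRB1) or
 * 0x18 (PBF) leave only the named bits masked.
 */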
4830
4831 static void bnx2x_reset_common(struct bnx2x *bp)
4832 {
4833         /* reset_common */
4834         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4835                0xd3ffff7f);
4836         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4837 }
4838
4839 static void bnx2x_init_pxp(struct bnx2x *bp)
4840 {
4841         u16 devctl;
4842         int r_order, w_order;
4843
4844         pci_read_config_word(bp->pdev,
4845                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4846         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4847         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4848         if (bp->mrrs == -1)
4849                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4850         else {
4851                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4852                 r_order = bp->mrrs;
4853         }
4854
4855         bnx2x_init_pxp_arb(bp, r_order, w_order);
4856 }
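
/*
 * Example of the encoding above: PCIe keeps Max_Payload_Size in DEVCTL
 * bits 7:5 and Max_Read_Request_Size in bits 14:12 as 3-bit codes with
 * size = 128 << code, so an order of 2 means 512-byte transactions;
 * bnx2x_init_pxp_arb() is handed these orders directly.
 */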
4857
4858 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4859 {
4860         int is_required;
4861         u32 val;
4862         int port;
4863
4864         if (BP_NOMCP(bp))
4865                 return;
4866
4867         is_required = 0;
4868         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4869               SHARED_HW_CFG_FAN_FAILURE_MASK;
4870
4871         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4872                 is_required = 1;
4873
4874         /*
4875          * The fan failure mechanism is usually related to the PHY type since
4876          * the power consumption of the board is affected by the PHY. Currently,
4877          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4878          */
4879         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4880                 for (port = PORT_0; port < PORT_MAX; port++) {
4881                         is_required |=
4882                                 bnx2x_fan_failure_det_req(
4883                                         bp,
4884                                         bp->common.shmem_base,
4885                                         bp->common.shmem2_base,
4886                                         port);
4887                 }
4888
4889         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4890
4891         if (is_required == 0)
4892                 return;
4893
4894         /* Fan failure is indicated by SPIO 5 */
4895         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4896                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
4897
4898         /* set to active low mode */
4899         val = REG_RD(bp, MISC_REG_SPIO_INT);
4900         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
4901                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
4902         REG_WR(bp, MISC_REG_SPIO_INT, val);
4903
4904         /* enable interrupt to signal the IGU */
4905         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4906         val |= (1 << MISC_REGISTERS_SPIO_5);
4907         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4908 }
4909
4910 static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4911 {
4912         u32 offset = 0;
4913
4914         if (CHIP_IS_E1(bp))
4915                 return;
4916         if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4917                 return;
4918
4919         switch (BP_ABS_FUNC(bp)) {
4920         case 0:
4921                 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4922                 break;
4923         case 1:
4924                 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4925                 break;
4926         case 2:
4927                 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4928                 break;
4929         case 3:
4930                 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4931                 break;
4932         case 4:
4933                 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4934                 break;
4935         case 5:
4936                 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4937                 break;
4938         case 6:
4939                 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4940                 break;
4941         case 7:
4942                 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4943                 break;
4944         default:
4945                 return;
4946         }
4947
4948         REG_WR(bp, offset, pretend_func_num);
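        /* read back to flush the posted write before pretended accesses */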
4949         REG_RD(bp, offset);
4950         DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4951 }
4952
4953 static void bnx2x_pf_disable(struct bnx2x *bp)
4954 {
4955         u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4956         val &= ~IGU_PF_CONF_FUNC_EN;
4957
4958         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4959         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4960         REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4961 }
4962
4963 static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4964 {
4965         u32 val, i;
4966
4967         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_ABS_FUNC(bp));
4968
4969         bnx2x_reset_common(bp);
4970         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4971         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4972
4973         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
4974         if (!CHIP_IS_E1(bp))
4975                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
4976
4977         if (CHIP_IS_E2(bp)) {
4978                 u8 fid;
4979
4980                 /*
4981                  * In 4-port or 2-port mode we need to turn off master-enable
4982                  * for everyone; after that, turn it back on for self.
4983                  * So we disregard multi-function or not, and always disable
4984                  * for all functions on the given path; this means 0,2,4,6 for
4985                  * path 0 and 1,3,5,7 for path 1.
4986                  */
4987                 for (fid = BP_PATH(bp); fid  < E2_FUNC_MAX*2; fid += 2) {
4988                         if (fid == BP_ABS_FUNC(bp)) {
4989                                 REG_WR(bp,
4990                                     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4991                                     1);
4992                                 continue;
4993                         }
4994
4995                         bnx2x_pretend_func(bp, fid);
4996                         /* clear pf enable */
4997                         bnx2x_pf_disable(bp);
4998                         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4999                 }
5000         }
5001
5002         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5003         if (CHIP_IS_E1(bp)) {
5004                 /* enable HW interrupt from PXP on USDM overflow
5005                    bit 16 on INT_MASK_0 */
5006                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5007         }
5008
5009         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5010         bnx2x_init_pxp(bp);
5011
5012 #ifdef __BIG_ENDIAN
5013         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5014         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5015         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5016         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5017         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5018         /* make sure this value is 0 */
5019         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5020
5021 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5022         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5023         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5024         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5025         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5026 #endif
5027
5028         bnx2x_ilt_init_page_size(bp, INITOP_SET);
5029
5030         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5031                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5032
5033         /* let the HW do its magic ... */
5034         msleep(100);
5035         /* finish PXP init */
5036         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5037         if (val != 1) {
5038                 BNX2X_ERR("PXP2 CFG failed\n");
5039                 return -EBUSY;
5040         }
5041         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5042         if (val != 1) {
5043                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5044                 return -EBUSY;
5045         }
5046
5047         /* Timers bug workaround, E2 only: we need to set the entire ILT to
5048          * have entries with value "0" and valid bit on.
5049          * This needs to be done by the first PF that is loaded in a path
5050          * (i.e. common phase)
5051          */
5052         if (CHIP_IS_E2(bp)) {
5053                 struct ilt_client_info ilt_cli;
5054                 struct bnx2x_ilt ilt;
5055                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
5056                 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
5057
5058                 /* initialize dummy TM client */
5059                 ilt_cli.start = 0;
5060                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
5061                 ilt_cli.client_num = ILT_CLIENT_TM;
5062
5063                 /* Step 1: set zeroes to all ilt page entries with valid bit on.
5064                  * Step 2: set the timers first/last ilt entry to point
5065                  * to the entire range to prevent an ILT range error for the
5066                  * 3rd/4th vnic (this code assumes the vnic exists).
5067                  *
5068                  * Both steps are performed by the call to
5069                  * bnx2x_ilt_client_init_op() with a dummy TM client.
5070                  *
5071                  * We must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
5072                  * and its counterpart are split registers.
5073                  */
5074                 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
5075                 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
5076                 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5077
5078                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
5079                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
5080                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
5081         }
5082
5083
5084         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5085         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5086
5087         if (CHIP_IS_E2(bp)) {
5088                 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
5089                                 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
5090                 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
5091
5092                 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
5093
5094                 /* let the HW do its magic ... */
5095                 do {
5096                         msleep(200);
5097                         val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
5098                 } while (factor-- && (val != 1));
5099
5100                 if (val != 1) {
5101                         BNX2X_ERR("ATC_INIT failed\n");
5102                         return -EBUSY;
5103                 }
5104         }
5105
5106         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5107
5108         /* clean the DMAE memory */
5109         bp->dmae_ready = 1;
5110         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5111
5112         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5113         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5114         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5115         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5116
5117         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5118         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5119         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5120         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5121
5122         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5123
5124         if (CHIP_MODE_IS_4_PORT(bp))
5125                 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
5126
5127         /* QM queues pointers table */
5128         bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
5129
5130         /* soft reset pulse */
5131         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5132         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5133
5134 #ifdef BCM_CNIC
5135         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5136 #endif
5137
5138         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5139         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5140
5141         if (!CHIP_REV_IS_SLOW(bp)) {
5142                 /* enable hw interrupt from doorbell Q */
5143                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5144         }
5145
5146         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5147         if (CHIP_MODE_IS_4_PORT(bp)) {
5148                 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5149                 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5150         }
5151
5152         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5153         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5154 #ifndef BCM_CNIC
5155         /* set NIC mode */
5156         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5157 #endif
5158         if (!CHIP_IS_E1(bp))
5159                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
5160
5161         if (CHIP_IS_E2(bp)) {
5162                 /* Bit-map indicating which L2 hdrs may appear after the
5163                    basic Ethernet header */
5164                 int has_ovlan = IS_MF_SD(bp);
5165                 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5166                 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5167         }
5168
5169         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5170         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5171         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5172         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5173
5174         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5175         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5176         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5177         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5178
5179         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5180         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5181         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5182         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5183
5184         if (CHIP_MODE_IS_4_PORT(bp))
5185                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5186
5187         /* sync semi rtc */
5188         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5189                0x80000000);
5190         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5191                0x80000000);
5192
5193         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5194         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5195         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5196
5197         if (CHIP_IS_E2(bp)) {
5198                 int has_ovlan = IS_MF_SD(bp);
5199                 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5200                 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5201         }
5202
5203         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5204         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5205                 REG_WR(bp, i, random32());
5206
5207         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5208 #ifdef BCM_CNIC
5209         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5210         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5211         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5212         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5213         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5214         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5215         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5216         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5217         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5218         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5219 #endif
5220         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5221
5222         if (sizeof(union cdu_context) != 1024)
5223                 /* we currently assume that a context is 1024 bytes */
5224                 dev_alert(&bp->pdev->dev, "please adjust the size "
5225                                           "of cdu_context(%ld)\n",
5226                          (long)sizeof(union cdu_context));
5227
5228         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5229         val = (4 << 24) + (0 << 12) + 1024;
5230         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5231
5232         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5233         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5234         /* enable context validation interrupt from CFC */
5235         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5236
5237         /* set the thresholds to prevent CFC/CDU race */
5238         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5239
5240         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5241
5242         if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5243                 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5244
5245         bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
5246         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5247
5248         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5249         /* Reset PCIE errors for debug */
5250         REG_WR(bp, 0x2814, 0xffffffff);
5251         REG_WR(bp, 0x3820, 0xffffffff);
5252
5253         if (CHIP_IS_E2(bp)) {
5254                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5255                            (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5256                                 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5257                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5258                            (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5259                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5260                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5261                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5262                            (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5263                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5264                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5265         }
5266
5267         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5268         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5269         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5270         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5271
5272         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5273         if (!CHIP_IS_E1(bp)) {
5274                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5275                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
5276         }
5277         if (CHIP_IS_E2(bp)) {
5278                 /* Bit-map indicating which L2 hdrs may appear after the
5279                    basic Ethernet header */
5280                 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
5281         }
5282
5283         if (CHIP_REV_IS_SLOW(bp))
5284                 msleep(200);
5285
5286         /* finish CFC init */
5287         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5288         if (val != 1) {
5289                 BNX2X_ERR("CFC LL_INIT failed\n");
5290                 return -EBUSY;
5291         }
5292         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5293         if (val != 1) {
5294                 BNX2X_ERR("CFC AC_INIT failed\n");
5295                 return -EBUSY;
5296         }
5297         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5298         if (val != 1) {
5299                 BNX2X_ERR("CFC CAM_INIT failed\n");
5300                 return -EBUSY;
5301         }
5302         REG_WR(bp, CFC_REG_DEBUG0, 0);
5303
5304         if (CHIP_IS_E1(bp)) {
5305                 /* read the NIG statistic
5306                    to see if this is our first init since power-up */
5307                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5308                 val = *bnx2x_sp(bp, wb_data[0]);
5309
5310                 /* do internal memory self test */
5311                 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5312                         BNX2X_ERR("internal mem self test failed\n");
5313                         return -EBUSY;
5314                 }
5315         }
5316
5317         bnx2x_setup_fan_failure_detection(bp);
5318
5319         /* clear PXP2 attentions */
5320         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5321
5322         bnx2x_enable_blocks_attention(bp);
5323         if (CHIP_PARITY_ENABLED(bp))
5324                 bnx2x_enable_blocks_parity(bp);
5325
5326         if (!BP_NOMCP(bp)) {
5327                 /* In E2 2-PORT mode, the same ext phy is used for both paths */
5328                 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5329                     CHIP_IS_E1x(bp)) {
5330                         u32 shmem_base[2], shmem2_base[2];
5331                         shmem_base[0] =  bp->common.shmem_base;
5332                         shmem2_base[0] = bp->common.shmem2_base;
5333                         if (CHIP_IS_E2(bp)) {
5334                                 shmem_base[1] =
5335                                         SHMEM2_RD(bp, other_shmem_base_addr);
5336                                 shmem2_base[1] =
5337                                         SHMEM2_RD(bp, other_shmem2_base_addr);
5338                         }
5339                         bnx2x_acquire_phy_lock(bp);
5340                         bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5341                                               bp->common.chip_id);
5342                         bnx2x_release_phy_lock(bp);
5343                 }
5344         } else
5345                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5346
5347         return 0;
5348 }
5349
5350 static int bnx2x_init_hw_port(struct bnx2x *bp)
5351 {
5352         int port = BP_PORT(bp);
5353         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5354         u32 low, high;
5355         u32 val;
5356
5357         DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
5358
5359         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5360
5361         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5362         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5363
5364         /* Timers bug workaround: the common phase disables the pf_master
5365          * bit in PGLUE; we need to enable it here before any DMAE accesses
5366          * are attempted. Therefore we manually add the enable-master in the
5367          * port phase (it also happens in the function phase).
5368          */
5369         if (CHIP_IS_E2(bp))
5370                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5371
5372         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5373         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5374         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
5375         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5376
5377         /* QM cid (connection) count */
5378         bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
5379
5380 #ifdef BCM_CNIC
5381         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5382         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5383         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
5384 #endif
5385
5386         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5387
5388         if (CHIP_MODE_IS_4_PORT(bp))
5389                 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
5390
5391         if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5392                 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5393                 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5394                         /* no pause for emulation and FPGA */
5395                         low = 0;
5396                         high = 513;
5397                 } else {
5398                         if (IS_MF(bp))
5399                                 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5400                         else if (bp->dev->mtu > 4096) {
5401                                 if (bp->flags & ONE_PORT_FLAG)
5402                                         low = 160;
5403                                 else {
5404                                         val = bp->dev->mtu;
5405                                         /* (24*1024 + val*4)/256 */
5406                                         low = 96 + (val/64) +
5407                                                         ((val % 64) ? 1 : 0);
5408                                 }
5409                         } else
5410                                 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5411                         high = low + 56;        /* 14*1024/256 */
5412                 }
5413                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5414                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5415         }
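
        /*
         * A worked example of the thresholds above (a sketch): values are
         * in 256-byte BRB blocks, i.e. low = (24*1024 + 4*MTU)/256 rounded
         * up.  For an MTU of 9000 that gives low = 96 + 141 = 237 and
         * high = 237 + 56 = 293 (14KB of extra headroom).
         */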
5416
5417         if (CHIP_MODE_IS_4_PORT(bp)) {
5418                 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5419                 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5420                 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5421                                           BRB1_REG_MAC_GUARANTIED_0), 40);
5422         }
5423
5424         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5425
5426         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5427         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5428         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5429         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5430
5431         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5432         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5433         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5434         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5435         if (CHIP_MODE_IS_4_PORT(bp))
5436                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
5437
5438         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5439         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5440
5441         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5442
5443         if (!CHIP_IS_E2(bp)) {
5444                 /* configure PBF to work without PAUSE mtu 9000 */
5445                 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5446
5447                 /* update threshold */
5448                 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5449                 /* update init credit */
5450                 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5451
5452                 /* probe changes */
5453                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5454                 udelay(50);
5455                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5456         }
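
        /*
         * A sketch of the arithmetic above: 9040 is roughly a 9000-byte
         * jumbo frame plus header overhead, expressed in 16-byte units
         * (9040/16 = 565), and the initial credit adds a fixed margin on
         * top so a full frame can be absorbed with PAUSE disabled.
         */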
5457
5458 #ifdef BCM_CNIC
5459         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
5460 #endif
5461         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5462         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5463
5464         if (CHIP_IS_E1(bp)) {
5465                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5466                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5467         }
5468         bnx2x_init_block(bp, HC_BLOCK, init_stage);
5469
5470         bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5471
5472         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5473         /* init aeu_mask_attn_func_0/1:
5474          *  - SF mode: bits 3-7 are masked, only bits 0-2 are in use
5475          *  - MF mode: bit 3 is masked, bits 0-2 are in use as in SF,
5476          *             bits 4-7 are used for "per vn group attention" */
5477         val = IS_MF(bp) ? 0xF7 : 0x7;
5478         /* Enable DCBX attention for all but E1 */
5479         val |= CHIP_IS_E1(bp) ? 0 : 0x10;
5480         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
5481
5482         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5483         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5484         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5485         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5486         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5487
5488         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5489
5490         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5491
5492         if (!CHIP_IS_E1(bp)) {
5493                 /* 0x2 disable mf_ov, 0x1 enable */
5494                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5495                        (IS_MF_SD(bp) ? 0x1 : 0x2));
5496
5497                 if (CHIP_IS_E2(bp)) {
5498                         val = 0;
5499                         switch (bp->mf_mode) {
5500                         case MULTI_FUNCTION_SD:
5501                                 val = 1;
5502                                 break;
5503                         case MULTI_FUNCTION_SI:
5504                                 val = 2;
5505                                 break;
5506                         }
5507
5508                         REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5509                                                   NIG_REG_LLH0_CLS_TYPE), val);
5510                 }
5511                 {
5512                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5513                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5514                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5515                 }
5516         }
5517
5518         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5519         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5520         if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
5521                                       bp->common.shmem2_base, port)) {
5522                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5523                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5524                 val = REG_RD(bp, reg_addr);
5525                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5526                 REG_WR(bp, reg_addr, val);
5527         }
5528         bnx2x__link_reset(bp);
5529
5530         return 0;
5531 }
5532
5533 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5534 {
5535         int reg;
5536
5537         if (CHIP_IS_E1(bp))
5538                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5539         else
5540                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5541
5542         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5543 }
5544
5545 static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5546 {
5547         bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5548 }
5549
5550 static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5551 {
5552         u32 i, base = FUNC_ILT_BASE(func);
5553         for (i = base; i < base + ILT_PER_FUNC; i++)
5554                 bnx2x_ilt_wr(bp, i, 0);
5555 }
5556
5557 static int bnx2x_init_hw_func(struct bnx2x *bp)
5558 {
5559         int port = BP_PORT(bp);
5560         int func = BP_FUNC(bp);
5561         struct bnx2x_ilt *ilt = BP_ILT(bp);
5562         u16 cdu_ilt_start;
5563         u32 addr, val;
5564         u32 main_mem_base, main_mem_size, main_mem_prty_clr;
5565         int i, main_mem_width;
5566
5567         DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
5568
5569         /* set MSI reconfigure capability */
5570         if (bp->common.int_block == INT_BLOCK_HC) {
5571                 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5572                 val = REG_RD(bp, addr);
5573                 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5574                 REG_WR(bp, addr, val);
5575         }
5576
5577         ilt = BP_ILT(bp);
5578         cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
5579
5580         for (i = 0; i < L2_ILT_LINES(bp); i++) {
5581                 ilt->lines[cdu_ilt_start + i].page =
5582                         bp->context.vcxt + (ILT_PAGE_CIDS * i);
5583                 ilt->lines[cdu_ilt_start + i].page_mapping =
5584                         bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5585                 /* cdu ilt pages are allocated manually so there's
5586                  * no need to set the size */
5587         }
5588         bnx2x_ilt_init_op(bp, INITOP_SET);
5589
5590 #ifdef BCM_CNIC
5591         bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
5592
5593         /* The T1 hash bits value determines the number of T1 entries */
5594         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
5595 #endif
5596
5597 #ifndef BCM_CNIC
5598         /* set NIC mode */
5599         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5600 #endif  /* BCM_CNIC */
5601
5602         if (CHIP_IS_E2(bp)) {
5603                 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5604
5605                 /* Turn on a single ISR mode in IGU if driver is going to use
5606                  * INT#x or MSI
5607                  */
5608                 if (!(bp->flags & USING_MSIX_FLAG))
5609                         pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5610                 /*
5611                  * Timers bug workaround: function init part.
5612                  * Wait 20 msec after initializing the ILT to make
5613                  * sure there are no requests in one of the PXP
5614                  * internal queues with "old" ILT addresses.
5615                  */
5616                 msleep(20);
5617                 /*
5618                  * Master enable - must be set here because of the WB DMAE
5619                  * writes performed before this register is re-initialized
5620                  * as part of the regular function init.
5621                  */
5622                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5623                 /* Enable the function in IGU */
5624                 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5625         }
5626
5627         bp->dmae_ready = 1;
5628
5629         bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5630
5631         if (CHIP_IS_E2(bp))
5632                 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5633
5634         bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5635         bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5636         bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5637         bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5638         bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5639         bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5640         bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5641         bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5642         bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5643
5644         if (CHIP_IS_E2(bp)) {
5645                 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5646                                                                 BP_PATH(bp));
5647                 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5648                                                                 BP_PATH(bp));
5649         }
5650
5651         if (CHIP_MODE_IS_4_PORT(bp))
5652                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5653
5654         if (CHIP_IS_E2(bp))
5655                 REG_WR(bp, QM_REG_PF_EN, 1);
5656
5657         bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
5658
5659         if (CHIP_MODE_IS_4_PORT(bp))
5660                 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5661
5662         bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5663         bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5664         bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5665         bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5666         bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5667         bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5668         bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5669         bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5670         bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5671         bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5672         bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
5673         if (CHIP_IS_E2(bp))
5674                 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5675
5676         bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5677
5678         bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
5679
5680         if (CHIP_IS_E2(bp))
5681                 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5682
5683         if (IS_MF(bp)) {
5684                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5685                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
5686         }
5687
5688         bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5689
5690         /* HC init per function */
5691         if (bp->common.int_block == INT_BLOCK_HC) {
5692                 if (CHIP_IS_E1H(bp)) {
5693                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5694
5695                         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5696                         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5697                 }
5698                 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5699
5700         } else {
5701                 int num_segs, sb_idx, prod_offset;
5702
5703                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5704
5705                 if (CHIP_IS_E2(bp)) {
5706                         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5707                         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5708                 }
5709
5710                 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5711
5712                 if (CHIP_IS_E2(bp)) {
5713                         int dsb_idx = 0;
5714                         /**
5715                          * Producer memory:
5716                          * E2 mode: address 0-135 match to the mapping memory;
5717                          * 136 - PF0 default prod; 137 - PF1 default prod;
5718                          * 138 - PF2 default prod; 139 - PF3 default prod;
5719                          * 140 - PF0 attn prod;    141 - PF1 attn prod;
5720                          * 142 - PF2 attn prod;    143 - PF3 attn prod;
5721                          * 144-147 reserved.
5722                          *
5723                          * E1.5 mode - In backward compatible mode;
5724                          * for non default SB; each even line in the memory
5725                          * holds the U producer and each odd line hold
5726                          * the C producer. The first 128 producers are for
5727                          * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5728                          * producers are for the DSB for each PF.
5729                          * Each PF has five segments: (the order inside each
5730                          * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5731                          * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5732                          * 144-147 attn prods;
5733                          */
5734                         /* non-default-status-blocks */
5735                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5736                                 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5737                         for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5738                                 prod_offset = (bp->igu_base_sb + sb_idx) *
5739                                         num_segs;
5740
5741                                 for (i = 0; i < num_segs; i++) {
5742                                         addr = IGU_REG_PROD_CONS_MEMORY +
5743                                                         (prod_offset + i) * 4;
5744                                         REG_WR(bp, addr, 0);
5745                                 }
5746                                 /* send consumer update with value 0 */
5747                                 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5748                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5749                                 bnx2x_igu_clear_sb(bp,
5750                                                    bp->igu_base_sb + sb_idx);
5751                         }
5752
5753                         /* default-status-blocks */
5754                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5755                                 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5756
5757                         if (CHIP_MODE_IS_4_PORT(bp))
5758                                 dsb_idx = BP_FUNC(bp);
5759                         else
5760                                 dsb_idx = BP_E1HVN(bp);
5761
5762                         prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5763                                        IGU_BC_BASE_DSB_PROD + dsb_idx :
5764                                        IGU_NORM_BASE_DSB_PROD + dsb_idx);
5765
5766                         for (i = 0; i < (num_segs * E1HVN_MAX);
5767                              i += E1HVN_MAX) {
5768                                 addr = IGU_REG_PROD_CONS_MEMORY +
5769                                                         (prod_offset + i)*4;
5770                                 REG_WR(bp, addr, 0);
5771                         }
5772                         /* send consumer update with 0 */
5773                         if (CHIP_INT_MODE_IS_BC(bp)) {
5774                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5775                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5776                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5777                                              CSTORM_ID, 0, IGU_INT_NOP, 1);
5778                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5779                                              XSTORM_ID, 0, IGU_INT_NOP, 1);
5780                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5781                                              TSTORM_ID, 0, IGU_INT_NOP, 1);
5782                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5783                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
5784                         } else {
5785                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5786                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5787                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5788                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
5789                         }
5790                         bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5791
5792                         /* !!! these should become driver const once
5793                            rf-tool supports split-68 const */
5794                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5795                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5796                         REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5797                         REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5798                         REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5799                         REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5800                 }
5801         }
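        /* Worked example (illustrative, assuming IGU_BC_NDSB_NUM_SEGS is 2,
         * i.e. a U and a C producer per NDSB as the layout comment above
         * suggests): with igu_base_sb = 16 and sb_idx = 0 in backward
         * compatible mode, prod_offset = 16 * 2 = 32 and the NDSB loop
         * zeroes IGU_REG_PROD_CONS_MEMORY + 32*4 and + 33*4.
         */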
5802
5803         /* Reset PCIE errors for debug */
5804         REG_WR(bp, 0x2114, 0xffffffff);
5805         REG_WR(bp, 0x2120, 0xffffffff);
5806
5807         bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5808         bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5809         bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5810         bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5811         bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5812         bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5813
5814         if (CHIP_IS_E1x(bp)) {
5815                 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /* dwords */
5816                 main_mem_base = HC_REG_MAIN_MEMORY +
5817                                 BP_PORT(bp) * (main_mem_size * 4);
5818                 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
5819                 main_mem_width = 8;
5820
5821                 val = REG_RD(bp, main_mem_prty_clr);
5822                 if (val)
5823                         DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
5824                                           "block during "
5825                                           "function init (0x%x)!\n", val);
5826
5827                 /* Clear "false" parity errors in MSI-X table */
5828                 for (i = main_mem_base;
5829                      i < main_mem_base + main_mem_size * 4;
5830                      i += main_mem_width) {
5831                         bnx2x_read_dmae(bp, i, main_mem_width / 4);
5832                         bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
5833                                          i, main_mem_width / 4);
5834                 }
5835                 /* Clear HC parity attention */
5836                 REG_RD(bp, main_mem_prty_clr);
5837         }
5838
5839         bnx2x_phy_probe(&bp->link_params);
5840
5841         return 0;
5842 }
5843
5844 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5845 {
5846         int rc = 0;
5847
5848         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5849            BP_ABS_FUNC(bp), load_code);
5850
5851         bp->dmae_ready = 0;
5852         mutex_init(&bp->dmae_mutex);
5853         rc = bnx2x_gunzip_init(bp);
5854         if (rc)
5855                 return rc;
5856
5857         switch (load_code) {
5858         case FW_MSG_CODE_DRV_LOAD_COMMON:
5859         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5860                 rc = bnx2x_init_hw_common(bp, load_code);
5861                 if (rc)
5862                         goto init_hw_err;
5863                 /* no break */
5864
5865         case FW_MSG_CODE_DRV_LOAD_PORT:
5866                 rc = bnx2x_init_hw_port(bp);
5867                 if (rc)
5868                         goto init_hw_err;
5869                 /* no break */
5870
5871         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5872                 rc = bnx2x_init_hw_func(bp);
5873                 if (rc)
5874                         goto init_hw_err;
5875                 break;
5876
5877         default:
5878                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5879                 break;
5880         }
5881
5882         if (!BP_NOMCP(bp)) {
5883                 int mb_idx = BP_FW_MB_IDX(bp);
5884
5885                 bp->fw_drv_pulse_wr_seq =
5886                                 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
5887                                  DRV_PULSE_SEQ_MASK);
5888                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5889         }
5890
5891 init_hw_err:
5892         bnx2x_gunzip_end(bp);
5893
5894         return rc;
5895 }
5896
5897 void bnx2x_free_mem(struct bnx2x *bp)
5898 {
5899
5900 #define BNX2X_PCI_FREE(x, y, size) \
5901         do { \
5902                 if (x) { \
5903                         dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
5904                         x = NULL; \
5905                         y = 0; \
5906                 } \
5907         } while (0)
5908
5909 #define BNX2X_FREE(x) \
5910         do { \
5911                 if (x) { \
5912                         kfree((void *)x); \
5913                         x = NULL; \
5914                 } \
5915         } while (0)
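/* The do { ... } while (0) wrappers above are what let the macros behave as
 * a single statement in every context, e.g.
 *
 *      if (cond)
 *              BNX2X_FREE(ptr);
 *      else
 *              do_other(ptr);
 *
 * would misparse with a bare { } block.  Both macros also NULL the pointer
 * (and zero the mapping) after freeing, which is what makes this function
 * safe to call on a partially completed allocation.
 */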
5916
5917         int i;
5918
5919         /* fastpath */
5920         /* Common */
5921         for_each_queue(bp, i) {
5922 #ifdef BCM_CNIC
5923                 /* FCoE client uses default status block */
5924                 if (IS_FCOE_IDX(i)) {
5925                         union host_hc_status_block *sb =
5926                                 &bnx2x_fp(bp, i, status_blk);
5927                         memset(sb, 0, sizeof(union host_hc_status_block));
5928                         bnx2x_fp(bp, i, status_blk_mapping) = 0;
5929                 } else {
5930 #endif
5931                 /* status blocks */
5932                 if (CHIP_IS_E2(bp))
5933                         BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5934                                        bnx2x_fp(bp, i, status_blk_mapping),
5935                                        sizeof(struct host_hc_status_block_e2));
5936                 else
5937                         BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5938                                        bnx2x_fp(bp, i, status_blk_mapping),
5939                                        sizeof(struct host_hc_status_block_e1x));
5940 #ifdef BCM_CNIC
5941                 }
5942 #endif
5943         }
5944         /* Rx */
5945         for_each_rx_queue(bp, i) {
5946
5947                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5948                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5949                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5950                                bnx2x_fp(bp, i, rx_desc_mapping),
5951                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5952
5953                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5954                                bnx2x_fp(bp, i, rx_comp_mapping),
5955                                sizeof(struct eth_fast_path_rx_cqe) *
5956                                NUM_RCQ_BD);
5957
5958                 /* SGE ring */
5959                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5960                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5961                                bnx2x_fp(bp, i, rx_sge_mapping),
5962                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5963         }
5964         /* Tx */
5965         for_each_tx_queue(bp, i) {
5966
5967                 /* fastpath tx rings: tx_buf tx_desc */
5968                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5969                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5970                                bnx2x_fp(bp, i, tx_desc_mapping),
5971                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5972         }
5973         /* end of fastpath */
5974
5975         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5976                        sizeof(struct host_sp_status_block));
5977
5978         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5979                        sizeof(struct bnx2x_slowpath));
5980
5981         BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5982                        bp->context.size);
5983
5984         bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5985
5986         BNX2X_FREE(bp->ilt->lines);
5987
5988 #ifdef BCM_CNIC
5989         if (CHIP_IS_E2(bp))
5990                 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5991                                sizeof(struct host_hc_status_block_e2));
5992         else
5993                 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5994                                sizeof(struct host_hc_status_block_e1x));
5995
5996         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
5997 #endif
5998
5999         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6000
6001         BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
6002                        BCM_PAGE_SIZE * NUM_EQ_PAGES);
6003
6004 #undef BNX2X_PCI_FREE
6005 #undef BNX2X_FREE
6006 }
6007
6008 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
6009 {
6010         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
6011         if (CHIP_IS_E2(bp)) {
6012                 bnx2x_fp(bp, index, sb_index_values) =
6013                         (__le16 *)status_blk.e2_sb->sb.index_values;
6014                 bnx2x_fp(bp, index, sb_running_index) =
6015                         (__le16 *)status_blk.e2_sb->sb.running_index;
6016         } else {
6017                 bnx2x_fp(bp, index, sb_index_values) =
6018                         (__le16 *)status_blk.e1x_sb->sb.index_values;
6019                 bnx2x_fp(bp, index, sb_running_index) =
6020                         (__le16 *)status_blk.e1x_sb->sb.running_index;
6021         }
6022 }
6023
6024 int bnx2x_alloc_mem(struct bnx2x *bp)
6025 {
6026 #define BNX2X_PCI_ALLOC(x, y, size) \
6027         do { \
6028                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
6029                 if (x == NULL) \
6030                         goto alloc_mem_err; \
6031                 memset(x, 0, size); \
6032         } while (0)
6033
6034 #define BNX2X_ALLOC(x, size) \
6035         do { \
6036                 x = kzalloc(size, GFP_KERNEL); \
6037                 if (x == NULL) \
6038                         goto alloc_mem_err; \
6039         } while (0)
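/* Illustrative expansion (not in the source): a failed allocation anywhere
 * below jumps to alloc_mem_err, which calls bnx2x_free_mem().  For example,
 *
 *      BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
 *
 * expands to dma_alloc_coherent() plus a NULL check that routes straight to
 * the common error label, then a memset(); the unwind is safe because the
 * pointers start out NULL and the free macros re-NULL what they release.
 */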
6040
6041         int i;
6042
6043         /* fastpath */
6044         /* Common */
6045         for_each_queue(bp, i) {
6046                 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
6047                 bnx2x_fp(bp, i, bp) = bp;
6048                 /* status blocks */
6049 #ifdef BCM_CNIC
6050                 if (!IS_FCOE_IDX(i)) {
6051 #endif
6052                         if (CHIP_IS_E2(bp))
6053                                 BNX2X_PCI_ALLOC(sb->e2_sb,
6054                                     &bnx2x_fp(bp, i, status_blk_mapping),
6055                                     sizeof(struct host_hc_status_block_e2));
6056                         else
6057                                 BNX2X_PCI_ALLOC(sb->e1x_sb,
6058                                     &bnx2x_fp(bp, i, status_blk_mapping),
6059                                     sizeof(struct host_hc_status_block_e1x));
6060 #ifdef BCM_CNIC
6061                 }
6062 #endif
6063                 set_sb_shortcuts(bp, i);
6064         }
6065         /* Rx */
6066         for_each_queue(bp, i) {
6067
6068                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6069                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6070                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6071                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6072                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6073                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6074
6075                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6076                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6077                                 sizeof(struct eth_fast_path_rx_cqe) *
6078                                 NUM_RCQ_BD);
6079
6080                 /* SGE ring */
6081                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6082                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6083                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6084                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6085                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6086         }
6087         /* Tx */
6088         for_each_queue(bp, i) {
6089
6090                 /* fastpath tx rings: tx_buf tx_desc */
6091                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6092                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6093                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6094                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6095                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6096         }
6097         /* end of fastpath */
6098
6099 #ifdef BCM_CNIC
6100         if (CHIP_IS_E2(bp))
6101                 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
6102                                 sizeof(struct host_hc_status_block_e2));
6103         else
6104                 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
6105                                 sizeof(struct host_hc_status_block_e1x));
6106
6107         /* allocate searcher T2 table */
6108         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
6109 #endif
6110
6111
6112         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6113                         sizeof(struct host_sp_status_block));
6114
6115         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6116                         sizeof(struct bnx2x_slowpath));
6117
6118         bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
6119
6120         BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
6121                         bp->context.size);
6122
6123         BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
6124
6125         if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
6126                 goto alloc_mem_err;
6127
6128         /* Slow path ring */
6129         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6130
6131         /* EQ */
6132         BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
6133                         BCM_PAGE_SIZE * NUM_EQ_PAGES);
6134         return 0;
6135
6136 alloc_mem_err:
6137         bnx2x_free_mem(bp);
6138         return -ENOMEM;
6139
6140 #undef BNX2X_PCI_ALLOC
6141 #undef BNX2X_ALLOC
6142 }
6143
6144 /*
6145  * Init service functions
6146  */
6147 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6148                              int *state_p, int flags);
6149
6150 int bnx2x_func_start(struct bnx2x *bp)
6151 {
6152         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
6153
6154         /* Wait for completion */
6155         return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
6156                                  WAIT_RAMROD_COMMON);
6157 }
6158
6159 static int bnx2x_func_stop(struct bnx2x *bp)
6160 {
6161         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
6162
6163         /* Wait for completion */
6164         return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6165                                       0, &(bp->state), WAIT_RAMROD_COMMON);
6166 }
6167
6168 /**
6169  * Sets a MAC in a CAM for a few L2 Clients for E1x chips
6170  *
6171  * @param bp driver descriptor
6172  * @param set set or clear an entry (1 or 0)
6173  * @param mac pointer to a buffer containing a MAC
6174  * @param cl_bit_vec bit vector of clients to register a MAC for
6175  * @param cam_offset offset in a CAM to use
6176  * @param is_bcast is the set MAC a broadcast address (for E1 only)
6177  */
6178 static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
6179                                    u32 cl_bit_vec, u8 cam_offset,
6180                                    u8 is_bcast)
6181 {
6182         struct mac_configuration_cmd *config =
6183                 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6184         int ramrod_flags = WAIT_RAMROD_COMMON;
6185
6186         bp->set_mac_pending = 1;
6187         smp_wmb();
6188
6189         config->hdr.length = 1;
6190         config->hdr.offset = cam_offset;
6191         config->hdr.client_id = 0xff;
6192         config->hdr.reserved1 = 0;
6193
6194         /* primary MAC */
6195         config->config_table[0].msb_mac_addr =
6196                                         swab16(*(u16 *)&mac[0]);
6197         config->config_table[0].middle_mac_addr =
6198                                         swab16(*(u16 *)&mac[2]);
6199         config->config_table[0].lsb_mac_addr =
6200                                         swab16(*(u16 *)&mac[4]);
6201         config->config_table[0].clients_bit_vector =
6202                                         cpu_to_le32(cl_bit_vec);
6203         config->config_table[0].vlan_id = 0;
6204         config->config_table[0].pf_id = BP_FUNC(bp);
6205         if (set)
6206                 SET_FLAG(config->config_table[0].flags,
6207                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6208                         T_ETH_MAC_COMMAND_SET);
6209         else
6210                 SET_FLAG(config->config_table[0].flags,
6211                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6212                         T_ETH_MAC_COMMAND_INVALIDATE);
6213
6214         if (is_bcast)
6215                 SET_FLAG(config->config_table[0].flags,
6216                         MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6217
6218         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  PF_ID %d  CLID mask %d\n",
6219            (set ? "setting" : "clearing"),
6220            config->config_table[0].msb_mac_addr,
6221            config->config_table[0].middle_mac_addr,
6222            config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
6223
6224         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6225                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6226                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6227
6228         /* Wait for a completion */
6229         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
6230 }
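/* Worked example (illustrative): on a little-endian host, a MAC of
 * 00:1b:21:7a:8f:c3 is packed by the swab16() calls above as
 *
 *      msb_mac_addr    = 0x001b
 *      middle_mac_addr = 0x217a
 *      lsb_mac_addr    = 0x8fc3
 *
 * i.e. three big-endian 16-bit words, evidently the layout the firmware
 * CAM expects.
 */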
6231
6232 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6233                              int *state_p, int flags)
6234 {
6235         /* can take a while if any port is running */
6236         int cnt = 5000;
6237         u8 poll = flags & WAIT_RAMROD_POLL;
6238         u8 common = flags & WAIT_RAMROD_COMMON;
6239
6240         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6241            poll ? "polling" : "waiting", state, idx);
6242
6243         might_sleep();
6244         while (cnt--) {
6245                 if (poll) {
6246                         if (common)
6247                                 bnx2x_eq_int(bp);
6248                         else {
6249                                 bnx2x_rx_int(bp->fp, 10);
6250                                 /* if index is different from 0
6251                                  * the reply for some commands will
6252                                  * be on the non default queue
6253                                  */
6254                                 if (idx)
6255                                         bnx2x_rx_int(&bp->fp[idx], 10);
6256                         }
6257                 }
6258
6259                 mb(); /* state is changed by bnx2x_sp_event() */
6260                 if (*state_p == state) {
6261 #ifdef BNX2X_STOP_ON_ERROR
6262                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
6263 #endif
6264                         return 0;
6265                 }
6266
6267                 msleep(1);
6268
6269                 if (bp->panic)
6270                         return -EIO;
6271         }
6272
6273         /* timeout! */
6274         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6275                   poll ? "polling" : "waiting", state, idx);
6276 #ifdef BNX2X_STOP_ON_ERROR
6277         bnx2x_panic();
6278 #endif
6279
6280         return -EBUSY;
6281 }
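/* Back-of-the-envelope (illustrative): with cnt = 5000 and msleep(1) per
 * iteration, a ramrod gets at least ~5 seconds before the timeout fires;
 * more in practice, since msleep(1) typically sleeps a full jiffy or more.
 */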
6282
6283 static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6284 {
6285         if (CHIP_IS_E1H(bp))
6286                 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6287         else if (CHIP_MODE_IS_4_PORT(bp))
6288                 return BP_FUNC(bp) * 32  + rel_offset;
6289         else
6290                 return BP_VN(bp) * 32  + rel_offset;
6291 }
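/* Worked examples (illustrative, assuming E1H_FUNC_MAX is 8): on E1H,
 * func 3 with rel_offset 1 maps to 8 * 1 + 3 = 11, grouping the CAM by
 * line type; in 4-port mode each function instead owns a private window
 * of 32 entries, so the same arguments map to 3 * 32 + 1 = 97.
 */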
6292
6293 /**
6294  *  LLH CAM line allocations: currently only iSCSI and ETH MACs are
6295  *  relevant. In addition, the current implementation is tuned for a
6296  *  single ETH MAC.
6297  *
6298  *  When a PF configuration with multiple unicast ETH MACs in switch
6299  *  independent mode is required (NetQ, multiple netdev MACs,
6300  *  etc.), consider better utilisation of the 16 per-function MAC
6301  *  entries in the LLH memory.
6302  */
6303 enum {
6304         LLH_CAM_ISCSI_ETH_LINE = 0,
6305         LLH_CAM_ETH_LINE,
6306         LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
6307 };
6308
6309 static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
6310                           int set,
6311                           unsigned char *dev_addr,
6312                           int index)
6313 {
6314         u32 wb_data[2];
6315         u32 mem_offset, ena_offset, mem_index;
6316         /**
6317          * indexes mapping:
6318          * 0..7 - goes to MEM
6319          * 8..15 - goes to MEM2
6320          */
6321
6322         if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
6323                 return;
6324
6325         /* calculate memory start offset according to the mapping
6326          * and index in the memory */
6327         if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
6328                 mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
6329                                            NIG_REG_LLH0_FUNC_MEM;
6330                 ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
6331                                            NIG_REG_LLH0_FUNC_MEM_ENABLE;
6332                 mem_index = index;
6333         } else {
6334                 mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
6335                                            NIG_REG_P0_LLH_FUNC_MEM2;
6336                 ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
6337                                            NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
6338                 mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
6339         }
6340
6341         if (set) {
6342                 /* LLH_FUNC_MEM is a u64 WB register */
6343                 mem_offset += 8*mem_index;
6344
6345                 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
6346                               (dev_addr[4] <<  8) |  dev_addr[5]);
6347                 wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
6348
6349                 REG_WR_DMAE(bp, mem_offset, wb_data, 2);
6350         }
6351
6352         /* enable/disable the entry */
6353         REG_WR(bp, ena_offset + 4*mem_index, set);
6354
6355 }
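/* Worked example (illustrative): for the MAC 00:1b:21:7a:8f:c3 the
 * wide-bus pair built above is
 *
 *      wb_data[0] = 0x217a8fc3   (bytes 2..5)
 *      wb_data[1] = 0x0000001b   (bytes 0..1)
 *
 * so the low 32 bits of the u64 LLH_FUNC_MEM entry carry the tail of the
 * address and the high word carries the two leading bytes.
 */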
6356
6357 void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6358 {
6359         u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6360                          bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6361
6362         /* networking MAC */
6363         bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6364                                (1 << bp->fp->cl_id), cam_offset, 0);
6365
6366         bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
6367
6368         if (CHIP_IS_E1(bp)) {
6369                 /* broadcast MAC */
6370                 static const u8 bcast[ETH_ALEN] = {
6371                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff
6372                 };
6373                 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6374         }
6375 }

6376 static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6377 {
6378         int i = 0, old;
6379         struct net_device *dev = bp->dev;
6380         struct netdev_hw_addr *ha;
6381         struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6382         dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6383
6384         netdev_for_each_mc_addr(ha, dev) {
6385                 /* copy mac */
6386                 config_cmd->config_table[i].msb_mac_addr =
6387                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6388                 config_cmd->config_table[i].middle_mac_addr =
6389                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6390                 config_cmd->config_table[i].lsb_mac_addr =
6391                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
6392
6393                 config_cmd->config_table[i].vlan_id = 0;
6394                 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6395                 config_cmd->config_table[i].clients_bit_vector =
6396                         cpu_to_le32(1 << BP_L_ID(bp));
6397
6398                 SET_FLAG(config_cmd->config_table[i].flags,
6399                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6400                         T_ETH_MAC_COMMAND_SET);
6401
6402                 DP(NETIF_MSG_IFUP,
6403                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6404                    config_cmd->config_table[i].msb_mac_addr,
6405                    config_cmd->config_table[i].middle_mac_addr,
6406                    config_cmd->config_table[i].lsb_mac_addr);
6407                 i++;
6408         }
6409         old = config_cmd->hdr.length;
6410         if (old > i) {
6411                 for (; i < old; i++) {
6412                         if (CAM_IS_INVALID(config_cmd->
6413                                            config_table[i])) {
6414                                 /* already invalidated */
6415                                 break;
6416                         }
6417                         /* invalidate */
6418                         SET_FLAG(config_cmd->config_table[i].flags,
6419                                 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6420                                 T_ETH_MAC_COMMAND_INVALIDATE);
6421                 }
6422         }
6423
6424         config_cmd->hdr.length = i;
6425         config_cmd->hdr.offset = offset;
6426         config_cmd->hdr.client_id = 0xff;
6427         config_cmd->hdr.reserved1 = 0;
6428
6429         bp->set_mac_pending = 1;
6430         smp_wmb();
6431
6432         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6433                    U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6434 }

6435 static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
6436 {
6437         int i;
6438         struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6439         dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6440         int ramrod_flags = WAIT_RAMROD_COMMON;
6441
6442         bp->set_mac_pending = 1;
6443         smp_wmb();
6444
6445         for (i = 0; i < config_cmd->hdr.length; i++)
6446                 SET_FLAG(config_cmd->config_table[i].flags,
6447                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6448                         T_ETH_MAC_COMMAND_INVALIDATE);
6449
6450         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6451                       U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6452
6453         /* Wait for a completion */
6454         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6455                                 ramrod_flags);
6456
6457 }
6458
6459 #ifdef BCM_CNIC
6460 /**
6461  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
6462  * MAC(s). This function will wait until the ramrod completion
6463  * returns.
6464  *
6465  * @param bp driver handle
6466  * @param set set or clear the CAM entry
6467  *
6468  * @return 0 on success, -ENODEV if the ramrod doesn't return.
6469  */
6470 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6471 {
6472         u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6473                          bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
6474         u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
6475                 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
6476         u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
6477
6478         /* Send a SET_MAC ramrod */
6479         bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6480                                cam_offset, 0);
6481
6482         bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
6483
6484         return 0;
6485 }
6486
6487 /**
6488  * Set FCoE L2 MAC(s) at the next entries in the CAM after the
6489  * ETH MAC(s). This function will wait until the ramrod
6490  * completion returns.
6491  *
6492  * @param bp driver handle
6493  * @param set set or clear the CAM entry
6494  *
6495  * @return 0 on success, -ENODEV if the ramrod doesn't return.
6496  */
6497 int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
6498 {
6499         u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6500         /**
6501          * CAM allocation for E1H
6502          * eth unicasts: by func number
6503          * iscsi: by func number
6504          * fip unicast: by func number
6505          * fip multicast: by func number
6506          */
6507         bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
6508                 cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
6509
6510         return 0;
6511 }
6512
6513 int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
6514 {
6515         u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6516
6517         /**
6518          * CAM allocation for E1H
6519          * eth unicasts: by func number
6520          * iscsi: by func number
6521          * fip unicast: by func number
6522          * fip multicast: by func number
6523          */
6524         bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
6525                 bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
6526
6527         return 0;
6528 }
6529 #endif
6530
6531 static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6532                                     struct bnx2x_client_init_params *params,
6533                                     u8 activate,
6534                                     struct client_init_ramrod_data *data)
6535 {
6536         /* Clear the buffer */
6537         memset(data, 0, sizeof(*data));
6538
6539         /* general */
6540         data->general.client_id = params->rxq_params.cl_id;
6541         data->general.statistics_counter_id = params->rxq_params.stat_id;
6542         data->general.statistics_en_flg =
6543                 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6544         data->general.is_fcoe_flg =
6545                 (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
6546         data->general.activate_flg = activate;
6547         data->general.sp_client_id = params->rxq_params.spcl_id;
6548
6549         /* Rx data */
6550         data->rx.tpa_en_flg =
6551                 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6552         data->rx.vmqueue_mode_en_flg = 0;
6553         data->rx.cache_line_alignment_log_size =
6554                 params->rxq_params.cache_line_log;
6555         data->rx.enable_dynamic_hc =
6556                 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6557         data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6558         data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6559         data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6560
6561         /* We don't set drop flags */
6562         data->rx.drop_ip_cs_err_flg = 0;
6563         data->rx.drop_tcp_cs_err_flg = 0;
6564         data->rx.drop_ttl0_flg = 0;
6565         data->rx.drop_udp_cs_err_flg = 0;
6566
6567         data->rx.inner_vlan_removal_enable_flg =
6568                 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6569         data->rx.outer_vlan_removal_enable_flg =
6570                 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6571         data->rx.status_block_id = params->rxq_params.fw_sb_id;
6572         data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6573         data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6574         data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6575         data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6576         data->rx.bd_page_base.lo =
6577                 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6578         data->rx.bd_page_base.hi =
6579                 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6580         data->rx.sge_page_base.lo =
6581                 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6582         data->rx.sge_page_base.hi =
6583                 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6584         data->rx.cqe_page_base.lo =
6585                 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6586         data->rx.cqe_page_base.hi =
6587                 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6588         data->rx.is_leading_rss =
6589                 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6590         data->rx.is_approx_mcast = data->rx.is_leading_rss;
6591
6592         /* Tx data */
6593         data->tx.enforce_security_flg = 0; /* VF specific */
6594         data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6595         data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6596         data->tx.mtu = 0; /* VF specific */
6597         data->tx.tx_bd_page_base.lo =
6598                 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6599         data->tx.tx_bd_page_base.hi =
6600                 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6601
6602         /* flow control data */
6603         data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6604         data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6605         data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6606         data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6607         data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6608         data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6609         data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6610
6611         data->fc.safc_group_num = params->txq_params.cos;
6612         data->fc.safc_group_en_flg =
6613                 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6614         data->fc.traffic_type =
6615                 (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
6616                 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
6617 }
6618
6619 static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6620 {
6621         /* ustorm cxt validation */
6622         cxt->ustorm_ag_context.cdu_usage =
6623                 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6624                                        ETH_CONNECTION_TYPE);
6625         /* xcontext validation */
6626         cxt->xstorm_ag_context.cdu_reserved =
6627                 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6628                                        ETH_CONNECTION_TYPE);
6629 }
6630
6631 static int bnx2x_setup_fw_client(struct bnx2x *bp,
6632                                  struct bnx2x_client_init_params *params,
6633                                  u8 activate,
6634                                  struct client_init_ramrod_data *data,
6635                                  dma_addr_t data_mapping)
6636 {
6637         u16 hc_usec;
6638         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6639         int ramrod_flags = 0, rc;
6640
6641         /* HC and context validation values */
6642         hc_usec = params->txq_params.hc_rate ?
6643                 1000000 / params->txq_params.hc_rate : 0;
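        /* Illustrative: hc_rate is an interrupts-per-second target, so the
         * division converts it to a coalescing period in microseconds
         * (e.g. hc_rate = 50000 gives 1000000 / 50000 = 20 usec); a rate of
         * 0 yields hc_usec = 0, with the !(flags & QUEUE_FLG_HC) argument
         * below presumably acting as the disable flag.
         */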
6644         bnx2x_update_coalesce_sb_index(bp,
6645                         params->txq_params.fw_sb_id,
6646                         params->txq_params.sb_cq_index,
6647                         !(params->txq_params.flags & QUEUE_FLG_HC),
6648                         hc_usec);
6649
6650         *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6651
6652         hc_usec = params->rxq_params.hc_rate ?
6653                 1000000 / params->rxq_params.hc_rate : 0;
6654         bnx2x_update_coalesce_sb_index(bp,
6655                         params->rxq_params.fw_sb_id,
6656                         params->rxq_params.sb_cq_index,
6657                         !(params->rxq_params.flags & QUEUE_FLG_HC),
6658                         hc_usec);
6659
6660         bnx2x_set_ctx_validation(params->rxq_params.cxt,
6661                                  params->rxq_params.cid);
6662
6663         /* zero stats */
6664         if (params->txq_params.flags & QUEUE_FLG_STATS)
6665                 storm_memset_xstats_zero(bp, BP_PORT(bp),
6666                                          params->txq_params.stat_id);
6667
6668         if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6669                 storm_memset_ustats_zero(bp, BP_PORT(bp),
6670                                          params->rxq_params.stat_id);
6671                 storm_memset_tstats_zero(bp, BP_PORT(bp),
6672                                          params->rxq_params.stat_id);
6673         }
6674
6675         /* Fill the ramrod data */
6676         bnx2x_fill_cl_init_data(bp, params, activate, data);
6677
6678         /* SETUP ramrod.
6679          *
6680          * bnx2x_sp_post() takes a spin_lock, thus no explicit memory
6681          * barrier other than mmiowb() is needed to impose a
6682          * proper ordering of memory operations.
6683          */
6684         mmiowb();
6685
6686
6687         bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6688                       U64_HI(data_mapping), U64_LO(data_mapping), 0);
6689
6690         /* Wait for completion */
6691         rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6692                                  params->ramrod_params.index,
6693                                  params->ramrod_params.pstate,
6694                                  ramrod_flags);
6695         return rc;
6696 }
6697
6698 /**
6699  * Configure interrupt mode according to current configuration.
6700  * In case of MSI-X it will also try to enable MSI-X.
6701  *
6702  * @param bp
6703  *
6704  * @return int
6705  */
6706 static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
6707 {
6708         int rc = 0;
6709
6710         switch (bp->int_mode) {
6711         case INT_MODE_MSI:
6712                 bnx2x_enable_msi(bp);
6713                 /* falling through... */
6714         case INT_MODE_INTx:
6715                 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
6716                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
6717                 break;
6718         default:
6719                 /* Set number of queues according to bp->multi_mode value */
6720                 bnx2x_set_num_queues(bp);
6721
6722                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6723                    bp->num_queues);
6724
6725                 /* try to enable MSI-X with the requested number of fp's;
6726                  * if that fails we only need one fp, so fall back to MSI
6727                  * or legacy INTx with a single fp
6728                  */
6729                 rc = bnx2x_enable_msix(bp);
6730                 if (rc) {
6731                         /* failed to enable MSI-X */
6732                         if (bp->multi_mode)
6733                                 DP(NETIF_MSG_IFUP,
6734                                           "Multi requested but failed to "
6735                                           "enable MSI-X (%d), "
6736                                           "set number of queues to %d\n",
6737                                    bp->num_queues,
6738                                    1 + NONE_ETH_CONTEXT_USE);
6739                         bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
6740
6741                         if (!(bp->flags & DISABLE_MSI_FLAG))
6742                                 bnx2x_enable_msi(bp);
6743                 }
6744
6745                 break;
6746         }
6747
6748         return rc;
6749 }
6750
6751 /* must be called prior to any HW initializations */
6752 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6753 {
6754         return L2_ILT_LINES(bp);
6755 }
6756
6757 void bnx2x_ilt_set_info(struct bnx2x *bp)
6758 {
6759         struct ilt_client_info *ilt_client;
6760         struct bnx2x_ilt *ilt = BP_ILT(bp);
6761         u16 line = 0;
6762
6763         ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6764         DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6765
6766         /* CDU */
6767         ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6768         ilt_client->client_num = ILT_CLIENT_CDU;
6769         ilt_client->page_size = CDU_ILT_PAGE_SZ;
6770         ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6771         ilt_client->start = line;
6772         line += L2_ILT_LINES(bp);
6773 #ifdef BCM_CNIC
6774         line += CNIC_ILT_LINES;
6775 #endif
6776         ilt_client->end = line - 1;
6777
6778         DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6779                                          "flags 0x%x, hw psz %d\n",
6780            ilt_client->start,
6781            ilt_client->end,
6782            ilt_client->page_size,
6783            ilt_client->flags,
6784            ilog2(ilt_client->page_size >> 12));
6785
6786         /* QM */
6787         if (QM_INIT(bp->qm_cid_count)) {
6788                 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6789                 ilt_client->client_num = ILT_CLIENT_QM;
6790                 ilt_client->page_size = QM_ILT_PAGE_SZ;
6791                 ilt_client->flags = 0;
6792                 ilt_client->start = line;
6793
6794                 /* 4 bytes for each cid */
6795                 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6796                                                          QM_ILT_PAGE_SZ);
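                /* Illustrative sizing (hypothetical values): with
                 * qm_cid_count = 1024, QM_QUEUES_PER_FUNC = 16 and a 4 KiB
                 * QM_ILT_PAGE_SZ, this reserves
                 * DIV_ROUND_UP(1024 * 16 * 4, 4096) = 16 ILT lines.
                 */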
6797
6798                 ilt_client->end = line - 1;
6799
6800                 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6801                                                  "flags 0x%x, hw psz %d\n",
6802                    ilt_client->start,
6803                    ilt_client->end,
6804                    ilt_client->page_size,
6805                    ilt_client->flags,
6806                    ilog2(ilt_client->page_size >> 12));
6807
6808         }
6809         /* SRC */
6810         ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6811 #ifdef BCM_CNIC
6812         ilt_client->client_num = ILT_CLIENT_SRC;
6813         ilt_client->page_size = SRC_ILT_PAGE_SZ;
6814         ilt_client->flags = 0;
6815         ilt_client->start = line;
6816         line += SRC_ILT_LINES;
6817         ilt_client->end = line - 1;
6818
6819         DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6820                                          "flags 0x%x, hw psz %d\n",
6821            ilt_client->start,
6822            ilt_client->end,
6823            ilt_client->page_size,
6824            ilt_client->flags,
6825            ilog2(ilt_client->page_size >> 12));
6826
6827 #else
6828         ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6829 #endif
6830
6831         /* TM */
6832         ilt_client = &ilt->clients[ILT_CLIENT_TM];
6833 #ifdef BCM_CNIC
6834         ilt_client->client_num = ILT_CLIENT_TM;
6835         ilt_client->page_size = TM_ILT_PAGE_SZ;
6836         ilt_client->flags = 0;
6837         ilt_client->start = line;
6838         line += TM_ILT_LINES;
6839         ilt_client->end = line - 1;
6840
6841         DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6842                                          "flags 0x%x, hw psz %d\n",
6843            ilt_client->start,
6844            ilt_client->end,
6845            ilt_client->page_size,
6846            ilt_client->flags,
6847            ilog2(ilt_client->page_size >> 12));
6848
6849 #else
6850         ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6851 #endif
6852 }
6853
6854 int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6855                        int is_leading)
6856 {
6857         struct bnx2x_client_init_params params = { {0} };
6858         int rc;
6859
6860         /* reset IGU state (skipped for the FCoE L2 queue) */
6861         if (!IS_FCOE_FP(fp))
6862                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6863                              IGU_INT_ENABLE, 0);
6864
6865         params.ramrod_params.pstate = &fp->state;
6866         params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6867         params.ramrod_params.index = fp->index;
6868         params.ramrod_params.cid = fp->cid;
6869
6870 #ifdef BCM_CNIC
6871         if (IS_FCOE_FP(fp))
6872                 params.ramrod_params.flags |= CLIENT_IS_FCOE;
6873
6874 #endif
6875
6876         if (is_leading)
6877                 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6878
6879         bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6880
6881         bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6882
6883         rc = bnx2x_setup_fw_client(bp, &params, 1,
6884                                      bnx2x_sp(bp, client_init_data),
6885                                      bnx2x_sp_mapping(bp, client_init_data));
6886         return rc;
6887 }
6888
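/* Tear a client down with the three-step ramrod sequence the FW expects:
 * HALT the connection, TERMINATE it, then delete its CFC entry, waiting
 * for the matching state transition after each ramrod.
 */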
6889 static int bnx2x_stop_fw_client(struct bnx2x *bp,
6890                                 struct bnx2x_client_ramrod_params *p)
6891 {
6892         int rc;
6893
6894         int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
6895
6896         /* halt the connection */
6897         *p->pstate = BNX2X_FP_STATE_HALTING;
6898         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6899                                                   p->cl_id, 0);
6900
6901         /* Wait for completion */
6902         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6903                                p->pstate, poll_flag);
6904         if (rc) /* timeout */
6905                 return rc;
6906
6907         *p->pstate = BNX2X_FP_STATE_TERMINATING;
6908         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6909                                                        p->cl_id, 0);
6910         /* Wait for completion */
6911         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6912                                p->pstate, poll_flag);
6913         if (rc) /* timeout */
6914                 return rc;
6915
6916
6917         /* delete cfc entry */
6918         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
6919
6920         /* Wait for completion */
6921         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6922                                p->pstate, WAIT_RAMROD_COMMON);
6923         return rc;
6924 }
6925
6926 static int bnx2x_stop_client(struct bnx2x *bp, int index)
6927 {
6928         struct bnx2x_client_ramrod_params client_stop = {0};
6929         struct bnx2x_fastpath *fp = &bp->fp[index];
6930
6931         client_stop.index = index;
6932         client_stop.cid = fp->cid;
6933         client_stop.cl_id = fp->cl_id;
6934         client_stop.pstate = &(fp->state);
6935         client_stop.poll = 0;
6936
6937         return bnx2x_stop_fw_client(bp, &client_stop);
6938 }
6939
6940
6941 static void bnx2x_reset_func(struct bnx2x *bp)
6942 {
6943         int port = BP_PORT(bp);
6944         int func = BP_FUNC(bp);
6945         int i;
6946         int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
6947                         (CHIP_IS_E2(bp) ?
6948                          offsetof(struct hc_status_block_data_e2, common) :
6949                          offsetof(struct hc_status_block_data_e1x, common));
6950         int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6951         int pfid_offset = offsetof(struct pci_entity, pf_id);
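        /* The offsets above locate the pf_id byte inside a storm's status
         * block data: SB base + p_func offset (which differs between the
         * E1x and E2 layouts) + the pf_id offset within struct pci_entity.
         */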
6952
6953         /* Disable the function in the FW */
6954         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6955         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6956         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6957         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6958
6959         /* FP SBs */
6960         for_each_eth_queue(bp, i) {
6961                 struct bnx2x_fastpath *fp = &bp->fp[i];
6962                 REG_WR8(bp,
6963                         BAR_CSTRORM_INTMEM +
6964                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6965                         + pfunc_offset_fp + pfid_offset,
6966                         HC_FUNCTION_DISABLED);
6967         }
6968
6969         /* SP SB */
6970         REG_WR8(bp,
6971                 BAR_CSTRORM_INTMEM +
6972                 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6973                 pfunc_offset_sp + pfid_offset,
6974                 HC_FUNCTION_DISABLED);
6975
6976
6977         for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6978                 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func) +
6979                        i * sizeof(u32), 0); /* advance a dword per iteration */
6980
6981         /* Configure IGU */
6982         if (bp->common.int_block == INT_BLOCK_HC) {
6983                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6984                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6985         } else {
6986                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6987                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6988         }
6989
6990 #ifdef BCM_CNIC
6991         /* Disable Timer scan */
6992         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6993         /*
6994          * Wait for at least 10ms and up to 2 seconds for the timer scan to
6995          * complete
6996          */
6997         for (i = 0; i < 200; i++) {
6998                 msleep(10);
6999                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7000                         break;
7001         }
7002 #endif
7003         /* Clear ILT */
7004         bnx2x_clear_func_ilt(bp, func);
7005
7006         /* Timers bug workaround for E2: if this is vnic-3,
7007          * we need to set the entire ILT range for the timers.
7008          */
7009         if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
7010                 struct ilt_client_info ilt_cli;
7011                 /* use dummy TM client */
7012                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7013                 ilt_cli.start = 0;
7014                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7015                 ilt_cli.client_num = ILT_CLIENT_TM;
7016
7017                 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
7018         }
7019
7020         /* this assumes that reset_port() is called before reset_func() */
7021         if (CHIP_IS_E2(bp))
7022                 bnx2x_pf_disable(bp);
7023
7024         bp->dmae_ready = 0;
7025 }
7026
7027 static void bnx2x_reset_port(struct bnx2x *bp)
7028 {
7029         int port = BP_PORT(bp);
7030         u32 val;
7031
7032         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7033
7034         /* Do not rcv packets to BRB */
7035         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7036         /* Do not direct rcv packets that are not for MCP to the BRB */
7037         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7038                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7039
7040         /* Configure AEU */
7041         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7042
7043         msleep(100);
7044         /* Check for BRB port occupancy */
7045         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7046         if (val)
7047                 DP(NETIF_MSG_IFDOWN,
7048                    "BRB1 is not empty  %d blocks are occupied\n", val);
7049
7050         /* TODO: Close Doorbell port? */
7051 }
7052
7053 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7054 {
7055         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7056            BP_ABS_FUNC(bp), reset_code);
7057
7058         switch (reset_code) {
7059         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7060                 bnx2x_reset_port(bp);
7061                 bnx2x_reset_func(bp);
7062                 bnx2x_reset_common(bp);
7063                 break;
7064
7065         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7066                 bnx2x_reset_port(bp);
7067                 bnx2x_reset_func(bp);
7068                 break;
7069
7070         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7071                 bnx2x_reset_func(bp);
7072                 break;
7073
7074         default:
7075                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7076                 break;
7077         }
7078 }
7079
7080 #ifdef BCM_CNIC
7081 static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
7082 {
7083         if (bp->flags & FCOE_MACS_SET) {
7084                 if (!IS_MF_SD(bp))
7085                         bnx2x_set_fip_eth_mac_addr(bp, 0);
7086
7087                 bnx2x_set_all_enode_macs(bp, 0);
7088
7089                 bp->flags &= ~FCOE_MACS_SET;
7090         }
7091 }
7092 #endif
7093
7094 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
7095 {
7096         int port = BP_PORT(bp);
7097         u32 reset_code = 0;
7098         int i, cnt, rc;
7099
7100         /* Wait until tx fastpath tasks complete */
7101         for_each_tx_queue(bp, i) {
7102                 struct bnx2x_fastpath *fp = &bp->fp[i];
7103
7104                 cnt = 1000;
7105                 while (bnx2x_has_tx_work_unload(fp)) {
7106
7107                         if (!cnt) {
7108                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7109                                           i);
7110 #ifdef BNX2X_STOP_ON_ERROR
7111                                 bnx2x_panic();
7112                                 return; /* bnx2x_chip_cleanup() is void */
7113 #else
7114                                 break;
7115 #endif
7116                         }
7117                         cnt--;
7118                         msleep(1);
7119                 }
7120         }
7121         /* Give HW time to discard old tx messages */
7122         msleep(1);
7123
7124         if (CHIP_IS_E1(bp)) {
7125                 /* invalidate mc list,
7126                  * wait and poll (interrupts are off)
7127                  */
7128                 bnx2x_invlidate_e1_mc_list(bp);
7129                 bnx2x_set_eth_mac(bp, 0);
7130
7131         } else {
7132                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7133
7134                 bnx2x_set_eth_mac(bp, 0);
7135
7136                 for (i = 0; i < MC_HASH_SIZE; i++)
7137                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7138         }
7139
7140 #ifdef BCM_CNIC
7141         bnx2x_del_fcoe_eth_macs(bp);
7142 #endif
7143
7144         if (unload_mode == UNLOAD_NORMAL)
7145                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7146
7147         else if (bp->flags & NO_WOL_FLAG)
7148                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7149
7150         else if (bp->wol) {
7151                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7152                 u8 *mac_addr = bp->dev->dev_addr;
7153                 u32 val;
7154                 /* The mac address is written to entries 1-4 to
7155                    preserve entry 0 which is used by the PMF */
7156                 u8 entry = (BP_E1HVN(bp) + 1)*8;
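                /* Each MAC_MATCH CAM entry is 8 bytes wide (two 32-bit
                 * registers, written at entry and entry + 4 below), hence
                 * the *8 stride per vnic.
                 */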
7157
7158                 val = (mac_addr[0] << 8) | mac_addr[1];
7159                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7160
7161                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7162                       (mac_addr[4] << 8) | mac_addr[5];
7163                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7164
7165                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7166
7167         } else
7168                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7169
7170         /* Close multi and leading connections;
7171            completions for ramrods are collected in a synchronous way */
7172         for_each_queue(bp, i)
7173
7174                 if (bnx2x_stop_client(bp, i))
7175 #ifdef BNX2X_STOP_ON_ERROR
7176                         return;
7177 #else
7178                         goto unload_error;
7179 #endif
7180
7181         rc = bnx2x_func_stop(bp);
7182         if (rc) {
7183                 BNX2X_ERR("Function stop failed!\n");
7184 #ifdef BNX2X_STOP_ON_ERROR
7185                 return;
7186 #else
7187                 goto unload_error;
7188 #endif
7189         }
7190 #ifndef BNX2X_STOP_ON_ERROR
7191 unload_error:
7192 #endif
7193         if (!BP_NOMCP(bp))
7194                 reset_code = bnx2x_fw_command(bp, reset_code, 0);
7195         else {
7196                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      "
7197                                      "%d, %d, %d\n", BP_PATH(bp),
7198                    load_count[BP_PATH(bp)][0],
7199                    load_count[BP_PATH(bp)][1],
7200                    load_count[BP_PATH(bp)][2]);
7201                 load_count[BP_PATH(bp)][0]--;
7202                 load_count[BP_PATH(bp)][1 + port]--;
7203                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  "
7204                                      "%d, %d, %d\n", BP_PATH(bp),
7205                    load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
7206                    load_count[BP_PATH(bp)][2]);
7207                 if (load_count[BP_PATH(bp)][0] == 0)
7208                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7209                 else if (load_count[BP_PATH(bp)][1 + port] == 0)
7210                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7211                 else
7212                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7213         }
7214
7215         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7216             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7217                 bnx2x__link_reset(bp);
7218
7219         /* Disable HW interrupts, NAPI */
7220         bnx2x_netif_stop(bp, 1);
7221
7222         /* Release IRQs */
7223         bnx2x_free_irq(bp);
7224
7225         /* Reset the chip */
7226         bnx2x_reset_chip(bp, reset_code);
7227
7228         /* Report UNLOAD_DONE to MCP */
7229         if (!BP_NOMCP(bp))
7230                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7231
7232 }
7233
7234 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
7235 {
7236         u32 val;
7237
7238         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
7239
7240         if (CHIP_IS_E1(bp)) {
7241                 int port = BP_PORT(bp);
7242                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7243                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
7244
7245                 val = REG_RD(bp, addr);
7246                 val &= ~(0x300);
7247                 REG_WR(bp, addr, val);
7248         } else if (CHIP_IS_E1H(bp)) {
7249                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
7250                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
7251                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
7252                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
7253         }
7254 }
7255
7256 /* Close gates #2, #3 and #4: */
7257 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
7258 {
7259         u32 val, addr;
7260
7261         /* Gates #2 and #4a are closed/opened for "not E1" only */
7262         if (!CHIP_IS_E1(bp)) {
7263                 /* #4 */
7264                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
7265                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
7266                        close ? (val | 0x1) : (val & (~(u32)1)));
7267                 /* #2 */
7268                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
7269                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
7270                        close ? (val | 0x1) : (val & (~(u32)1)));
7271         }
7272
7273         /* #3 */
7274         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
7275         val = REG_RD(bp, addr);
7276         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
7277
7278         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7279                 close ? "closing" : "opening");
7280         mmiowb();
7281 }
7282
7283 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
7284
7285 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7286 {
7287         /* Save the current `magic' bit and set it so that the MF config is preserved across the MCP reset */
7288         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7289         *magic_val = val & SHARED_MF_CLP_MAGIC;
7290         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7291 }
7292
7293 /* Restore the value of the `magic' bit.
7294  *
7295  * @param bp Driver handle.
7296  * @param magic_val Old value of the `magic' bit.
7297  */
7298 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7299 {
7300         /* Restore the `magic' bit value... */
7301         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7302         MF_CFG_WR(bp, shared_mf_config.clp_mb,
7303                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7304 }
7305
7306 /**
7307  * Prepares for MCP reset: takes care of CLP configurations.
7308  *
7309  * @param bp
7310  * @param magic_val Old value of 'magic' bit.
7311  */
7312 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7313 {
7314         u32 shmem;
7315         u32 validity_offset;
7316
7317         DP(NETIF_MSG_HW, "Starting\n");
7318
7319         /* Set `magic' bit in order to save MF config */
7320         if (!CHIP_IS_E1(bp))
7321                 bnx2x_clp_reset_prep(bp, magic_val);
7322
7323         /* Get shmem offset */
7324         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7325         validity_offset = offsetof(struct shmem_region, validity_map[0]);
7326
7327         /* Clear validity map flags */
7328         if (shmem > 0)
7329                 REG_WR(bp, shmem + validity_offset, 0);
7330 }
7331
7332 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
7333 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
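/* i.e. up to MCP_TIMEOUT / MCP_ONE_TIMEOUT = 50 polls of ~100 ms each
 * (10x longer per poll on emulation/FPGA, see bnx2x_mcp_wait_one()).
 */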
7334
7335 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7336  * depending on the HW type.
7337  *
7338  * @param bp
7339  */
7340 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7341 {
7342         /* special handling for emulation and FPGA,
7343            wait 10 times longer */
7344         if (CHIP_REV_IS_SLOW(bp))
7345                 msleep(MCP_ONE_TIMEOUT*10);
7346         else
7347                 msleep(MCP_ONE_TIMEOUT);
7348 }
7349
7350 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7351 {
7352         u32 shmem, cnt, validity_offset, val;
7353         int rc = 0;
7354
7355         msleep(100);
7356
7357         /* Get shmem offset */
7358         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7359         if (shmem == 0) {
7360                 BNX2X_ERR("Shmem base address read returned 0\n");
7361                 rc = -ENOTTY;
7362                 goto exit_lbl;
7363         }
7364
7365         validity_offset = offsetof(struct shmem_region, validity_map[0]);
7366
7367         /* Wait for MCP to come up */
7368         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7369                 /* TBD: it's best to check the validity map of the last
7370                  * port; currently this checks port 0.
7371                  */
7372                 val = REG_RD(bp, shmem + validity_offset);
7373                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7374                    shmem + validity_offset, val);
7375
7376                 /* check that shared memory is valid. */
7377                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7378                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7379                         break;
7380
7381                 bnx2x_mcp_wait_one(bp);
7382         }
7383
7384         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7385
7386         /* Check that shared memory is valid. This indicates that MCP is up. */
7387         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7388             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7389                 BNX2X_ERR("Shmem signature not present. MCP is not up!\n");
7390                 rc = -ENOTTY;
7391                 goto exit_lbl;
7392         }
7393
7394 exit_lbl:
7395         /* Restore the `magic' bit value */
7396         if (!CHIP_IS_E1(bp))
7397                 bnx2x_clp_reset_done(bp, magic_val);
7398
7399         return rc;
7400 }
7401
7402 static void bnx2x_pxp_prep(struct bnx2x *bp)
7403 {
7404         if (!CHIP_IS_E1(bp)) {
7405                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7406                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7407                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7408                 mmiowb();
7409         }
7410 }
7411
7412 /*
7413  * Reset the whole chip except for:
7414  *      - PCIE core
7415  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7416  *              one reset bit)
7417  *      - IGU
7418  *      - MISC (including AEU)
7419  *      - GRC
7420  *      - RBCN, RBCP
7421  */
7422 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7423 {
7424         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7425
7426         not_reset_mask1 =
7427                 MISC_REGISTERS_RESET_REG_1_RST_HC |
7428                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7429                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7430
7431         not_reset_mask2 =
7432                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7433                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7434                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7435                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7436                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7437                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
7438                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7439                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7440
7441         reset_mask1 = 0xffffffff;
7442
7443         if (CHIP_IS_E1(bp))
7444                 reset_mask2 = 0xffff;
7445         else
7446                 reset_mask2 = 0x1ffff;
7447
7448         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7449                reset_mask1 & (~not_reset_mask1));
7450         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7451                reset_mask2 & (~not_reset_mask2));
7452
7453         barrier();
7454         mmiowb();
7455
7456         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7457         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7458         mmiowb();
7459 }
7460
7461 static int bnx2x_process_kill(struct bnx2x *bp)
7462 {
7463         int cnt = 1000;
7464         u32 val = 0;
7465         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7466
7467
7468         /* Empty the Tetris buffer, wait for 1s */
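        /* The constants polled for below (sr_cnt == 0x7e, blk_cnt == 0xa0,
         * both port-idle bits set, pgl_exp_rom2 all-ones) are presumably the
         * idle/reset values of the PXP2 read path; treat them as
         * HW-specified.
         */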
7469         do {
7470                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7471                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7472                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7473                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7474                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7475                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7476                     ((port_is_idle_0 & 0x1) == 0x1) &&
7477                     ((port_is_idle_1 & 0x1) == 0x1) &&
7478                     (pgl_exp_rom2 == 0xffffffff))
7479                         break;
7480                 msleep(1);
7481         } while (cnt-- > 0);
7482
7483         if (cnt <= 0) {
7484                 DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there"
7485                           " are still"
7486                           " outstanding read requests after 1s!\n");
7487                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7488                           " port_is_idle_0=0x%08x,"
7489                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7490                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7491                           pgl_exp_rom2);
7492                 return -EAGAIN;
7493         }
7494
7495         barrier();
7496
7497         /* Close gates #2, #3 and #4 */
7498         bnx2x_set_234_gates(bp, true);
7499
7500         /* TBD: Indicate that "process kill" is in progress to MCP */
7501
7502         /* Clear "unprepared" bit */
7503         REG_WR(bp, MISC_REG_UNPREPARED, 0);
7504         barrier();
7505
7506         /* Make sure all is written to the chip before the reset */
7507         mmiowb();
7508
7509         /* Wait for 1ms to empty GLUE and PCI-E core queues,
7510          * PSWHST, GRC and PSWRD Tetris buffer.
7511          */
7512         msleep(1);
7513
7514         /* Prepare for chip reset: */
7515         /* MCP */
7516         bnx2x_reset_mcp_prep(bp, &val);
7517
7518         /* PXP */
7519         bnx2x_pxp_prep(bp);
7520         barrier();
7521
7522         /* reset the chip */
7523         bnx2x_process_kill_chip_reset(bp);
7524         barrier();
7525
7526         /* Recover after reset: */
7527         /* MCP */
7528         if (bnx2x_reset_mcp_comp(bp, val))
7529                 return -EAGAIN;
7530
7531         /* PXP */
7532         bnx2x_pxp_prep(bp);
7533
7534         /* Open the gates #2, #3 and #4 */
7535         bnx2x_set_234_gates(bp, false);
7536
7537         /* TBD: IGU/AEU preparation: bring the AEU/IGU back to a
7538          * reset state and re-enable attentions. */
7539
7540         return 0;
7541 }
7542
7543 static int bnx2x_leader_reset(struct bnx2x *bp)
7544 {
7545         int rc = 0;
7546         /* Try to recover after the failure */
7547         if (bnx2x_process_kill(bp)) {
7548                 printk(KERN_ERR "%s: Something bad happened! Aii!\n",
7549                        bp->dev->name);
7550                 rc = -EAGAIN;
7551                 goto exit_leader_reset;
7552         }
7553
7554         /* Clear "reset is in progress" bit and update the driver state */
7555         bnx2x_set_reset_done(bp);
7556         bp->recovery_state = BNX2X_RECOVERY_DONE;
7557
7558 exit_leader_reset:
7559         bp->is_leader = 0;
7560         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7561         smp_wmb();
7562         return rc;
7563 }
7564
7565 /* Assumption: runs under rtnl lock. This together with the fact
7566  * that it's called only from bnx2x_reset_task() ensures that it
7567  * will never be called when netif_running(bp->dev) is false.
7568  */
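/* Recovery state machine (as implemented below): in BNX2X_RECOVERY_INIT
 * every function unloads and tries to grab the LEADER_LOCK; in
 * BNX2X_RECOVERY_WAIT the leader waits for the load count to drop to zero,
 * performs the "process kill" chip reset and reloads, while non-leaders
 * either reload once the reset is done or take over leadership if the old
 * leader has gone away.
 */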
7569 static void bnx2x_parity_recover(struct bnx2x *bp)
7570 {
7571         DP(NETIF_MSG_HW, "Handling parity\n");
7572         while (1) {
7573                 switch (bp->recovery_state) {
7574                 case BNX2X_RECOVERY_INIT:
7575                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7576                         /* Try to get a LEADER_LOCK HW lock */
7577                         if (bnx2x_trylock_hw_lock(bp,
7578                                 HW_LOCK_RESOURCE_RESERVED_08))
7579                                 bp->is_leader = 1;
7580
7581                         /* Stop the driver */
7582                         /* If interface has been removed - break */
7583                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7584                                 return;
7585
7586                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
7587                         /* Ensure "is_leader" and "recovery_state"
7588                          *  update values are seen on other CPUs
7589                          */
7590                         smp_wmb();
7591                         break;
7592
7593                 case BNX2X_RECOVERY_WAIT:
7594                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7595                         if (bp->is_leader) {
7596                                 u32 load_counter = bnx2x_get_load_cnt(bp);
7597                                 if (load_counter) {
7598                                         /* Wait until all other functions get
7599                                          * down.
7600                                          */
7601                                         schedule_delayed_work(&bp->reset_task,
7602                                                                 HZ/10);
7603                                         return;
7604                                 } else {
7605                                         /* If all other functions got down -
7606                                          * try to bring the chip back to
7607                                          * normal. In any case it's an exit
7608                                          * point for a leader.
7609                                          */
7610                                         if (bnx2x_leader_reset(bp) ||
7611                                         bnx2x_nic_load(bp, LOAD_NORMAL)) {
7612                                                 printk(KERN_ERR"%s: Recovery "
7613                                                 "has failed. Power cycle is "
7614                                                 "needed.\n", bp->dev->name);
7615                                                 /* Disconnect this device */
7616                                                 netif_device_detach(bp->dev);
7617                                                 /* Block ifup for all function
7618                                                  * of this ASIC until
7619                                                  * "process kill" or power
7620                                                  * cycle.
7621                                                  */
7622                                                 bnx2x_set_reset_in_progress(bp);
7623                                                 /* Shut down the power */
7624                                                 bnx2x_set_power_state(bp,
7625                                                                 PCI_D3hot);
7626                                                 return;
7627                                         }
7628
7629                                         return;
7630                                 }
7631                         } else { /* non-leader */
7632                                 if (!bnx2x_reset_is_done(bp)) {
7633                                         /* Try to get a LEADER_LOCK HW lock,
7634                                          * since a former leader may have
7635                                          * been unloaded by the user or may
7636                                          * have released leadership for
7637                                          * some other reason.
7638                                          */
7639                                         if (bnx2x_trylock_hw_lock(bp,
7640                                             HW_LOCK_RESOURCE_RESERVED_08)) {
7641                                                 /* I'm a leader now! Restart a
7642                                                  * switch case.
7643                                                  */
7644                                                 bp->is_leader = 1;
7645                                                 break;
7646                                         }
7647
7648                                         schedule_delayed_work(&bp->reset_task,
7649                                                                 HZ/10);
7650                                         return;
7651
7652                                 } else { /* A leader has completed
7653                                           * the "process kill". It's an exit
7654                                           * point for a non-leader.
7655                                           */
7656                                         bnx2x_nic_load(bp, LOAD_NORMAL);
7657                                         bp->recovery_state =
7658                                                 BNX2X_RECOVERY_DONE;
7659                                         smp_wmb();
7660                                         return;
7661                                 }
7662                         }
7663                 default:
7664                         return;
7665                 }
7666         }
7667 }
7668
7669 /* bnx2x_nic_unload() flushes the bnx2x_wq, so the reset task is
7670  * scheduled on a generic workqueue in order to prevent a deadlock.
7671  */
7672 static void bnx2x_reset_task(struct work_struct *work)
7673 {
7674         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
7675
7676 #ifdef BNX2X_STOP_ON_ERROR
7677         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7678                   " so reset not done to allow debug dump,\n"
7679                   " you will need to reboot when done\n");
7680         return;
7681 #endif
7682
7683         rtnl_lock();
7684
7685         if (!netif_running(bp->dev))
7686                 goto reset_task_exit;
7687
7688         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7689                 bnx2x_parity_recover(bp);
7690         else {
7691                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7692                 bnx2x_nic_load(bp, LOAD_NORMAL);
7693         }
7694
7695 reset_task_exit:
7696         rtnl_unlock();
7697 }
7698
7699 /* end of nic load/unload */
7700
7701 /*
7702  * Init service functions
7703  */
7704
7705 static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
7706 {
7707         u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7708         u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7709         return base + (BP_ABS_FUNC(bp)) * stride;
7710 }
7711
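/* On E1H+ the interrupt-disable write must land on function 0's
 * registers, so we temporarily "pretend" to be function 0 via the PGL
 * pretend register and restore our own function afterwards.
 */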
7712 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
7713 {
7714         u32 reg = bnx2x_get_pretend_reg(bp);
7715
7716         /* Flush all outstanding writes */
7717         mmiowb();
7718
7719         /* Pretend to be function 0 */
7720         REG_WR(bp, reg, 0);
7721         REG_RD(bp, reg);        /* Flush the GRC transaction (in the chip) */
7722
7723         /* From now we are in the "like-E1" mode */
7724         bnx2x_int_disable(bp);
7725
7726         /* Flush all outstanding writes */
7727         mmiowb();
7728
7729         /* Restore the original function */
7730         REG_WR(bp, reg, BP_ABS_FUNC(bp));
7731         REG_RD(bp, reg);
7732 }
7733
7734 static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
7735 {
7736         if (CHIP_IS_E1(bp))
7737                 bnx2x_int_disable(bp);
7738         else
7739                 bnx2x_undi_int_disable_e1h(bp);
7740 }
7741
7742 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7743 {
7744         u32 val;
7745
7746         /* Check if there is any driver already loaded */
7747         val = REG_RD(bp, MISC_REG_UNPREPARED);
7748         if (val == 0x1) {
7749                 /* Check if it is the UNDI driver:
7750                  * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
7751                  */
7752                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7753                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7754                 if (val == 0x7) {
7755                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7756                         /* save our pf_num */
7757                         int orig_pf_num = bp->pf_num;
7758                         u32 swap_en;
7759                         u32 swap_val;
7760
7761                         /* clear the UNDI indication */
7762                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7763
7764                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7765
7766                         /* try unload UNDI on port 0 */
7767                         bp->pf_num = 0;
7768                         bp->fw_seq =
7769                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7770                                 DRV_MSG_SEQ_NUMBER_MASK);
7771                         reset_code = bnx2x_fw_command(bp, reset_code, 0);
7772
7773                         /* if UNDI is loaded on the other port */
7774                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7775
7776                                 /* send "DONE" for previous unload */
7777                                 bnx2x_fw_command(bp,
7778                                                  DRV_MSG_CODE_UNLOAD_DONE, 0);
7779
7780                                 /* unload UNDI on port 1 */
7781                                 bp->pf_num = 1;
7782                                 bp->fw_seq =
7783                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7784                                         DRV_MSG_SEQ_NUMBER_MASK);
7785                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7786
7787                                 bnx2x_fw_command(bp, reset_code, 0);
7788                         }
7789
7790                         /* now it's safe to release the lock */
7791                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7792
7793                         bnx2x_undi_int_disable(bp);
7794
7795                         /* close input traffic and wait for it */
7796                         /* Do not rcv packets to BRB */
7797                         REG_WR(bp,
7798                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7799                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7800                         /* Do not direct rcv packets that are not for MCP to
7801                          * the BRB */
7802                         REG_WR(bp,
7803                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7804                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7805                         /* clear AEU */
7806                         REG_WR(bp,
7807                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7808                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7809                         msleep(10);
7810
7811                         /* save NIG port swap info */
7812                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7813                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7814                         /* reset device */
7815                         REG_WR(bp,
7816                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7817                                0xd3ffffff);
7818                         REG_WR(bp,
7819                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7820                                0x1403);
7821                         /* take the NIG out of reset and restore swap values */
7822                         REG_WR(bp,
7823                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7824                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7825                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7826                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7827
7828                         /* send unload done to the MCP */
7829                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7830
7831                         /* restore our func and fw_seq */
7832                         bp->pf_num = orig_pf_num;
7833                         bp->fw_seq =
7834                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7835                                 DRV_MSG_SEQ_NUMBER_MASK);
7836                 } else
7837                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7838         }
7839 }
7840
7841 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7842 {
7843         u32 val, val2, val3, val4, id;
7844         u16 pmc;
7845
7846         /* Get the chip revision id and number. */
7847         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7848         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7849         id = ((val & 0xffff) << 16);
7850         val = REG_RD(bp, MISC_REG_CHIP_REV);
7851         id |= ((val & 0xf) << 12);
7852         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7853         id |= ((val & 0xff) << 4);
7854         val = REG_RD(bp, MISC_REG_BOND_ID);
7855         id |= (val & 0xf);
7856         bp->common.chip_id = id;
7857
7858         /* Set doorbell size */
7859         bp->db_size = (1 << BNX2X_DB_SHIFT);
7860
7861         if (CHIP_IS_E2(bp)) {
7862                 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7863                 if ((val & 1) == 0)
7864                         val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7865                 else
7866                         val = (val >> 1) & 1;
7867                 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7868                                                        "2_PORT_MODE");
7869                 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7870                                                  CHIP_2_PORT_MODE;
7871
7872                 if (CHIP_MODE_IS_4_PORT(bp))
7873                         bp->pfid = (bp->pf_num >> 1);   /* 0..3 */
7874                 else
7875                         bp->pfid = (bp->pf_num & 0x6);  /* 0, 2, 4, 6 */
7876         } else {
7877                 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7878                 bp->pfid = bp->pf_num;                  /* 0..7 */
7879         }
7880
7881         /*
7882          * Set the base FW non-default (fast path) status block id. This value is
7883          * used to initialize the fw_sb_id saved on the fp/queue structure, which
7884          * determines the id used by the FW.
7885          */
7886         if (CHIP_IS_E1x(bp))
7887                 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7888         else /* E2 */
7889                 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7890
7891         bp->link_params.chip_id = bp->common.chip_id;
7892         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7893
7894         val = (REG_RD(bp, 0x2874) & 0x55);
7895         if ((bp->common.chip_id & 0x1) ||
7896             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7897                 bp->flags |= ONE_PORT_FLAG;
7898                 BNX2X_DEV_INFO("single port device\n");
7899         }
7900
7901         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7902         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7903                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7904         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7905                        bp->common.flash_size, bp->common.flash_size);
7906
7907         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7908         bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7909                                         MISC_REG_GENERIC_CR_1 :
7910                                         MISC_REG_GENERIC_CR_0));
7911         bp->link_params.shmem_base = bp->common.shmem_base;
7912         bp->link_params.shmem2_base = bp->common.shmem2_base;
7913         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
7914                        bp->common.shmem_base, bp->common.shmem2_base);
7915
7916         if (!bp->common.shmem_base) {
7917                 BNX2X_DEV_INFO("MCP not active\n");
7918                 bp->flags |= NO_MCP_FLAG;
7919                 return;
7920         }
7921
7922         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7923         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7924                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7925                 BNX2X_ERR("BAD MCP validity signature\n");
7926
7927         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7928         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7929
7930         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7931                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7932                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7933
7934         bp->link_params.feature_config_flags = 0;
7935         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7936         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7937                 bp->link_params.feature_config_flags |=
7938                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7939         else
7940                 bp->link_params.feature_config_flags &=
7941                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7942
7943         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7944         bp->common.bc_ver = val;
7945         BNX2X_DEV_INFO("bc_ver %X\n", val);
7946         if (val < BNX2X_BC_VER) {
7947                 /* for now only warn;
7948                  * later we might need to enforce this */
7949                 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7950                           "please upgrade BC\n", BNX2X_BC_VER, val);
7951         }
7952         bp->link_params.feature_config_flags |=
7953                                 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
7954                                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7955
7956         bp->link_params.feature_config_flags |=
7957                 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7958                 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
7959
7960         if (BP_E1HVN(bp) == 0) {
7961                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7962                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7963         } else {
7964                 /* no WOL capability for E1HVN != 0 */
7965                 bp->flags |= NO_WOL_FLAG;
7966         }
7967         BNX2X_DEV_INFO("%sWoL capable\n",
7968                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
7969
7970         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7971         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7972         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7973         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7974
7975         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7976                  val, val2, val3, val4);
7977 }
7978
7979 #define IGU_FID(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7980 #define IGU_VEC(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
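/* Each IGU CAM entry encodes which function (FID) owns the status block
 * and which vector (VEC) it maps to; the scan below counts this PF's
 * non-default SBs and records its default SB id.
 */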
7981
7982 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7983 {
7984         int pfid = BP_FUNC(bp);
7985         int vn = BP_E1HVN(bp);
7986         int igu_sb_id;
7987         u32 val;
7988         u8 fid;
7989
7990         bp->igu_base_sb = 0xff;
7991         bp->igu_sb_cnt = 0;
7992         if (CHIP_INT_MODE_IS_BC(bp)) {
7993                 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7994                                        NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
7995
7996                 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7997                         FP_SB_MAX_E1x;
7998
7999                 bp->igu_dsb_id =  E1HVN_MAX * FP_SB_MAX_E1x +
8000                         (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
8001
8002                 return;
8003         }
8004
8005         /* IGU in normal mode - read CAM */
8006         for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
8007              igu_sb_id++) {
8008                 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
8009                 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
8010                         continue;
8011                 fid = IGU_FID(val);
8012                 if ((fid & IGU_FID_ENCODE_IS_PF)) {
8013                         if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
8014                                 continue;
8015                         if (IGU_VEC(val) == 0)
8016                                 /* default status block */
8017                                 bp->igu_dsb_id = igu_sb_id;
8018                         else {
8019                                 if (bp->igu_base_sb == 0xff)
8020                                         bp->igu_base_sb = igu_sb_id;
8021                                 bp->igu_sb_cnt++;
8022                         }
8023                 }
8024         }
8025         bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8026                                    NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
8027         if (bp->igu_sb_cnt == 0)
8028                 BNX2X_ERR("CAM configuration error\n");
8029 }
8030
8031 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8032                                                     u32 switch_cfg)
8033 {
8034         int cfg_size = 0, idx, port = BP_PORT(bp);
8035
8036         /* Aggregation of supported attributes of all external phys */
8037         bp->port.supported[0] = 0;
8038         bp->port.supported[1] = 0;
8039         switch (bp->link_params.num_phys) {
8040         case 1:
8041                 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
8042                 cfg_size = 1;
8043                 break;
8044         case 2:
8045                 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
8046                 cfg_size = 1;
8047                 break;
8048         case 3:
8049                 if (bp->link_params.multi_phy_config &
8050                     PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
8051                         bp->port.supported[1] =
8052                                 bp->link_params.phy[EXT_PHY1].supported;
8053                         bp->port.supported[0] =
8054                                 bp->link_params.phy[EXT_PHY2].supported;
8055                 } else {
8056                         bp->port.supported[0] =
8057                                 bp->link_params.phy[EXT_PHY1].supported;
8058                         bp->port.supported[1] =
8059                                 bp->link_params.phy[EXT_PHY2].supported;
8060                 }
8061                 cfg_size = 2;
8062                 break;
8063         }
8064
8065         if (!(bp->port.supported[0] || bp->port.supported[1])) {
8066                 BNX2X_ERR("NVRAM config error. BAD phy config."
8067                           " PHY1 config 0x%x, PHY2 config 0x%x\n",
8068                            SHMEM_RD(bp,
8069                            dev_info.port_hw_config[port].external_phy_config),
8070                            SHMEM_RD(bp,
8071                            dev_info.port_hw_config[port].external_phy_config2));
8072                 return;
8073         }
8074
8075         switch (switch_cfg) {
8076         case SWITCH_CFG_1G:
8077                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8078                                            port*0x10);
8079                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8080                 break;
8081
8082         case SWITCH_CFG_10G:
8083                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8084                                            port*0x18);
8085                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8086                 break;
8087
8088         default:
8089                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8090                           bp->port.link_config[0]);
8091                 return;
8092         }
8093         /* mask what we support according to speed_cap_mask per configuration */
8094         for (idx = 0; idx < cfg_size; idx++) {
8095                 if (!(bp->link_params.speed_cap_mask[idx] &
8096                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8097                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
8098
8099                 if (!(bp->link_params.speed_cap_mask[idx] &
8100                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8101                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
8102
8103                 if (!(bp->link_params.speed_cap_mask[idx] &
8104                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8105                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
8106
8107                 if (!(bp->link_params.speed_cap_mask[idx] &
8108                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8109                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
8110
8111                 if (!(bp->link_params.speed_cap_mask[idx] &
8112                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8113                         bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
8114                                                      SUPPORTED_1000baseT_Full);
8115
8116                 if (!(bp->link_params.speed_cap_mask[idx] &
8117                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8118                         bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
8119
8120                 if (!(bp->link_params.speed_cap_mask[idx] &
8121                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8122                         bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
8123
8124         }
8125
8126         BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
8127                        bp->port.supported[1]);
8128 }
8129
8130 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8131 {
8132         u32 link_config, idx, cfg_size = 0;
8133         bp->port.advertising[0] = 0;
8134         bp->port.advertising[1] = 0;
8135         switch (bp->link_params.num_phys) {
8136         case 1:
8137         case 2:
8138                 cfg_size = 1;
8139                 break;
8140         case 3:
8141                 cfg_size = 2;
8142                 break;
8143         }
8144         for (idx = 0; idx < cfg_size; idx++) {
8145                 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
8146                 link_config = bp->port.link_config[idx];
8147                 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8148                 case PORT_FEATURE_LINK_SPEED_AUTO:
8149                         if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
8150                                 bp->link_params.req_line_speed[idx] =
8151                                         SPEED_AUTO_NEG;
8152                                 bp->port.advertising[idx] |=
8153                                         bp->port.supported[idx];
8154                         } else {
8155                                 /* force 10G, no AN */
8156                                 bp->link_params.req_line_speed[idx] =
8157                                         SPEED_10000;
8158                                 bp->port.advertising[idx] |=
8159                                         (ADVERTISED_10000baseT_Full |
8160                                          ADVERTISED_FIBRE);
8161                                 continue;
8162                         }
8163                         break;
8164
8165                 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8166                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
8167                                 bp->link_params.req_line_speed[idx] =
8168                                         SPEED_10;
8169                                 bp->port.advertising[idx] |=
8170                                         (ADVERTISED_10baseT_Full |
8171                                          ADVERTISED_TP);
8172                         } else {
8173                                 BNX2X_ERROR("NVRAM config error. "
8174                                             "Invalid link_config 0x%x"
8175                                             "  speed_cap_mask 0x%x\n",
8176                                             link_config,
8177                                     bp->link_params.speed_cap_mask[idx]);
8178                                 return;
8179                         }
8180                         break;
8181
8182                 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8183                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
8184                                 bp->link_params.req_line_speed[idx] =
8185                                         SPEED_10;
8186                                 bp->link_params.req_duplex[idx] =
8187                                         DUPLEX_HALF;
8188                                 bp->port.advertising[idx] |=
8189                                         (ADVERTISED_10baseT_Half |
8190                                          ADVERTISED_TP);
8191                         } else {
8192                                 BNX2X_ERROR("NVRAM config error. "
8193                                             "Invalid link_config 0x%x"
8194                                             "  speed_cap_mask 0x%x\n",
8195                                             link_config,
8196                                           bp->link_params.speed_cap_mask[idx]);
8197                                 return;
8198                         }
8199                         break;
8200
8201                 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8202                         if (bp->port.supported[idx] &
8203                             SUPPORTED_100baseT_Full) {
8204                                 bp->link_params.req_line_speed[idx] =
8205                                         SPEED_100;
8206                                 bp->port.advertising[idx] |=
8207                                         (ADVERTISED_100baseT_Full |
8208                                          ADVERTISED_TP);
8209                         } else {
8210                                 BNX2X_ERROR("NVRAM config error. "
8211                                             "Invalid link_config 0x%x"
8212                                             "  speed_cap_mask 0x%x\n",
8213                                             link_config,
8214                                           bp->link_params.speed_cap_mask[idx]);
8215                                 return;
8216                         }
8217                         break;
8218
8219                 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8220                         if (bp->port.supported[idx] &
8221                             SUPPORTED_100baseT_Half) {
8222                                 bp->link_params.req_line_speed[idx] =
8223                                                                 SPEED_100;
8224                                 bp->link_params.req_duplex[idx] =
8225                                                                 DUPLEX_HALF;
8226                                 bp->port.advertising[idx] |=
8227                                         (ADVERTISED_100baseT_Half |
8228                                          ADVERTISED_TP);
8229                         } else {
8230                                 BNX2X_ERROR("NVRAM config error. "
8231                                     "Invalid link_config 0x%x"
8232                                     "  speed_cap_mask 0x%x\n",
8233                                     link_config,
8234                                     bp->link_params.speed_cap_mask[idx]);
8235                                 return;
8236                         }
8237                         break;
8238
8239                 case PORT_FEATURE_LINK_SPEED_1G:
8240                         if (bp->port.supported[idx] &
8241                             SUPPORTED_1000baseT_Full) {
8242                                 bp->link_params.req_line_speed[idx] =
8243                                         SPEED_1000;
8244                                 bp->port.advertising[idx] |=
8245                                         (ADVERTISED_1000baseT_Full |
8246                                          ADVERTISED_TP);
8247                         } else {
8248                                 BNX2X_ERROR("NVRAM config error. "
8249                                     "Invalid link_config 0x%x"
8250                                     "  speed_cap_mask 0x%x\n",
8251                                     link_config,
8252                                     bp->link_params.speed_cap_mask[idx]);
8253                                 return;
8254                         }
8255                         break;
8256
8257                 case PORT_FEATURE_LINK_SPEED_2_5G:
8258                         if (bp->port.supported[idx] &
8259                             SUPPORTED_2500baseX_Full) {
8260                                 bp->link_params.req_line_speed[idx] =
8261                                         SPEED_2500;
8262                                 bp->port.advertising[idx] |=
8263                                         (ADVERTISED_2500baseX_Full |
8264                                                 ADVERTISED_TP);
8265                         } else {
8266                                 BNX2X_ERROR("NVRAM config error. "
8267                                     "Invalid link_config 0x%x"
8268                                     "  speed_cap_mask 0x%x\n",
8269                                     link_config,
8270                                     bp->link_params.speed_cap_mask[idx]);
8271                                 return;
8272                         }
8273                         break;
8274
8275                 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8276                 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8277                 case PORT_FEATURE_LINK_SPEED_10G_KR:
8278                         if (bp->port.supported[idx] &
8279                             SUPPORTED_10000baseT_Full) {
8280                                 bp->link_params.req_line_speed[idx] =
8281                                         SPEED_10000;
8282                                 bp->port.advertising[idx] |=
8283                                         (ADVERTISED_10000baseT_Full |
8284                                                 ADVERTISED_FIBRE);
8285                         } else {
8286                                 BNX2X_ERROR("NVRAM config error. "
8287                                     "Invalid link_config 0x%x"
8288                                     "  speed_cap_mask 0x%x\n",
8289                                     link_config,
8290                                     bp->link_params.speed_cap_mask[idx]);
8291                                 return;
8292                         }
8293                         break;
8294
8295                 default:
8296                         BNX2X_ERROR("NVRAM config error. "
8297                                     "BAD link speed link_config 0x%x\n",
8298                                     link_config);
8299                         bp->link_params.req_line_speed[idx] =
8300                                 SPEED_AUTO_NEG;
8301                         bp->port.advertising[idx] =
8302                                 bp->port.supported[idx];
8303                         break;
8304                 }
8305
8306                 bp->link_params.req_flow_ctrl[idx] = (link_config &
8307                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8308                 if ((bp->link_params.req_flow_ctrl[idx] ==
8309                      BNX2X_FLOW_CTRL_AUTO) &&
8310                     !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8311                         bp->link_params.req_flow_ctrl[idx] =
8312                                 BNX2X_FLOW_CTRL_NONE;
8313                 }
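                /* Example (hypothetical configuration): a forced-10G link
                 * offers no autoneg, so pause cannot be negotiated and
                 * FLOW_CTRL_AUTO is degraded to FLOW_CTRL_NONE above.
                 */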
8314
8315                 BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl"
8316                                " 0x%x advertising 0x%x\n",
8317                                bp->link_params.req_line_speed[idx],
8318                                bp->link_params.req_duplex[idx],
8319                                bp->link_params.req_flow_ctrl[idx],
8320                                bp->port.advertising[idx]);
8321         }
8322 }
8323
8324 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8325 {
8326         mac_hi = cpu_to_be16(mac_hi);
8327         mac_lo = cpu_to_be32(mac_lo);
8328         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8329         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8330 }
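
/*
 * Illustrative sketch, not part of the driver flow (the address below is
 * hypothetical): shmem hands us the station address as a 16-bit upper word
 * and a 32-bit lower word in host order; bnx2x_set_mac_buf() swaps each to
 * big endian and packs them back to back, yielding the wire-order MAC.
 */
static inline void bnx2x_set_mac_buf_example(void)
{
        u8 mac[ETH_ALEN];

        /* For 00:10:18:aa:bb:cc the two shmem words would read: */
        bnx2x_set_mac_buf(mac, 0x18aabbcc /* mac_lo */, 0x0010 /* mac_hi */);
        /* mac[] now holds { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc } */
}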
8331
8332 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8333 {
8334         int port = BP_PORT(bp);
8335         u32 config;
8336         u32 ext_phy_type, ext_phy_config;
8337
8338         bp->link_params.bp = bp;
8339         bp->link_params.port = port;
8340
8341         bp->link_params.lane_config =
8342                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8343
8344         bp->link_params.speed_cap_mask[0] =
8345                 SHMEM_RD(bp,
8346                          dev_info.port_hw_config[port].speed_capability_mask);
8347         bp->link_params.speed_cap_mask[1] =
8348                 SHMEM_RD(bp,
8349                          dev_info.port_hw_config[port].speed_capability_mask2);
8350         bp->port.link_config[0] =
8351                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8352
8353         bp->port.link_config[1] =
8354                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
8355
8356         bp->link_params.multi_phy_config =
8357                 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
8358         /* If the device is capable of WoL, set the default state according
8359          * to the HW
8360          */
8361         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8362         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8363                    (config & PORT_FEATURE_WOL_ENABLED));
8364
8365         BNX2X_DEV_INFO("lane_config 0x%08x  "
8366                        "speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
8367                        bp->link_params.lane_config,
8368                        bp->link_params.speed_cap_mask[0],
8369                        bp->port.link_config[0]);
8370
8371         bp->link_params.switch_cfg = (bp->port.link_config[0] &
8372                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
8373         bnx2x_phy_probe(&bp->link_params);
8374         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8375
8376         bnx2x_link_settings_requested(bp);
8377
8378         /*
8379          * If connected directly, work with the internal PHY, otherwise, work
8380          * with the external PHY
8381          */
8382         ext_phy_config =
8383                 SHMEM_RD(bp,
8384                          dev_info.port_hw_config[port].external_phy_config);
8385         ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
8386         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8387                 bp->mdio.prtad = bp->port.phy_addr;
8388
8389         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8390                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8391                 bp->mdio.prtad =
8392                         XGXS_EXT_PHY_ADDR(ext_phy_config);
8393
8394         /*
8395          * Check if the HW lock is required to access the MDC/MDIO bus to
8396          * the PHY(s); in MF mode it is always set, to cover self-test cases
8397          */
8398         if (IS_MF(bp))
8399                 bp->port.need_hw_lock = 1;
8400         else
8401                 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
8402                                                         bp->common.shmem_base,
8403                                                         bp->common.shmem2_base);
8404 }
8405
8406 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8407 {
8408         u32 val, val2;
8409         int func = BP_ABS_FUNC(bp);
8410         int port = BP_PORT(bp);
8411
8412         if (BP_NOMCP(bp)) {
8413                 BNX2X_ERROR("warning: random MAC workaround active\n");
8414                 random_ether_addr(bp->dev->dev_addr);
8415         } else if (IS_MF(bp)) {
8416                 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8417                 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8418                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8419                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
8420                         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8421
8422 #ifdef BCM_CNIC
8423                 /* iSCSI NPAR MAC */
8424                 if (IS_MF_SI(bp)) {
8425                         u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8426                         if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
8427                                 val2 = MF_CFG_RD(bp, func_ext_config[func].
8428                                                      iscsi_mac_addr_upper);
8429                                 val = MF_CFG_RD(bp, func_ext_config[func].
8430                                                     iscsi_mac_addr_lower);
8431                                 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8432                         }
8433                 }
8434 #endif
8435         } else {
8436                 /* In SF mode, read the MACs from the port configuration */
8437                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8438                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8439                 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8440
8441 #ifdef BCM_CNIC
8442                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
8443                                     iscsi_mac_upper);
8444                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8445                                    iscsi_mac_lower);
8446                 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8447 #endif
8448         }
8449
8450         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8451         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8452
8453 #ifdef BCM_CNIC
8454         /* Inform the upper layers about FCoE MAC */
8455         if (!CHIP_IS_E1x(bp)) {
8456                 if (IS_MF_SD(bp))
8457                         memcpy(bp->fip_mac, bp->dev->dev_addr,
8458                                sizeof(bp->fip_mac));
8459                 else
8460                         memcpy(bp->fip_mac, bp->iscsi_mac,
8461                                sizeof(bp->fip_mac));
8462         }
8463 #endif
8464 }
8465
8466 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8467 {
8468         int func = BP_ABS_FUNC(bp); /* the absolute function number */
8469         int vn, port;
8470         u32 val = 0;
8471         int rc = 0;
8472
8473         bnx2x_get_common_hwinfo(bp);
8474
8475         if (CHIP_IS_E1x(bp)) {
8476                 bp->common.int_block = INT_BLOCK_HC;
8477
8478                 bp->igu_dsb_id = DEF_SB_IGU_ID;
8479                 bp->igu_base_sb = 0;
8480                 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
8481                                        NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
8482         } else {
8483                 bp->common.int_block = INT_BLOCK_IGU;
8484                 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8485                 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8486                         DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8487                         bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8488                 } else
8489                         DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
8490
8491                 bnx2x_get_igu_cam_info(bp);
8492
8493         }
8494         DP(NETIF_MSG_PROBE, "igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n",
8495                              bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8496
8497         /*
8498          * Initialize MF configuration
8499          */
8500
8501         bp->mf_ov = 0;
8502         bp->mf_mode = 0;
8503         vn = BP_E1HVN(bp);
8504         port = BP_PORT(bp);
8505
8506         if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8507                 DP(NETIF_MSG_PROBE,
8508                             "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8509                             bp->common.shmem2_base, SHMEM2_RD(bp, size),
8510                             (u32)offsetof(struct shmem2_region, mf_cfg_addr));
8511                 if (SHMEM2_HAS(bp, mf_cfg_addr))
8512                         bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8513                 else
8514                         bp->common.mf_cfg_base = bp->common.shmem_base +
8515                                 offsetof(struct shmem_region, func_mb) +
8516                                 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
8517                 /*
8518                  * Get the MF configuration:
8519                  * 1. existence of an MF configuration
8520                  * 2. MAC address must be legal (check only upper bytes)
8521                  *    for Switch-Independent mode;
8522                  *    OVLAN must be legal for Switch-Dependent mode
8523                  * 3. SF_MODE configures specific MF mode
8524                  */
8525                 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
8526                         /* get mf configuration */
8527                         val = SHMEM_RD(bp,
8528                                        dev_info.shared_feature_config.config);
8529                         val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
8530
8531                         switch (val) {
8532                         case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
8533                                 val = MF_CFG_RD(bp, func_mf_config[func].
8534                                                 mac_upper);
8535                                 /* check for legal MAC (upper bytes) */
8536                                 if (val != 0xffff) {
8537                                         bp->mf_mode = MULTI_FUNCTION_SI;
8538                                         bp->mf_config[vn] = MF_CFG_RD(bp,
8539                                                    func_mf_config[func].config);
8540                                 } else
8541                                         DP(NETIF_MSG_PROBE, "illegal MAC "
8542                                                             "address for SI\n");
8543                                 break;
8544                         case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
8545                                 /* get OV configuration */
8546                                 val = MF_CFG_RD(bp,
8547                                         func_mf_config[FUNC_0].e1hov_tag);
8548                                 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
8549
8550                                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8551                                         bp->mf_mode = MULTI_FUNCTION_SD;
8552                                         bp->mf_config[vn] = MF_CFG_RD(bp,
8553                                                 func_mf_config[func].config);
8554                                 } else
8555                                         DP(NETIF_MSG_PROBE, "illegal OV for "
8556                                                             "SD\n");
8557                                 break;
8558                         default:
8559                                 /* Unknown configuration: reset mf_config */
8560                                 bp->mf_config[vn] = 0;
8561                                 DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
8562                                    val);
8563                         }
8564                 }
8565
8566                 BNX2X_DEV_INFO("%s function mode\n",
8567                                IS_MF(bp) ? "multi" : "single");
8568
8569                 switch (bp->mf_mode) {
8570                 case MULTI_FUNCTION_SD:
8571                         val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
8572                               FUNC_MF_CFG_E1HOV_TAG_MASK;
8573                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8574                                 bp->mf_ov = val;
8575                                 BNX2X_DEV_INFO("MF OV for func %d is %d"
8576                                                " (0x%04x)\n", func,
8577                                                bp->mf_ov, bp->mf_ov);
8578                         } else {
8579                                 BNX2X_ERR("No valid MF OV for func %d,"
8580                                           "  aborting\n", func);
8581                                 rc = -EPERM;
8582                         }
8583                         break;
8584                 case MULTI_FUNCTION_SI:
8585                         BNX2X_DEV_INFO("func %d is in MF "
8586                                        "switch-independent mode\n", func);
8587                         break;
8588                 default:
8589                         if (vn) {
8590                                 BNX2X_ERR("VN %d in single function mode,"
8591                                           "  aborting\n", vn);
8592                                 rc = -EPERM;
8593                         }
8594                         break;
8595                 }
8596
8597         }
8598
8599         /* adjust igu_sb_cnt to MF for E1x */
8600         if (CHIP_IS_E1x(bp) && IS_MF(bp))
8601                 bp->igu_sb_cnt /= E1HVN_MAX;
8602
8603         /*
8604          * adjust the E2 SB count: to be removed once the FW supports
8605          * more than 16 L2 clients
8606          */
8607 #define MAX_L2_CLIENTS                          16
8608         if (CHIP_IS_E2(bp))
8609                 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8610                                        MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
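        /* e.g. (hypothetical E2 numbers): in MF mode this caps igu_sb_cnt
         * at 16 / 4 = 4 status blocks per function, while a single-function
         * E2 keeps up to the full 16.
         */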
8611
8612         if (!BP_NOMCP(bp)) {
8613                 bnx2x_get_port_hwinfo(bp);
8614
8615                 bp->fw_seq =
8616                         (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8617                          DRV_MSG_SEQ_NUMBER_MASK);
8618                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8619         }
8620
8621         /* Get MAC addresses */
8622         bnx2x_get_mac_hwinfo(bp);
8623
8624         return rc;
8625 }
8626
8627 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8628 {
8629         int cnt, i, block_end, rodi;
8630         char vpd_data[BNX2X_VPD_LEN+1];
8631         char str_id_reg[VENDOR_ID_LEN+1];
8632         char str_id_cap[VENDOR_ID_LEN+1];
8633         u8 len;
8634
8635         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8636         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8637
8638         if (cnt < BNX2X_VPD_LEN)
8639                 goto out_not_found;
8640
8641         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8642                              PCI_VPD_LRDT_RO_DATA);
8643         if (i < 0)
8644                 goto out_not_found;
8645
8646
8647         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8648                     pci_vpd_lrdt_size(&vpd_data[i]);
8649
8650         i += PCI_VPD_LRDT_TAG_SIZE;
8651
8652         if (block_end > BNX2X_VPD_LEN)
8653                 goto out_not_found;
8654
8655         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8656                                    PCI_VPD_RO_KEYWORD_MFR_ID);
8657         if (rodi < 0)
8658                 goto out_not_found;
8659
8660         len = pci_vpd_info_field_size(&vpd_data[rodi]);
8661
8662         if (len != VENDOR_ID_LEN)
8663                 goto out_not_found;
8664
8665         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8666
8667         /* vendor specific info */
8668         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8669         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8670         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8671             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8672
8673                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8674                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
8675                 if (rodi >= 0) {
8676                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
8677
8678                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8679
8680                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8681                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8682                                 bp->fw_ver[len] = ' ';
8683                         }
8684                 }
8685                 return;
8686         }
8687 out_not_found:
8688         return;
8689 }
8690
8691 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8692 {
8693         int func;
8694         int timer_interval;
8695         int rc;
8696
8697         /* Disable interrupt handling until HW is initialized */
8698         atomic_set(&bp->intr_sem, 1);
8699         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8700
8701         mutex_init(&bp->port.phy_mutex);
8702         mutex_init(&bp->fw_mb_mutex);
8703         spin_lock_init(&bp->stats_lock);
8704 #ifdef BCM_CNIC
8705         mutex_init(&bp->cnic_mutex);
8706 #endif
8707
8708         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8709         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8710
8711         rc = bnx2x_get_hwinfo(bp);
8712
8713         if (!rc)
8714                 rc = bnx2x_alloc_mem_bp(bp);
8715
8716         bnx2x_read_fwinfo(bp);
8717
8718         func = BP_FUNC(bp);
8719
8720         /* need to reset the chip if UNDI was active */
8721         if (!BP_NOMCP(bp))
8722                 bnx2x_undi_unload(bp);
8723
8724         if (CHIP_REV_IS_FPGA(bp))
8725                 dev_err(&bp->pdev->dev, "FPGA detected\n");
8726
8727         if (BP_NOMCP(bp) && (func == 0))
8728                 dev_err(&bp->pdev->dev, "MCP disabled, "
8729                                         "must load devices in order!\n");
8730
8731         bp->multi_mode = multi_mode;
8732         bp->int_mode = int_mode;
8733
8734         bp->dev->features |= NETIF_F_GRO;
8735
8736         /* Set TPA flags */
8737         if (disable_tpa) {
8738                 bp->flags &= ~TPA_ENABLE_FLAG;
8739                 bp->dev->features &= ~NETIF_F_LRO;
8740         } else {
8741                 bp->flags |= TPA_ENABLE_FLAG;
8742                 bp->dev->features |= NETIF_F_LRO;
8743         }
8744         bp->disable_tpa = disable_tpa;
8745
8746         if (CHIP_IS_E1(bp))
8747                 bp->dropless_fc = 0;
8748         else
8749                 bp->dropless_fc = dropless_fc;
8750
8751         bp->mrrs = mrrs;
8752
8753         bp->tx_ring_size = MAX_TX_AVAIL;
8754
8755         bp->rx_csum = 1;
8756
8757         /* make sure that the numbers are in the right granularity */
8758         bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8759         bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
8760
8761         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8762         bp->current_interval = (poll ? poll : timer_interval);
8763
8764         init_timer(&bp->timer);
8765         bp->timer.expires = jiffies + bp->current_interval;
8766         bp->timer.data = (unsigned long) bp;
8767         bp->timer.function = bnx2x_timer;
8768
8769         bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
8770         bnx2x_dcbx_init_params(bp);
8771
8772         return rc;
8773 }
8774
8775
8776 /****************************************************************************
8777 * General service functions
8778 ****************************************************************************/
8779
8780 /* called with rtnl_lock */
8781 static int bnx2x_open(struct net_device *dev)
8782 {
8783         struct bnx2x *bp = netdev_priv(dev);
8784
8785         netif_carrier_off(dev);
8786
8787         bnx2x_set_power_state(bp, PCI_D0);
8788
8789         if (!bnx2x_reset_is_done(bp)) {
8790                 do {
8791                         /* Reset the MCP mailbox sequence if there is an
8792                          * ongoing recovery
8793                          */
8794                         bp->fw_seq = 0;
8795
8796                         /* If this is the first function to load and the
8797                          * reset-done flag is still not set, a recovery is
8798                          * likely pending. We don't check the attention
8799                          * state (a "common" reset may have cleared it) but
8800                          * we shall proceed with "process kill" anyway.
8801                          */
8802                         if ((bnx2x_get_load_cnt(bp) == 0) &&
8803                                 bnx2x_trylock_hw_lock(bp,
8804                                 HW_LOCK_RESOURCE_RESERVED_08) &&
8805                                 (!bnx2x_leader_reset(bp))) {
8806                                 DP(NETIF_MSG_HW, "Recovered in open\n");
8807                                 break;
8808                         }
8809
8810                         bnx2x_set_power_state(bp, PCI_D3hot);
8811
8812                         printk(KERN_ERR "%s: Recovery flow hasn't completed"
8813                                " yet. Try again later. If you still see this"
8814                                " message after a few retries, a power cycle"
8815                                " is required.\n", bp->dev->name);
8816
8817                         return -EAGAIN;
8818                 } while (0);
8819         }
8820
8821         bp->recovery_state = BNX2X_RECOVERY_DONE;
8822
8823         return bnx2x_nic_load(bp, LOAD_OPEN);
8824 }
8825
8826 /* called with rtnl_lock */
8827 static int bnx2x_close(struct net_device *dev)
8828 {
8829         struct bnx2x *bp = netdev_priv(dev);
8830
8831         /* Unload the driver, release IRQs */
8832         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
8833         bnx2x_set_power_state(bp, PCI_D3hot);
8834
8835         return 0;
8836 }
8837
8838 /* called with netif_tx_lock from dev_mcast.c */
8839 void bnx2x_set_rx_mode(struct net_device *dev)
8840 {
8841         struct bnx2x *bp = netdev_priv(dev);
8842         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8843         int port = BP_PORT(bp);
8844
8845         if (bp->state != BNX2X_STATE_OPEN) {
8846                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8847                 return;
8848         }
8849
8850         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8851
8852         if (dev->flags & IFF_PROMISC)
8853                 rx_mode = BNX2X_RX_MODE_PROMISC;
8854         else if ((dev->flags & IFF_ALLMULTI) ||
8855                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8856                   CHIP_IS_E1(bp)))
8857                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8858         else { /* some multicasts */
8859                 if (CHIP_IS_E1(bp)) {
8860                         /*
8861                          * Set the MC list without waiting: waiting implies
8862                          * sleeping, and set_rx_mode() can be invoked from a
8863                          * non-sleepable context
8864                          */
8865                         u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8866                                      BNX2X_MAX_EMUL_MULTI*(1 + port) :
8867                                      BNX2X_MAX_MULTICAST*(1 + port));
8868
8869                         bnx2x_set_e1_mc_list(bp, offset);
8870                 } else { /* E1H */
8871                         /* Accept one or more multicasts */
8872                         struct netdev_hw_addr *ha;
8873                         u32 mc_filter[MC_HASH_SIZE];
8874                         u32 crc, bit, regidx;
8875                         int i;
8876
8877                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8878
8879                         netdev_for_each_mc_addr(ha, dev) {
8880                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
8881                                    bnx2x_mc_addr(ha));
8882
8883                                 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8884                                                 ETH_ALEN);
8885                                 bit = (crc >> 24) & 0xff;
8886                                 regidx = bit >> 5;
8887                                 bit &= 0x1f;
8888                                 mc_filter[regidx] |= (1 << bit);
8889                         }
8890
8891                         for (i = 0; i < MC_HASH_SIZE; i++)
8892                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8893                                        mc_filter[i]);
8894                 }
8895         }
8896
8897         bp->rx_mode = rx_mode;
8898         bnx2x_set_storm_rx_mode(bp);
8899 }
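
/*
 * Minimal sketch of the E1H multicast hashing above (not called by the
 * driver): the crc32c of the 6-byte address is reduced to an 8-bit bin,
 * which selects one bit out of the 256-bit MC_HASH register file.
 */
static inline void bnx2x_mc_hash_example(const u8 *mac, u32 *regidx, u32 *bit)
{
        u32 crc = crc32c_le(0, mac, ETH_ALEN);
        u32 bin = (crc >> 24) & 0xff;   /* 8-bit hash bin: 0..255 */

        *regidx = bin >> 5;             /* one of eight 32-bit registers */
        *bit = bin & 0x1f;              /* bit position within that register */
}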
8900
8901 /* called with rtnl_lock */
8902 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8903                            int devad, u16 addr)
8904 {
8905         struct bnx2x *bp = netdev_priv(netdev);
8906         u16 value;
8907         int rc;
8908
8909         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8910            prtad, devad, addr);
8911
8912         /* The HW expects different devad if CL22 is used */
8913         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8914
8915         bnx2x_acquire_phy_lock(bp);
8916         rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
8917         bnx2x_release_phy_lock(bp);
8918         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
8919
8920         if (!rc)
8921                 rc = value;
8922         return rc;
8923 }
8924
8925 /* called with rtnl_lock */
8926 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8927                             u16 addr, u16 value)
8928 {
8929         struct bnx2x *bp = netdev_priv(netdev);
8930         int rc;
8931
8932         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8933                            " value 0x%x\n", prtad, devad, addr, value);
8934
8935         /* The HW expects different devad if CL22 is used */
8936         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8937
8938         bnx2x_acquire_phy_lock(bp);
8939         rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
8940         bnx2x_release_phy_lock(bp);
8941         return rc;
8942 }
8943
8944 /* called with rtnl_lock */
8945 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8946 {
8947         struct bnx2x *bp = netdev_priv(dev);
8948         struct mii_ioctl_data *mdio = if_mii(ifr);
8949
8950         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8951            mdio->phy_id, mdio->reg_num, mdio->val_in);
8952
8953         if (!netif_running(dev))
8954                 return -EAGAIN;
8955
8956         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
8957 }
8958
8959 #ifdef CONFIG_NET_POLL_CONTROLLER
8960 static void poll_bnx2x(struct net_device *dev)
8961 {
8962         struct bnx2x *bp = netdev_priv(dev);
8963
8964         disable_irq(bp->pdev->irq);
8965         bnx2x_interrupt(bp->pdev->irq, dev);
8966         enable_irq(bp->pdev->irq);
8967 }
8968 #endif
8969
8970 static const struct net_device_ops bnx2x_netdev_ops = {
8971         .ndo_open               = bnx2x_open,
8972         .ndo_stop               = bnx2x_close,
8973         .ndo_start_xmit         = bnx2x_start_xmit,
8974         .ndo_select_queue       = bnx2x_select_queue,
8975         .ndo_set_multicast_list = bnx2x_set_rx_mode,
8976         .ndo_set_mac_address    = bnx2x_change_mac_addr,
8977         .ndo_validate_addr      = eth_validate_addr,
8978         .ndo_do_ioctl           = bnx2x_ioctl,
8979         .ndo_change_mtu         = bnx2x_change_mtu,
8980         .ndo_tx_timeout         = bnx2x_tx_timeout,
8981 #ifdef CONFIG_NET_POLL_CONTROLLER
8982         .ndo_poll_controller    = poll_bnx2x,
8983 #endif
8984 };
8985
8986 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8987                                     struct net_device *dev)
8988 {
8989         struct bnx2x *bp;
8990         int rc;
8991
8992         SET_NETDEV_DEV(dev, &pdev->dev);
8993         bp = netdev_priv(dev);
8994
8995         bp->dev = dev;
8996         bp->pdev = pdev;
8997         bp->flags = 0;
8998         bp->pf_num = PCI_FUNC(pdev->devfn);
8999
9000         rc = pci_enable_device(pdev);
9001         if (rc) {
9002                 dev_err(&bp->pdev->dev,
9003                         "Cannot enable PCI device, aborting\n");
9004                 goto err_out;
9005         }
9006
9007         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9008                 dev_err(&bp->pdev->dev,
9009                         "Cannot find PCI device base address, aborting\n");
9010                 rc = -ENODEV;
9011                 goto err_out_disable;
9012         }
9013
9014         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9015                 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
9016                        " base address, aborting\n");
9017                 rc = -ENODEV;
9018                 goto err_out_disable;
9019         }
9020
9021         if (atomic_read(&pdev->enable_cnt) == 1) {
9022                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9023                 if (rc) {
9024                         dev_err(&bp->pdev->dev,
9025                                 "Cannot obtain PCI resources, aborting\n");
9026                         goto err_out_disable;
9027                 }
9028
9029                 pci_set_master(pdev);
9030                 pci_save_state(pdev);
9031         }
9032
9033         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9034         if (bp->pm_cap == 0) {
9035                 dev_err(&bp->pdev->dev,
9036                         "Cannot find power management capability, aborting\n");
9037                 rc = -EIO;
9038                 goto err_out_release;
9039         }
9040
9041         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9042         if (bp->pcie_cap == 0) {
9043                 dev_err(&bp->pdev->dev,
9044                         "Cannot find PCI Express capability, aborting\n");
9045                 rc = -EIO;
9046                 goto err_out_release;
9047         }
9048
9049         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
9050                 bp->flags |= USING_DAC_FLAG;
9051                 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
9052                         dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
9053                                " failed, aborting\n");
9054                         rc = -EIO;
9055                         goto err_out_release;
9056                 }
9057
9058         } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
9059                 dev_err(&bp->pdev->dev,
9060                         "System does not support DMA, aborting\n");
9061                 rc = -EIO;
9062                 goto err_out_release;
9063         }
9064
9065         dev->mem_start = pci_resource_start(pdev, 0);
9066         dev->base_addr = dev->mem_start;
9067         dev->mem_end = pci_resource_end(pdev, 0);
9068
9069         dev->irq = pdev->irq;
9070
9071         bp->regview = pci_ioremap_bar(pdev, 0);
9072         if (!bp->regview) {
9073                 dev_err(&bp->pdev->dev,
9074                         "Cannot map register space, aborting\n");
9075                 rc = -ENOMEM;
9076                 goto err_out_release;
9077         }
9078
9079         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
9080                                         min_t(u64, BNX2X_DB_SIZE(bp),
9081                                               pci_resource_len(pdev, 2)));
9082         if (!bp->doorbells) {
9083                 dev_err(&bp->pdev->dev,
9084                         "Cannot map doorbell space, aborting\n");
9085                 rc = -ENOMEM;
9086                 goto err_out_unmap;
9087         }
9088
9089         bnx2x_set_power_state(bp, PCI_D0);
9090
9091         /* clean indirect addresses */
9092         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
9093                                PCICFG_VENDOR_ID_OFFSET);
9094         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
9095         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
9096         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
9097         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
9098
9099         /* Reset the load counter */
9100         bnx2x_clear_load_cnt(bp);
9101
9102         dev->watchdog_timeo = TX_TIMEOUT;
9103
9104         dev->netdev_ops = &bnx2x_netdev_ops;
9105         bnx2x_set_ethtool_ops(dev);
9106         dev->features |= NETIF_F_SG;
9107         dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
9108         if (bp->flags & USING_DAC_FLAG)
9109                 dev->features |= NETIF_F_HIGHDMA;
9110         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9111         dev->features |= NETIF_F_TSO6;
9112         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
9113
9114         dev->vlan_features |= NETIF_F_SG;
9115         dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
9116         if (bp->flags & USING_DAC_FLAG)
9117                 dev->vlan_features |= NETIF_F_HIGHDMA;
9118         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9119         dev->vlan_features |= NETIF_F_TSO6;
9120
9121 #ifdef BCM_DCB
9122         dev->dcbnl_ops = &bnx2x_dcbnl_ops;
9123 #endif
9124
9125         /* bnx2x_get_port_hwinfo() will set prtad and mmds properly */
9126         bp->mdio.prtad = MDIO_PRTAD_NONE;
9127         bp->mdio.mmds = 0;
9128         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
9129         bp->mdio.dev = dev;
9130         bp->mdio.mdio_read = bnx2x_mdio_read;
9131         bp->mdio.mdio_write = bnx2x_mdio_write;
9132
9133         return 0;
9134
9135 err_out_unmap:
9136         if (bp->regview) {
9137                 iounmap(bp->regview);
9138                 bp->regview = NULL;
9139         }
9140         if (bp->doorbells) {
9141                 iounmap(bp->doorbells);
9142                 bp->doorbells = NULL;
9143         }
9144
9145 err_out_release:
9146         if (atomic_read(&pdev->enable_cnt) == 1)
9147                 pci_release_regions(pdev);
9148
9149 err_out_disable:
9150         pci_disable_device(pdev);
9151         pci_set_drvdata(pdev, NULL);
9152
9153 err_out:
9154         return rc;
9155 }
9156
9157 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
9158                                                  int *width, int *speed)
9159 {
9160         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9161
9162         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
9163
9164         /* return value: 1 = 2.5 GT/s (Gen1), 2 = 5 GT/s (Gen2) */
9165         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
9166 }
9167
9168 static int bnx2x_check_firmware(struct bnx2x *bp)
9169 {
9170         const struct firmware *firmware = bp->firmware;
9171         struct bnx2x_fw_file_hdr *fw_hdr;
9172         struct bnx2x_fw_file_section *sections;
9173         u32 offset, len, num_ops;
9174         u16 *ops_offsets;
9175         int i;
9176         const u8 *fw_ver;
9177
9178         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
9179                 return -EINVAL;
9180
9181         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
9182         sections = (struct bnx2x_fw_file_section *)fw_hdr;
9183
9184         /* Make sure none of the offsets and sizes make us read beyond
9185          * the end of the firmware data */
9186         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
9187                 offset = be32_to_cpu(sections[i].offset);
9188                 len = be32_to_cpu(sections[i].len);
9189                 if (offset + len > firmware->size) {
9190                         dev_err(&bp->pdev->dev,
9191                                 "Section %d length is out of bounds\n", i);
9192                         return -EINVAL;
9193                 }
9194         }
9195
9196         /* Likewise for the init_ops offsets */
9197         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
9198         ops_offsets = (u16 *)(firmware->data + offset);
9199         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
9200
9201         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
9202                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
9203                         dev_err(&bp->pdev->dev,
9204                                 "Section offset %d is out of bounds\n", i);
9205                         return -EINVAL;
9206                 }
9207         }
9208
9209         /* Check FW version */
9210         offset = be32_to_cpu(fw_hdr->fw_version.offset);
9211         fw_ver = firmware->data + offset;
9212         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
9213             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
9214             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
9215             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
9216                 dev_err(&bp->pdev->dev,
9217                         "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
9218                        fw_ver[0], fw_ver[1], fw_ver[2],
9219                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
9220                        BCM_5710_FW_MINOR_VERSION,
9221                        BCM_5710_FW_REVISION_VERSION,
9222                        BCM_5710_FW_ENGINEERING_VERSION);
9223                 return -EINVAL;
9224         }
9225
9226         return 0;
9227 }
9228
9229 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
9230 {
9231         const __be32 *source = (const __be32 *)_source;
9232         u32 *target = (u32 *)_target;
9233         u32 i;
9234
9235         for (i = 0; i < n/4; i++)
9236                 target[i] = be32_to_cpu(source[i]);
9237 }
9238
9239 /*
9240  * Ops array is stored in the following format:
9241  * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
9242  */
9243 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
9244 {
9245         const __be32 *source = (const __be32 *)_source;
9246         struct raw_op *target = (struct raw_op *)_target;
9247         u32 i, j, tmp;
9248
9249         for (i = 0, j = 0; i < n/8; i++, j += 2) {
9250                 tmp = be32_to_cpu(source[j]);
9251                 target[i].op = (tmp >> 24) & 0xff;
9252                 target[i].offset = tmp & 0xffffff;
9253                 target[i].raw_data = be32_to_cpu(source[j + 1]);
9254         }
9255 }
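
/*
 * Worked example with a hypothetical record (not used by the driver): the
 * big-endian firmware bytes 01 00 12 34 de ad be ef decode via
 * bnx2x_prep_ops() to op = 0x01, offset = 0x001234, raw_data = 0xdeadbeef.
 */
static inline void bnx2x_prep_ops_example(void)
{
        static const u8 rec[8] = { 0x01, 0x00, 0x12, 0x34,
                                   0xde, 0xad, 0xbe, 0xef };
        struct raw_op op;

        bnx2x_prep_ops(rec, (u8 *)&op, sizeof(rec));
        /* op.op == 0x01, op.offset == 0x001234, op.raw_data == 0xdeadbeef */
}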
9256
9257 /*
9258  * IRO array is stored in the following format:
9259  * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
9260  */
9261 static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
9262 {
9263         const __be32 *source = (const __be32 *)_source;
9264         struct iro *target = (struct iro *)_target;
9265         u32 i, j, tmp;
9266
9267         for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
9268                 target[i].base = be32_to_cpu(source[j]);
9269                 j++;
9270                 tmp = be32_to_cpu(source[j]);
9271                 target[i].m1 = (tmp >> 16) & 0xffff;
9272                 target[i].m2 = tmp & 0xffff;
9273                 j++;
9274                 tmp = be32_to_cpu(source[j]);
9275                 target[i].m3 = (tmp >> 16) & 0xffff;
9276                 target[i].size = tmp & 0xffff;
9277                 j++;
9278         }
9279 }
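
/*
 * Worked example with hypothetical words: the three big-endian u32s
 * 0x00123456 0x000a000b 0x000c0030 decode via bnx2x_prep_iro() to
 * base = 0x123456, m1 = 0xa, m2 = 0xb, m3 = 0xc and size = 0x30.
 */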
9280
9281 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
9282 {
9283         const __be16 *source = (const __be16 *)_source;
9284         u16 *target = (u16 *)_target;
9285         u32 i;
9286
9287         for (i = 0; i < n/2; i++)
9288                 target[i] = be16_to_cpu(source[i]);
9289 }
9290
9291 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
9292 do {                                                                    \
9293         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
9294         bp->arr = kmalloc(len, GFP_KERNEL);                             \
9295         if (!bp->arr) {                                                 \
9296                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
9297                 goto lbl;                                               \
9298         }                                                               \
9299         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
9300              (u8 *)bp->arr, len);                                       \
9301 } while (0)
9302
9303 int bnx2x_init_firmware(struct bnx2x *bp)
9304 {
9305         const char *fw_file_name;
9306         struct bnx2x_fw_file_hdr *fw_hdr;
9307         int rc;
9308
9309         if (CHIP_IS_E1(bp))
9310                 fw_file_name = FW_FILE_NAME_E1;
9311         else if (CHIP_IS_E1H(bp))
9312                 fw_file_name = FW_FILE_NAME_E1H;
9313         else if (CHIP_IS_E2(bp))
9314                 fw_file_name = FW_FILE_NAME_E2;
9315         else {
9316                 BNX2X_ERR("Unsupported chip revision\n");
9317                 return -EINVAL;
9318         }
9319
9320         BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
9321
9322         rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
9323         if (rc) {
9324                 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
9325                 goto request_firmware_exit;
9326         }
9327
9328         rc = bnx2x_check_firmware(bp);
9329         if (rc) {
9330                 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
9331                 goto request_firmware_exit;
9332         }
9333
9334         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
9335
9336         /* Initialize the pointers to the init arrays */
9337         /* Blob */
9338         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
9339
9340         /* Opcodes */
9341         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
9342
9343         /* Offsets */
9344         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
9345                             be16_to_cpu_n);
9346
9347         /* STORMs firmware */
9348         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9349                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
9350         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
9351                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
9352         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9353                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
9354         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
9355                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
9356         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9357                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
9358         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
9359                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
9360         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9361                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
9362         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
9363                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
9364         /* IRO */
9365         BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
9366
9367         return 0;
9368
9369 iro_alloc_err:
9370         kfree(bp->init_ops_offsets);
9371 init_offsets_alloc_err:
9372         kfree(bp->init_ops);
9373 init_ops_alloc_err:
9374         kfree(bp->init_data);
9375 request_firmware_exit:
9376         release_firmware(bp->firmware);
9377
9378         return rc;
9379 }
9380
9381 static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9382 {
9383         int cid_count = L2_FP_COUNT(l2_cid_count);
9384
9385 #ifdef BCM_CNIC
9386         cid_count += CNIC_CID_MAX;
9387 #endif
9388         return roundup(cid_count, QM_CID_ROUND);
9389 }
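
/*
 * e.g. assuming QM_CID_ROUND is 1024 and a hypothetical cid_count of 80:
 * roundup(80, 1024) = 1024, i.e. the QM is always programmed with a CID
 * count padded up to the next QM_CID_ROUND boundary.
 */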
9390
9391 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9392                                     const struct pci_device_id *ent)
9393 {
9394         struct net_device *dev = NULL;
9395         struct bnx2x *bp;
9396         int pcie_width, pcie_speed;
9397         int rc, cid_count;
9398
9399         switch (ent->driver_data) {
9400         case BCM57710:
9401         case BCM57711:
9402         case BCM57711E:
9403                 cid_count = FP_SB_MAX_E1x;
9404                 break;
9405
9406         case BCM57712:
9407         case BCM57712E:
9408                 cid_count = FP_SB_MAX_E2;
9409                 break;
9410
9411         default:
9412                 pr_err("Unknown board_type (%ld), aborting\n",
9413                        ent->driver_data);
9414                 return -ENODEV;
9415         }
9416
9417         cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
9418
9419         /* dev is zeroed in alloc_etherdev_mq() */
9420         dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
9421         if (!dev) {
9422                 dev_err(&pdev->dev, "Cannot allocate net device\n");
9423                 return -ENOMEM;
9424         }
9425
9426         bp = netdev_priv(dev);
9427         bp->msg_enable = debug;
9428
9429         pci_set_drvdata(pdev, dev);
9430
9431         bp->l2_cid_count = cid_count;
9432
9433         rc = bnx2x_init_dev(pdev, dev);
9434         if (rc < 0) {
9435                 free_netdev(dev);
9436                 return rc;
9437         }
9438
9439         rc = bnx2x_init_bp(bp);
9440         if (rc)
9441                 goto init_one_exit;
9442
9443         /* calc qm_cid_count */
9444         bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9445
9446 #ifdef BCM_CNIC
9447         /* disable the FCoE L2 queue for E1x */
9448         if (CHIP_IS_E1x(bp))
9449                 bp->flags |= NO_FCOE_FLAG;
9450
9451 #endif
9452
9453         /* Configure interrupt mode: try to enable MSI-X/MSI if
9454          * needed, and set bp->num_queues appropriately.
9455          */
9456         bnx2x_set_int_mode(bp);
9457
9458         /* Add all NAPI objects */
9459         bnx2x_add_all_napi(bp);
9460
9461         rc = register_netdev(dev);
9462         if (rc) {
9463                 dev_err(&pdev->dev, "Cannot register net device\n");
9464                 goto init_one_exit;
9465         }
9466
9467 #ifdef BCM_CNIC
9468         if (!NO_FCOE(bp)) {
9469                 /* Add storage MAC address */
9470                 rtnl_lock();
9471                 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9472                 rtnl_unlock();
9473         }
9474 #endif
9475
9476         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
9477
9478         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9479                " IRQ %d, ", board_info[ent->driver_data].name,
9480                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
9481                pcie_width,
9482                ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9483                  (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9484                                                 "5GHz (Gen2)" : "2.5GHz",
9485                dev->base_addr, bp->pdev->irq);
9486         pr_cont("node addr %pM\n", dev->dev_addr);
9487
9488         return 0;
9489
9490 init_one_exit:
9491         if (bp->regview)
9492                 iounmap(bp->regview);
9493
9494         if (bp->doorbells)
9495                 iounmap(bp->doorbells);
9496
9497         free_netdev(dev);
9498
9499         if (atomic_read(&pdev->enable_cnt) == 1)
9500                 pci_release_regions(pdev);
9501
9502         pci_disable_device(pdev);
9503         pci_set_drvdata(pdev, NULL);
9504
9505         return rc;
9506 }
9507
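/**
 * bnx2x_remove_one - PCI remove callback
 * @pdev: Pointer to PCI device
 *
 * Unwinds bnx2x_init_one: unregisters the net device, frees the NAPI
 * objects and IRQ vectors, unmaps the BARs and disables the PCI device.
 */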
9508 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9509 {
9510         struct net_device *dev = pci_get_drvdata(pdev);
9511         struct bnx2x *bp;
9512
9513         if (!dev) {
9514                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
9515                 return;
9516         }
9517         bp = netdev_priv(dev);
9518
9519 #ifdef BCM_CNIC
9520         /* Delete storage MAC address */
9521         if (!NO_FCOE(bp)) {
9522                 rtnl_lock();
9523                 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9524                 rtnl_unlock();
9525         }
9526 #endif
9527
9528         unregister_netdev(dev);
9529
9530         /* Delete all NAPI objects */
9531         bnx2x_del_all_napi(bp);
9532
9533         /* Power on: we can't let PCI layer write to us while we are in D3 */
9534         bnx2x_set_power_state(bp, PCI_D0);
9535
9536         /* Disable MSI/MSI-X */
9537         bnx2x_disable_msi(bp);
9538
9539         /* Power off */
9540         bnx2x_set_power_state(bp, PCI_D3hot);
9541
9542         /* Make sure RESET task is not scheduled before continuing */
9543         cancel_delayed_work_sync(&bp->reset_task);
9544
9545         if (bp->regview)
9546                 iounmap(bp->regview);
9547
9548         if (bp->doorbells)
9549                 iounmap(bp->doorbells);
9550
9551         bnx2x_free_mem_bp(bp);
9552
9553         free_netdev(dev);
9554
9555         if (atomic_read(&pdev->enable_cnt) == 1)
9556                 pci_release_regions(pdev);
9557
9558         pci_disable_device(pdev);
9559         pci_set_drvdata(pdev, NULL);
9560 }
9561
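/* Stripped-down unload path used from the EEH callbacks: the chip may be
 * inaccessible at this point, so only host-side state is torn down (no
 * firmware shutdown sequence is attempted).
 */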
9562 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9563 {
9564         int i;
9565
9566         bp->state = BNX2X_STATE_ERROR;
9567
9568         bp->rx_mode = BNX2X_RX_MODE_NONE;
9569
9570         bnx2x_netif_stop(bp, 0);
9571         netif_carrier_off(bp->dev);
9572
9573         del_timer_sync(&bp->timer);
9574         bp->stats_state = STATS_STATE_DISABLED;
9575         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9576
9577         /* Release IRQs */
9578         bnx2x_free_irq(bp);
9579
9580         /* Free SKBs, SGEs, TPA pool and driver internals */
9581         bnx2x_free_skbs(bp);
9582
9583         for_each_rx_queue(bp, i)
9584                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
9585
9586         bnx2x_free_mem(bp);
9587
9588         bp->state = BNX2X_STATE_CLOSED;
9589
9590         return 0;
9591 }
9592
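/* After a slot reset, re-read the shared-memory base and re-sync the
 * driver/MCP mailbox sequence number.  If shmem does not fall in the
 * expected window, the MCP is assumed dead and NO_MCP_FLAG is set.
 */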
9593 static void bnx2x_eeh_recover(struct bnx2x *bp)
9594 {
9595         u32 val;
9596
9597         mutex_init(&bp->port.phy_mutex);
9598
9599         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9600         bp->link_params.shmem_base = bp->common.shmem_base;
9601         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9602
9603         if (!bp->common.shmem_base ||
9604             (bp->common.shmem_base < 0xA0000) ||
9605             (bp->common.shmem_base >= 0xC0000)) {
9606                 BNX2X_DEV_INFO("MCP not active\n");
9607                 bp->flags |= NO_MCP_FLAG;
9608                 return;
9609         }
9610
9611         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9612         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9613                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9614                 BNX2X_ERR("BAD MCP validity signature\n");
9615
9616         if (!BP_NOMCP(bp)) {
9617                 bp->fw_seq =
9618                     (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9619                     DRV_MSG_SEQ_NUMBER_MASK);
9620                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9621         }
9622 }
9623
9624 /**
9625  * bnx2x_io_error_detected - called when PCI error is detected
9626  * @pdev: Pointer to PCI device
9627  * @state: The current pci connection state
9628  *
9629  * This function is called after a PCI bus error affecting
9630  * this device has been detected.
9631  */
9632 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9633                                                 pci_channel_state_t state)
9634 {
9635         struct net_device *dev = pci_get_drvdata(pdev);
9636         struct bnx2x *bp = netdev_priv(dev);
9637
9638         rtnl_lock();
9639
9640         netif_device_detach(dev);
9641
9642         if (state == pci_channel_io_perm_failure) {
9643                 rtnl_unlock();
9644                 return PCI_ERS_RESULT_DISCONNECT;
9645         }
9646
9647         if (netif_running(dev))
9648                 bnx2x_eeh_nic_unload(bp);
9649
9650         pci_disable_device(pdev);
9651
9652         rtnl_unlock();
9653
9654         /* Request a slot reset */
9655         return PCI_ERS_RESULT_NEED_RESET;
9656 }
9657
9658 /**
9659  * bnx2x_io_slot_reset - called after the PCI bus has been reset
9660  * @pdev: Pointer to PCI device
9661  *
9662  * Restart the card from scratch, as if from a cold-boot.
9663  */
9664 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9665 {
9666         struct net_device *dev = pci_get_drvdata(pdev);
9667         struct bnx2x *bp = netdev_priv(dev);
9668
9669         rtnl_lock();
9670
9671         if (pci_enable_device(pdev)) {
9672                 dev_err(&pdev->dev,
9673                         "Cannot re-enable PCI device after reset\n");
9674                 rtnl_unlock();
9675                 return PCI_ERS_RESULT_DISCONNECT;
9676         }
9677
9678         pci_set_master(pdev);
9679         pci_restore_state(pdev);
9680
9681         if (netif_running(dev))
9682                 bnx2x_set_power_state(bp, PCI_D0);
9683
9684         rtnl_unlock();
9685
9686         return PCI_ERS_RESULT_RECOVERED;
9687 }
9688
9689 /**
9690  * bnx2x_io_resume - called when traffic can start flowing again
9691  * @pdev: Pointer to PCI device
9692  *
9693  * This callback is called when the error recovery driver tells us that
9694  * it's OK to resume normal operation.
9695  */
9696 static void bnx2x_io_resume(struct pci_dev *pdev)
9697 {
9698         struct net_device *dev = pci_get_drvdata(pdev);
9699         struct bnx2x *bp = netdev_priv(dev);
9700
9701         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
9702                 printk(KERN_ERR "Handling parity error recovery. "
9703                                 "Try again later\n");
9704                 return;
9705         }
9706
9707         rtnl_lock();
9708
9709         bnx2x_eeh_recover(bp);
9710
9711         if (netif_running(dev))
9712                 bnx2x_nic_load(bp, LOAD_NORMAL);
9713
9714         netif_device_attach(dev);
9715
9716         rtnl_unlock();
9717 }
9718
9719 static struct pci_error_handlers bnx2x_err_handler = {
9720         .error_detected = bnx2x_io_error_detected,
9721         .slot_reset     = bnx2x_io_slot_reset,
9722         .resume         = bnx2x_io_resume,
9723 };
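/* EEH sequence as driven by the PCI core: error_detected() detaches the
 * net device and asks for a slot reset, slot_reset() re-enables the
 * device after the bus reset, and resume() re-reads shmem and reloads
 * the NIC once traffic may flow again.
 */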
9724
9725 static struct pci_driver bnx2x_pci_driver = {
9726         .name        = DRV_MODULE_NAME,
9727         .id_table    = bnx2x_pci_tbl,
9728         .probe       = bnx2x_init_one,
9729         .remove      = __devexit_p(bnx2x_remove_one),
9730         .suspend     = bnx2x_suspend,
9731         .resume      = bnx2x_resume,
9732         .err_handler = &bnx2x_err_handler,
9733 };
9734
9735 static int __init bnx2x_init(void)
9736 {
9737         int ret;
9738
9739         pr_info("%s", version);
9740
9741         bnx2x_wq = create_singlethread_workqueue("bnx2x");
9742         if (bnx2x_wq == NULL) {
9743                 pr_err("Cannot create workqueue\n");
9744                 return -ENOMEM;
9745         }
9746
9747         ret = pci_register_driver(&bnx2x_pci_driver);
9748         if (ret) {
9749                 pr_err("Cannot register driver\n");
9750                 destroy_workqueue(bnx2x_wq);
9751         }
9752         return ret;
9753 }
9754
9755 static void __exit bnx2x_cleanup(void)
9756 {
9757         pci_unregister_driver(&bnx2x_pci_driver);
9758
9759         destroy_workqueue(bnx2x_wq);
9760 }
9761
9762 module_init(bnx2x_init);
9763 module_exit(bnx2x_cleanup);
9764
9765 #ifdef BCM_CNIC
9766
9767 /* count denotes the number of new completions we have seen */
9768 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9769 {
9770         struct eth_spe *spe;
9771
9772 #ifdef BNX2X_STOP_ON_ERROR
9773         if (unlikely(bp->panic))
9774                 return;
9775 #endif
9776
9777         spin_lock_bh(&bp->spq_lock);
9778         BUG_ON(bp->cnic_spq_pending < count);
9779         bp->cnic_spq_pending -= count;
9780
9781
9782         for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
9783                 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
9784                                 & SPE_HDR_CONN_TYPE) >>
9785                                 SPE_HDR_CONN_TYPE_SHIFT;
9786
9787                 /* Set validation for iSCSI L2 client before sending SETUP
9788                  * ramrod
9789                  */
9790                 if (type == ETH_CONNECTION_TYPE) {
9791                         u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
9792                                              hdr.conn_and_cmd_data) >>
9793                                 SPE_HDR_CMD_ID_SHIFT) & 0xff;
9794
9795                         if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
9796                                 bnx2x_set_ctx_validation(&bp->context.
9797                                                 vcxt[BNX2X_ISCSI_ETH_CID].eth,
9798                                         HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9799                 }
9800
9801                 /* There may be no more than 8 L2 and COMMON SPEs and no
9802                  * more than 8 L5 SPEs in the air.
9803                  */
9804                 if ((type == NONE_CONNECTION_TYPE) ||
9805                     (type == ETH_CONNECTION_TYPE)) {
9806                         if (!atomic_read(&bp->spq_left))
9807                                 break;
9808                         else
9809                                 atomic_dec(&bp->spq_left);
9810                 } else if ((type == ISCSI_CONNECTION_TYPE) ||
9811                            (type == FCOE_CONNECTION_TYPE)) {
9812                         if (bp->cnic_spq_pending >=
9813                             bp->cnic_eth_dev.max_kwqe_pending)
9814                                 break;
9815                         else
9816                                 bp->cnic_spq_pending++;
9817                 } else {
9818                         BNX2X_ERR("Unknown SPE type: %d\n", type);
9819                         bnx2x_panic();
9820                         break;
9821                 }
9822
9823                 spe = bnx2x_sp_get_next(bp);
9824                 *spe = *bp->cnic_kwq_cons;
9825
9826                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
9827                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
9828
9829                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
9830                         bp->cnic_kwq_cons = bp->cnic_kwq;
9831                 else
9832                         bp->cnic_kwq_cons++;
9833         }
9834         bnx2x_sp_prod_update(bp);
9835         spin_unlock_bh(&bp->spq_lock);
9836 }
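/* Two independent credit pools gate the posting loop above: L2 and
 * COMMON ramrods consume bp->spq_left, which is shared with the regular
 * slow path, while L5 (iSCSI/FCoE) ramrods are bounded by
 * cnic_eth_dev.max_kwqe_pending via cnic_spq_pending.  Entries that
 * cannot get a credit stay on the kwq ring until a completion returns
 * credit and bnx2x_cnic_sp_post() runs again.
 */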
9837
9838 static int bnx2x_cnic_sp_queue(struct net_device *dev,
9839                                struct kwqe_16 *kwqes[], u32 count)
9840 {
9841         struct bnx2x *bp = netdev_priv(dev);
9842         int i;
9843
9844 #ifdef BNX2X_STOP_ON_ERROR
9845         if (unlikely(bp->panic))
9846                 return -EIO;
9847 #endif
9848
9849         spin_lock_bh(&bp->spq_lock);
9850
9851         for (i = 0; i < count; i++) {
9852                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
9853
9854                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
9855                         break;
9856
9857                 *bp->cnic_kwq_prod = *spe;
9858
9859                 bp->cnic_kwq_pending++;
9860
9861                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
9862                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
9863                    spe->data.update_data_addr.hi,
9864                    spe->data.update_data_addr.lo,
9865                    bp->cnic_kwq_pending);
9866
9867                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
9868                         bp->cnic_kwq_prod = bp->cnic_kwq;
9869                 else
9870                         bp->cnic_kwq_prod++;
9871         }
9872
9873         spin_unlock_bh(&bp->spq_lock);
9874
9875         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
9876                 bnx2x_cnic_sp_post(bp, 0);
9877
9878         return i;
9879 }
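/* Returns the number of kwqes actually queued; when the ring fills
 * (cnic_kwq_pending == MAX_SP_DESC_CNT) the return value is smaller
 * than 'count' and the caller is left to retry the remainder.  The
 * prod/cons pointers wrap at cnic_kwq_last back to the ring start.
 */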
9880
9881 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9882 {
9883         struct cnic_ops *c_ops;
9884         int rc = 0;
9885
9886         mutex_lock(&bp->cnic_mutex);
9887         c_ops = bp->cnic_ops;
9888         if (c_ops)
9889                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9890         mutex_unlock(&bp->cnic_mutex);
9891
9892         return rc;
9893 }
9894
9895 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9896 {
9897         struct cnic_ops *c_ops;
9898         int rc = 0;
9899
9900         rcu_read_lock();
9901         c_ops = rcu_dereference(bp->cnic_ops);
9902         if (c_ops)
9903                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9904         rcu_read_unlock();
9905
9906         return rc;
9907 }
9908
9909 /*
9910  * for commands that have no data
9911  */
9912 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
9913 {
9914         struct cnic_ctl_info ctl = {0};
9915
9916         ctl.cmd = cmd;
9917
9918         return bnx2x_cnic_ctl_send(bp, &ctl);
9919 }
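/* Used by, e.g., the NIC load/unload paths (see bnx2x_cmn.c) to signal
 * CNIC_CTL_START_CMD and CNIC_CTL_STOP_CMD to the CNIC module.
 */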
9920
9921 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9922 {
9923         struct cnic_ctl_info ctl;
9924
9925         /* first we tell CNIC and only then we count this as a completion */
9926         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9927         ctl.data.comp.cid = cid;
9928
9929         bnx2x_cnic_ctl_send_bh(bp, &ctl);
9930         bnx2x_cnic_sp_post(bp, 0);
9931 }
9932
9933 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
9934 {
9935         struct bnx2x *bp = netdev_priv(dev);
9936         int rc = 0;
9937
9938         switch (ctl->cmd) {
9939         case DRV_CTL_CTXTBL_WR_CMD: {
9940                 u32 index = ctl->data.io.offset;
9941                 dma_addr_t addr = ctl->data.io.dma_addr;
9942
9943                 bnx2x_ilt_wr(bp, index, addr);
9944                 break;
9945         }
9946
9947         case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
9948                 int count = ctl->data.credit.credit_count;
9949
9950                 bnx2x_cnic_sp_post(bp, count);
9951                 break;
9952         }
9953
9954         /* rtnl_lock is held. */
9955         case DRV_CTL_START_L2_CMD: {
9956                 u32 cli = ctl->data.ring.client_id;
9957
9958                 /* Clear the FCoE FIP and ALL ENODE MAC addresses first */
9959                 bnx2x_del_fcoe_eth_macs(bp);
9960
9961                 /* Set iSCSI MAC address */
9962                 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
9963
9964                 mmiowb();
9965                 barrier();
9966
9967                 /* Start accepting on the iSCSI L2 ring. Accept all
9968                  * multicasts, because that is the only way for the UIO
9969                  * client to receive them: in non-promiscuous mode only
9970                  * one client per function (the leading one, in our case)
9971                  * will receive multicast packets.
9972                  */
9973                 bnx2x_rxq_set_mac_filters(bp, cli,
9974                         BNX2X_ACCEPT_UNICAST |
9975                         BNX2X_ACCEPT_BROADCAST |
9976                         BNX2X_ACCEPT_ALL_MULTICAST);
9977                 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9978
9979                 break;
9980         }
9981
9982         /* rtnl_lock is held. */
9983         case DRV_CTL_STOP_L2_CMD: {
9984                 u32 cli = ctl->data.ring.client_id;
9985
9986                 /* Stop accepting on iSCSI L2 ring */
9987                 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
9988                 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9989
9990                 mmiowb();
9991                 barrier();
9992
9993                 /* Unset iSCSI L2 MAC */
9994                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
9995                 break;
9996         }
9997         case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
9998                 int count = ctl->data.credit.credit_count;
9999
10000                 smp_mb__before_atomic_inc();
10001                 atomic_add(count, &bp->spq_left);
10002                 smp_mb__after_atomic_inc();
10003                 break;
10004         }
10005
10006         default:
10007                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
10008                 rc = -EINVAL;
10009         }
10010
10011         return rc;
10012 }
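/* A caller-side sketch (CNIC side; 'ethdev' and the literal credit value
 * are made up for illustration):
 *
 *	struct drv_ctl_info info = {0};
 *
 *	info.cmd = DRV_CTL_RET_L2_SPQ_CREDIT_CMD;
 *	info.data.credit.credit_count = 2;
 *	ethdev->drv_ctl(dev, &info);	(returns two L2 SPQ credits)
 */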
10013
10014 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
10015 {
10016         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10017
10018         if (bp->flags & USING_MSIX_FLAG) {
10019                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
10020                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
10021                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
10022         } else {
10023                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
10024                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
10025         }
10026         if (CHIP_IS_E2(bp))
10027                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
10028         else
10029                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
10030
10031         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
10032         cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
10033         cp->irq_arr[1].status_blk = bp->def_status_blk;
10034         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
10035         cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
10036
10037         cp->num_irq = 2;
10038 }
10039
10040 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
10041                                void *data)
10042 {
10043         struct bnx2x *bp = netdev_priv(dev);
10044         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10045
10046         if (ops == NULL)
10047                 return -EINVAL;
10048
10049         if (atomic_read(&bp->intr_sem) != 0)
10050                 return -EBUSY;
10051
10052         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
10053         if (!bp->cnic_kwq)
10054                 return -ENOMEM;
10055
10056         bp->cnic_kwq_cons = bp->cnic_kwq;
10057         bp->cnic_kwq_prod = bp->cnic_kwq;
10058         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
10059
10060         bp->cnic_spq_pending = 0;
10061         bp->cnic_kwq_pending = 0;
10062
10063         bp->cnic_data = data;
10064
10065         cp->num_irq = 0;
10066         cp->drv_state = CNIC_DRV_STATE_REGD;
10067         cp->iro_arr = bp->iro_arr;
10068
10069         bnx2x_setup_cnic_irq_info(bp);
10070
10071         rcu_assign_pointer(bp->cnic_ops, ops);
10072
10073         return 0;
10074 }
10075
10076 static int bnx2x_unregister_cnic(struct net_device *dev)
10077 {
10078         struct bnx2x *bp = netdev_priv(dev);
10079         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10080
10081         mutex_lock(&bp->cnic_mutex);
10082         cp->drv_state = 0;
10083         rcu_assign_pointer(bp->cnic_ops, NULL);
10084         mutex_unlock(&bp->cnic_mutex);
10085         synchronize_rcu();
10086         kfree(bp->cnic_kwq);
10087         bp->cnic_kwq = NULL;
10088
10089         return 0;
10090 }
10091
10092 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
10093 {
10094         struct bnx2x *bp = netdev_priv(dev);
10095         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10096
10097         cp->drv_owner = THIS_MODULE;
10098         cp->chip_id = CHIP_ID(bp);
10099         cp->pdev = bp->pdev;
10100         cp->io_base = bp->regview;
10101         cp->io_base2 = bp->doorbells;
10102         cp->max_kwqe_pending = 8;
10103         cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
10104         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
10105                              bnx2x_cid_ilt_lines(bp);
10106         cp->ctx_tbl_len = CNIC_ILT_LINES;
10107         cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
10108         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
10109         cp->drv_ctl = bnx2x_drv_ctl;
10110         cp->drv_register_cnic = bnx2x_register_cnic;
10111         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
10112         cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
10113         cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
10114                 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
10115         cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
10116
10117         DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
10118                          "starting cid %d\n",
10119            cp->ctx_blk_size,
10120            cp->ctx_tbl_offset,
10121            cp->ctx_tbl_len,
10122            cp->starting_cid);
10123         return cp;
10124 }
10125 EXPORT_SYMBOL(bnx2x_cnic_probe);
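/* Roughly how the cnic module is expected to attach (a sketch only; the
 * authoritative sequence lives in drivers/net/cnic.c):
 *
 *	struct cnic_eth_dev *ethdev = bnx2x_cnic_probe(netdev);
 *
 *	if (ethdev)
 *		ethdev->drv_register_cnic(netdev, &my_cnic_ops, my_data);
 *	...
 *	ethdev->drv_unregister_cnic(netdev);
 *
 * my_cnic_ops and my_data stand in for the CNIC module's own ops table
 * and opaque context pointer.
 */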
10126
10127 #endif /* BCM_CNIC */
10128