1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/pci.h>
28 #include <linux/init.h>
29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h>
31 #include <linux/skbuff.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/bitops.h>
34 #include <linux/irq.h>
35 #include <linux/delay.h>
36 #include <asm/byteorder.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if_vlan.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/crc32c.h>
48 #include <linux/prefetch.h>
49 #include <linux/zlib.h>
50 #include <linux/io.h>
51 #include <linux/stringify.h>
52
53 #define BNX2X_MAIN
54 #include "bnx2x.h"
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_cmn.h"
58 #include "bnx2x_dcb.h"
59
60 #include <linux/firmware.h>
61 #include "bnx2x_fw_file_hdr.h"
62 /* FW files */
63 #define FW_FILE_VERSION                                 \
64         __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
65         __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
66         __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
67         __stringify(BCM_5710_FW_ENGINEERING_VERSION)
68 #define FW_FILE_NAME_E1         "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
69 #define FW_FILE_NAME_E1H        "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
70 #define FW_FILE_NAME_E2         "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
71
72 /* Time in jiffies before concluding the transmitter is hung */
73 #define TX_TIMEOUT              (5*HZ)
74
75 static char version[] __devinitdata =
76         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
77         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
78
79 MODULE_AUTHOR("Eliezer Tamir");
80 MODULE_DESCRIPTION("Broadcom NetXtreme II "
81                    "BCM57710/57711/57711E/57712/57712E Driver");
82 MODULE_LICENSE("GPL");
83 MODULE_VERSION(DRV_MODULE_VERSION);
84 MODULE_FIRMWARE(FW_FILE_NAME_E1);
85 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
86 MODULE_FIRMWARE(FW_FILE_NAME_E2);
87
88 static int multi_mode = 1;
89 module_param(multi_mode, int, 0);
90 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
91                              "(0 Disable; 1 Enable (default))");
92
93 int num_queues;
94 module_param(num_queues, int, 0);
95 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
96                                 " (default is the number of CPUs)");
97
98 static int disable_tpa;
99 module_param(disable_tpa, int, 0);
100 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
101
102 static int int_mode;
103 module_param(int_mode, int, 0);
104 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
105                                 "(1 INT#x; 2 MSI)");
106
107 static int dropless_fc;
108 module_param(dropless_fc, int, 0);
109 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
110
111 static int poll;
112 module_param(poll, int, 0);
113 MODULE_PARM_DESC(poll, " Use polling (for debug)");
114
115 static int mrrs = -1;
116 module_param(mrrs, int, 0);
117 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
118
119 static int debug;
120 module_param(debug, int, 0);
121 MODULE_PARM_DESC(debug, " Default debug msglevel");
122
123 static struct workqueue_struct *bnx2x_wq;
124
125 #ifdef BCM_CNIC
126 static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
127 #endif
128
129 enum bnx2x_board_type {
130         BCM57710 = 0,
131         BCM57711 = 1,
132         BCM57711E = 2,
133         BCM57712 = 3,
134         BCM57712E = 4
135 };
136
137 /* indexed by board_type, above */
138 static struct {
139         char *name;
140 } board_info[] __devinitdata = {
141         { "Broadcom NetXtreme II BCM57710 XGb" },
142         { "Broadcom NetXtreme II BCM57711 XGb" },
143         { "Broadcom NetXtreme II BCM57711E XGb" },
144         { "Broadcom NetXtreme II BCM57712 XGb" },
145         { "Broadcom NetXtreme II BCM57712E XGb" }
146 };
147
148 #ifndef PCI_DEVICE_ID_NX2_57712
149 #define PCI_DEVICE_ID_NX2_57712         0x1662
150 #endif
151 #ifndef PCI_DEVICE_ID_NX2_57712E
152 #define PCI_DEVICE_ID_NX2_57712E        0x1663
153 #endif
154
155 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
156         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
157         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
158         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
159         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
160         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
161         { 0 }
162 };
163
164 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
165
166 /****************************************************************************
167 * General service functions
168 ****************************************************************************/
169
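/* The __storm_memset_*() helpers below write into the STORMs' internal
 * memory (INTMEM) through ordinary GRC register writes: a 64-bit DMA
 * mapping is split into two 32-bit writes (low dword first), and fills
 * and struct copies proceed one dword at a time.
 */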
170 static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
171                                        u32 addr, dma_addr_t mapping)
172 {
173         REG_WR(bp,  addr, U64_LO(mapping));
174         REG_WR(bp,  addr + 4, U64_HI(mapping));
175 }
176
177 static inline void __storm_memset_fill(struct bnx2x *bp,
178                                        u32 addr, size_t size, u32 val)
179 {
180         int i;
181         for (i = 0; i < size/4; i++)
182                 REG_WR(bp,  addr + (i * 4), val);
183 }
184
185 static inline void storm_memset_ustats_zero(struct bnx2x *bp,
186                                             u8 port, u16 stat_id)
187 {
188         size_t size = sizeof(struct ustorm_per_client_stats);
189
190         u32 addr = BAR_USTRORM_INTMEM +
191                         USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
192
193         __storm_memset_fill(bp, addr, size, 0);
194 }
195
196 static inline void storm_memset_tstats_zero(struct bnx2x *bp,
197                                             u8 port, u16 stat_id)
198 {
199         size_t size = sizeof(struct tstorm_per_client_stats);
200
201         u32 addr = BAR_TSTRORM_INTMEM +
202                         TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
203
204         __storm_memset_fill(bp, addr, size, 0);
205 }
206
207 static inline void storm_memset_xstats_zero(struct bnx2x *bp,
208                                             u8 port, u16 stat_id)
209 {
210         size_t size = sizeof(struct xstorm_per_client_stats);
211
212         u32 addr = BAR_XSTRORM_INTMEM +
213                         XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
214
215         __storm_memset_fill(bp, addr, size, 0);
216 }
217
218
219 static inline void storm_memset_spq_addr(struct bnx2x *bp,
220                                          dma_addr_t mapping, u16 abs_fid)
221 {
222         u32 addr = XSEM_REG_FAST_MEMORY +
223                         XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
224
225         __storm_memset_dma_mapping(bp, addr, mapping);
226 }
227
228 static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
229 {
230         REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
231 }
232
233 static inline void storm_memset_func_cfg(struct bnx2x *bp,
234                                 struct tstorm_eth_function_common_config *tcfg,
235                                 u16 abs_fid)
236 {
237         size_t size = sizeof(struct tstorm_eth_function_common_config);
238
239         u32 addr = BAR_TSTRORM_INTMEM +
240                         TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
241
242         __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
243 }
244
245 static inline void storm_memset_xstats_flags(struct bnx2x *bp,
246                                 struct stats_indication_flags *flags,
247                                 u16 abs_fid)
248 {
249         size_t size = sizeof(struct stats_indication_flags);
250
251         u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
252
253         __storm_memset_struct(bp, addr, size, (u32 *)flags);
254 }
255
256 static inline void storm_memset_tstats_flags(struct bnx2x *bp,
257                                 struct stats_indication_flags *flags,
258                                 u16 abs_fid)
259 {
260         size_t size = sizeof(struct stats_indication_flags);
261
262         u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
263
264         __storm_memset_struct(bp, addr, size, (u32 *)flags);
265 }
266
267 static inline void storm_memset_ustats_flags(struct bnx2x *bp,
268                                 struct stats_indication_flags *flags,
269                                 u16 abs_fid)
270 {
271         size_t size = sizeof(struct stats_indication_flags);
272
273         u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
274
275         __storm_memset_struct(bp, addr, size, (u32 *)flags);
276 }
277
278 static inline void storm_memset_cstats_flags(struct bnx2x *bp,
279                                 struct stats_indication_flags *flags,
280                                 u16 abs_fid)
281 {
282         size_t size = sizeof(struct stats_indication_flags);
283
284         u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
285
286         __storm_memset_struct(bp, addr, size, (u32 *)flags);
287 }
288
289 static inline void storm_memset_xstats_addr(struct bnx2x *bp,
290                                            dma_addr_t mapping, u16 abs_fid)
291 {
292         u32 addr = BAR_XSTRORM_INTMEM +
293                 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
294
295         __storm_memset_dma_mapping(bp, addr, mapping);
296 }
297
298 static inline void storm_memset_tstats_addr(struct bnx2x *bp,
299                                            dma_addr_t mapping, u16 abs_fid)
300 {
301         u32 addr = BAR_TSTRORM_INTMEM +
302                 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
303
304         __storm_memset_dma_mapping(bp, addr, mapping);
305 }
306
307 static inline void storm_memset_ustats_addr(struct bnx2x *bp,
308                                            dma_addr_t mapping, u16 abs_fid)
309 {
310         u32 addr = BAR_USTRORM_INTMEM +
311                 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
312
313         __storm_memset_dma_mapping(bp, addr, mapping);
314 }
315
316 static inline void storm_memset_cstats_addr(struct bnx2x *bp,
317                                            dma_addr_t mapping, u16 abs_fid)
318 {
319         u32 addr = BAR_CSTRORM_INTMEM +
320                 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
321
322         __storm_memset_dma_mapping(bp, addr, mapping);
323 }
324
325 static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
326                                          u16 pf_id)
327 {
328         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
329                 pf_id);
330         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
331                 pf_id);
332         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
333                 pf_id);
334         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
335                 pf_id);
336 }
337
338 static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
339                                         u8 enable)
340 {
341         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
342                 enable);
343         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
344                 enable);
345         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
346                 enable);
347         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
348                 enable);
349 }
350
351 static inline void storm_memset_eq_data(struct bnx2x *bp,
352                                 struct event_ring_data *eq_data,
353                                 u16 pfid)
354 {
355         size_t size = sizeof(struct event_ring_data);
356
357         u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
358
359         __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
360 }
361
362 static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
363                                         u16 pfid)
364 {
365         u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
366         REG_WR16(bp, addr, eq_prod);
367 }
368
369 static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
370                                              u16 fw_sb_id, u8 sb_index,
371                                              u8 ticks)
372 {
373
374         int index_offset = CHIP_IS_E2(bp) ?
375                 offsetof(struct hc_status_block_data_e2, index_data) :
376                 offsetof(struct hc_status_block_data_e1x, index_data);
377         u32 addr = BAR_CSTRORM_INTMEM +
378                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
379                         index_offset +
380                         sizeof(struct hc_index_data)*sb_index +
381                         offsetof(struct hc_index_data, timeout);
382         REG_WR8(bp, addr, ticks);
383         DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
384                           port, fw_sb_id, sb_index, ticks);
385 }
386 static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
387                                              u16 fw_sb_id, u8 sb_index,
388                                              u8 disable)
389 {
390         u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
391         int index_offset = CHIP_IS_E2(bp) ?
392                 offsetof(struct hc_status_block_data_e2, index_data) :
393                 offsetof(struct hc_status_block_data_e1x, index_data);
394         u32 addr = BAR_CSTRORM_INTMEM +
395                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
396                         index_offset +
397                         sizeof(struct hc_index_data)*sb_index +
398                         offsetof(struct hc_index_data, flags);
399         u16 flags = REG_RD16(bp, addr);
400         /* clear and set */
401         flags &= ~HC_INDEX_DATA_HC_ENABLED;
402         flags |= enable_flag;
403         REG_WR16(bp, addr, flags);
404         DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
405                           port, fw_sb_id, sb_index, disable);
406 }
407
408 /* used only at init;
409  * locking is done by the MCP
410  */
411 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
412 {
413         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
414         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
415         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
416                                PCICFG_VENDOR_ID_OFFSET);
417 }
418
419 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
420 {
421         u32 val;
422
423         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
424         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
425         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
426                                PCICFG_VENDOR_ID_OFFSET);
427
428         return val;
429 }
430
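/* The two indirect accessors above go through the PCI config space GRC
 * window: the target GRC address is written to PCICFG_GRC_ADDRESS, the
 * data moves through PCICFG_GRC_DATA, and the window is finally pointed
 * back at PCICFG_VENDOR_ID_OFFSET so that a stray config cycle cannot
 * touch device internals.  Illustrative use (the register is an example
 * only):
 *
 *	bnx2x_reg_wr_ind(bp, MISC_REG_DRIVER_CONTROL_1, 0);
 *	val = bnx2x_reg_rd_ind(bp, MISC_REG_DRIVER_CONTROL_1);
 */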
431 #define DMAE_DP_SRC_GRC         "grc src_addr [%08x]"
432 #define DMAE_DP_SRC_PCI         "pci src_addr [%x:%08x]"
433 #define DMAE_DP_DST_GRC         "grc dst_addr [%08x]"
434 #define DMAE_DP_DST_PCI         "pci dst_addr [%x:%08x]"
435 #define DMAE_DP_DST_NONE        "dst_addr [none]"
436
437 static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
438                           int msglvl)
439 {
440         u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
441
442         switch (dmae->opcode & DMAE_COMMAND_DST) {
443         case DMAE_CMD_DST_PCI:
444                 if (src_type == DMAE_CMD_SRC_PCI)
445                         DP(msglvl, "DMAE: opcode 0x%08x\n"
446                            "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
447                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
448                            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
449                            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
450                            dmae->comp_addr_hi, dmae->comp_addr_lo,
451                            dmae->comp_val);
452                 else
453                         DP(msglvl, "DMAE: opcode 0x%08x\n"
454                            "src [%08x], len [%d*4], dst [%x:%08x]\n"
455                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
456                            dmae->opcode, dmae->src_addr_lo >> 2,
457                            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
458                            dmae->comp_addr_hi, dmae->comp_addr_lo,
459                            dmae->comp_val);
460                 break;
461         case DMAE_CMD_DST_GRC:
462                 if (src_type == DMAE_CMD_SRC_PCI)
463                         DP(msglvl, "DMAE: opcode 0x%08x\n"
464                            "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
465                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
466                            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
467                            dmae->len, dmae->dst_addr_lo >> 2,
468                            dmae->comp_addr_hi, dmae->comp_addr_lo,
469                            dmae->comp_val);
470                 else
471                         DP(msglvl, "DMAE: opcode 0x%08x\n"
472                            "src [%08x], len [%d*4], dst [%08x]\n"
473                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
474                            dmae->opcode, dmae->src_addr_lo >> 2,
475                            dmae->len, dmae->dst_addr_lo >> 2,
476                            dmae->comp_addr_hi, dmae->comp_addr_lo,
477                            dmae->comp_val);
478                 break;
479         default:
480                 if (src_type == DMAE_CMD_SRC_PCI)
481                         DP(msglvl, "DMAE: opcode 0x%08x\n"
482                            DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
483                                     "dst_addr [none]\n"
484                            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
485                            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
486                            dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
487                            dmae->comp_val);
488                 else
489                         DP(msglvl, "DMAE: opcode 0x%08x\n"
490                            DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
491                                     "dst_addr [none]\n"
492                            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
493                            dmae->opcode, dmae->src_addr_lo >> 2,
494                            dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
495                            dmae->comp_val);
496                 break;
497         }
498
499 }
500
501 const u32 dmae_reg_go_c[] = {
502         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
503         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
504         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
505         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
506 };
507
508 /* copy command into DMAE command memory and set DMAE command go */
509 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
510 {
511         u32 cmd_offset;
512         int i;
513
514         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
515         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
516                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
517
518                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
519                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
520         }
521         REG_WR(bp, dmae_reg_go_c[idx], 1);
522 }
523
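/* A command occupies sizeof(struct dmae_command)/4 dwords of DMAE
 * command memory; bnx2x_post_dmae() above copies it in dword by dword
 * and then writes 1 to the per-channel GO register (dmae_reg_go_c[idx])
 * to kick the engine.
 */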
524 u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
525 {
526         return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
527                            DMAE_CMD_C_ENABLE);
528 }
529
530 u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
531 {
532         return opcode & ~DMAE_CMD_SRC_RESET;
533 }
534
535 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
536                              bool with_comp, u8 comp_type)
537 {
538         u32 opcode = 0;
539
540         opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
541                    (dst_type << DMAE_COMMAND_DST_SHIFT));
542
543         opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
544
545         opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
546         opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
547                    (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
548         opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
549
550 #ifdef __BIG_ENDIAN
551         opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
552 #else
553         opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
554 #endif
555         if (with_comp)
556                 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
557         return opcode;
558 }
559
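/* Illustrative opcode construction (this is what bnx2x_write_dmae()
 * below effectively requests): a PCI -> GRC copy that reports its
 * completion to PCI memory:
 *
 *	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 *				   true, DMAE_COMP_PCI);
 *
 * Port/VN routing, the source/destination reset bits and the endianity
 * mode for the running CPU are folded in by the helper.
 */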
560 static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
561                                       struct dmae_command *dmae,
562                                       u8 src_type, u8 dst_type)
563 {
564         memset(dmae, 0, sizeof(struct dmae_command));
565
566         /* set the opcode */
567         dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
568                                          true, DMAE_COMP_PCI);
569
570         /* fill in the completion parameters */
571         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
572         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
573         dmae->comp_val = DMAE_COMP_VAL;
574 }
575
576 /* issue a DMAE command over the init channel and wait for completion */
577 static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
578                                       struct dmae_command *dmae)
579 {
580         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
581         int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
582         int rc = 0;
583
584         DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
585            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
586            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
587
588         /* lock the dmae channel */
589         mutex_lock(&bp->dmae_mutex);
590
591         /* reset completion */
592         *wb_comp = 0;
593
594         /* post the command on the channel used for initializations */
595         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
596
597         /* wait for completion */
598         udelay(5);
599         while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
600                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
601
602                 if (!cnt) {
603                         BNX2X_ERR("DMAE timeout!\n");
604                         rc = DMAE_TIMEOUT;
605                         goto unlock;
606                 }
607                 cnt--;
608                 udelay(50);
609         }
610         if (*wb_comp & DMAE_PCI_ERR_FLAG) {
611                 BNX2X_ERR("DMAE PCI error!\n");
612                 rc = DMAE_PCI_ERROR;
613         }
614
615         DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
616            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
617            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
618
619 unlock:
620         mutex_unlock(&bp->dmae_mutex);
621         return rc;
622 }
623
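/* Copy len32 dwords from host memory at dma_addr into GRC space at
 * dst_addr.  While the DMAE channel is not yet ready (early init) this
 * falls back to indirect register writes.
 */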
624 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
625                       u32 len32)
626 {
627         struct dmae_command dmae;
628
629         if (!bp->dmae_ready) {
630                 u32 *data = bnx2x_sp(bp, wb_data[0]);
631
632                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
633                    "  using indirect\n", dst_addr, len32);
634                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
635                 return;
636         }
637
638         /* set opcode and fixed command fields */
639         bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
640
641         /* fill in addresses and len */
642         dmae.src_addr_lo = U64_LO(dma_addr);
643         dmae.src_addr_hi = U64_HI(dma_addr);
644         dmae.dst_addr_lo = dst_addr >> 2;
645         dmae.dst_addr_hi = 0;
646         dmae.len = len32;
647
648         bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
649
650         /* issue the command and wait for completion */
651         bnx2x_issue_dmae_with_comp(bp, &dmae);
652 }
653
654 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
655 {
656         struct dmae_command dmae;
657
658         if (!bp->dmae_ready) {
659                 u32 *data = bnx2x_sp(bp, wb_data[0]);
660                 int i;
661
662                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
663                    "  using indirect\n", src_addr, len32);
664                 for (i = 0; i < len32; i++)
665                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
666                 return;
667         }
668
669         /* set opcode and fixed command fields */
670         bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
671
672         /* fill in addresses and len */
673         dmae.src_addr_lo = src_addr >> 2;
674         dmae.src_addr_hi = 0;
675         dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
676         dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
677         dmae.len = len32;
678
679         bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
680
681         /* issue the command and wait for completion */
682         bnx2x_issue_dmae_with_comp(bp, &dmae);
683 }
684
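/* Illustrative use of the two helpers above (not a specific call site):
 * read a 64-bit wide-bus register into the slowpath wb_data scratch
 * buffer, then write it back:
 *
 *	bnx2x_read_dmae(bp, reg, 2);
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), reg, 2);
 *
 * which is essentially what the REG_RD_DMAE()/REG_WR_DMAE() wrappers
 * used below boil down to.
 */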
685 static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
686                                       u32 addr, u32 len)
687 {
688         int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
689         int offset = 0;
690
691         while (len > dmae_wr_max) {
692                 bnx2x_write_dmae(bp, phys_addr + offset,
693                                  addr + offset, dmae_wr_max);
694                 offset += dmae_wr_max * 4;
695                 len -= dmae_wr_max;
696         }
697
698         bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
699 }
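
/* Note: len counts dwords while the address offsets advance in bytes,
 * hence "offset += dmae_wr_max * 4" above.  E.g. with a maximum of
 * 0x400 dwords, a 0x500-dword write is issued as 0x400 dwords at
 * offset 0 followed by 0x100 dwords at byte offset 0x1000.
 */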
700
701 /* used only for slowpath so not inlined */
702 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
703 {
704         u32 wb_write[2];
705
706         wb_write[0] = val_hi;
707         wb_write[1] = val_lo;
708         REG_WR_DMAE(bp, reg, wb_write, 2);
709 }
710
711 #ifdef USE_WB_RD
712 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
713 {
714         u32 wb_data[2];
715
716         REG_RD_DMAE(bp, reg, wb_data, 2);
717
718         return HILO_U64(wb_data[0], wb_data[1]);
719 }
720 #endif
721
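/* Scan the assert lists of all four STORMs.  Each list entry is four
 * consecutive dwords; scanning stops at the first entry whose first
 * dword reads COMMON_ASM_INVALID_ASSERT_OPCODE.  Returns the number of
 * asserts found.
 */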
722 static int bnx2x_mc_assert(struct bnx2x *bp)
723 {
724         char last_idx;
725         int i, rc = 0;
726         u32 row0, row1, row2, row3;
727
728         /* XSTORM */
729         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
730                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
731         if (last_idx)
732                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
733
734         /* print the asserts */
735         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
736
737                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
738                               XSTORM_ASSERT_LIST_OFFSET(i));
739                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
740                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
741                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
742                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
743                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
744                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
745
746                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
747                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
748                                   " 0x%08x 0x%08x 0x%08x\n",
749                                   i, row3, row2, row1, row0);
750                         rc++;
751                 } else {
752                         break;
753                 }
754         }
755
756         /* TSTORM */
757         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
758                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
759         if (last_idx)
760                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
761
762         /* print the asserts */
763         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
764
765                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
766                               TSTORM_ASSERT_LIST_OFFSET(i));
767                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
768                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
769                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
770                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
771                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
772                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
773
774                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
775                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
776                                   " 0x%08x 0x%08x 0x%08x\n",
777                                   i, row3, row2, row1, row0);
778                         rc++;
779                 } else {
780                         break;
781                 }
782         }
783
784         /* CSTORM */
785         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
786                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
787         if (last_idx)
788                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
789
790         /* print the asserts */
791         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
792
793                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
794                               CSTORM_ASSERT_LIST_OFFSET(i));
795                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
796                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
797                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
798                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
799                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
800                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
801
802                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
803                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
804                                   " 0x%08x 0x%08x 0x%08x\n",
805                                   i, row3, row2, row1, row0);
806                         rc++;
807                 } else {
808                         break;
809                 }
810         }
811
812         /* USTORM */
813         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
814                            USTORM_ASSERT_LIST_INDEX_OFFSET);
815         if (last_idx)
816                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
817
818         /* print the asserts */
819         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
820
821                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
822                               USTORM_ASSERT_LIST_OFFSET(i));
823                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
824                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
825                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
826                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
827                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
828                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
829
830                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
831                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
832                                   " 0x%08x 0x%08x 0x%08x\n",
833                                   i, row3, row2, row1, row0);
834                         rc++;
835                 } else {
836                         break;
837                 }
838         }
839
840         return rc;
841 }
842
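/* Dump the MCP trace buffer to the kernel log.  The buffer lives just
 * below the shmem base (the dword at trace_shmem_base - 0x800 + 4 holds
 * the current "mark"), and the circular buffer is printed in two passes
 * so the oldest data comes out first.
 */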
843 static void bnx2x_fw_dump(struct bnx2x *bp)
844 {
845         u32 addr;
846         u32 mark, offset;
847         __be32 data[9];
848         int word;
849         u32 trace_shmem_base;
850         if (BP_NOMCP(bp)) {
851                 BNX2X_ERR("NO MCP - can not dump\n");
852                 return;
853         }
854
855         if (BP_PATH(bp) == 0)
856                 trace_shmem_base = bp->common.shmem_base;
857         else
858                 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
859         addr = trace_shmem_base - 0x0800 + 4;
860         mark = REG_RD(bp, addr);
861         mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
862                         + ((mark + 0x3) & ~0x3) - 0x08000000;
863         pr_err("begin fw dump (mark 0x%x)\n", mark);
864
865         pr_err("");
866         for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
867                 for (word = 0; word < 8; word++)
868                         data[word] = htonl(REG_RD(bp, offset + 4*word));
869                 data[8] = 0x0;
870                 pr_cont("%s", (char *)data);
871         }
872         for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
873                 for (word = 0; word < 8; word++)
874                         data[word] = htonl(REG_RD(bp, offset + 4*word));
875                 data[8] = 0x0;
876                 pr_cont("%s", (char *)data);
877         }
878         pr_err("end of fw dump\n");
879 }
880
881 void bnx2x_panic_dump(struct bnx2x *bp)
882 {
883         int i;
884         u16 j;
885         struct hc_sp_status_block_data sp_sb_data;
886         int func = BP_FUNC(bp);
887 #ifdef BNX2X_STOP_ON_ERROR
888         u16 start = 0, end = 0;
889 #endif
890
891         bp->stats_state = STATS_STATE_DISABLED;
892         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
893
894         BNX2X_ERR("begin crash dump -----------------\n");
895
896         /* Indices */
897         /* Common */
898         BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
899                   "  spq_prod_idx(0x%x)\n",
900                   bp->def_idx, bp->def_att_idx,
901                   bp->attn_state, bp->spq_prod_idx);
902         BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
903                   bp->def_status_blk->atten_status_block.attn_bits,
904                   bp->def_status_blk->atten_status_block.attn_bits_ack,
905                   bp->def_status_blk->atten_status_block.status_block_id,
906                   bp->def_status_blk->atten_status_block.attn_bits_index);
907         BNX2X_ERR("     def (");
908         for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
909                 pr_cont("0x%x%s",
910                        bp->def_status_blk->sp_sb.index_values[i],
911                        (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");
912
913         for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
914                 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
915                         CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
916                         i*sizeof(u32));
917
918         pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
919                          "pf_id(0x%x)  vnic_id(0x%x)  "
920                          "vf_id(0x%x)  vf_valid (0x%x)\n",
921                sp_sb_data.igu_sb_id,
922                sp_sb_data.igu_seg_id,
923                sp_sb_data.p_func.pf_id,
924                sp_sb_data.p_func.vnic_id,
925                sp_sb_data.p_func.vf_id,
926                sp_sb_data.p_func.vf_valid);
927
928
929         for_each_eth_queue(bp, i) {
930                 struct bnx2x_fastpath *fp = &bp->fp[i];
931                 int loop;
932                 struct hc_status_block_data_e2 sb_data_e2;
933                 struct hc_status_block_data_e1x sb_data_e1x;
934                 struct hc_status_block_sm  *hc_sm_p =
935                         CHIP_IS_E2(bp) ?
936                         sb_data_e2.common.state_machine :
937                         sb_data_e1x.common.state_machine;
938                 struct hc_index_data *hc_index_p =
939                         CHIP_IS_E2(bp) ?
940                         sb_data_e2.index_data :
941                         sb_data_e1x.index_data;
942                 int data_size;
943                 u32 *sb_data_p;
944
945                 /* Rx */
946                 BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
947                           "  rx_comp_prod(0x%x)"
948                           "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
949                           i, fp->rx_bd_prod, fp->rx_bd_cons,
950                           fp->rx_comp_prod,
951                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
952                 BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
953                           "  fp_hc_idx(0x%x)\n",
954                           fp->rx_sge_prod, fp->last_max_sge,
955                           le16_to_cpu(fp->fp_hc_idx));
956
957                 /* Tx */
958                 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
959                           "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
960                           "  *tx_cons_sb(0x%x)\n",
961                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
962                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
963
964                 loop = CHIP_IS_E2(bp) ?
965                         HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
966
967                 /* host sb data */
968
969 #ifdef BCM_CNIC
970                 if (IS_FCOE_FP(fp))
971                         continue;
972 #endif
973                 BNX2X_ERR("     run indexes (");
974                 for (j = 0; j < HC_SB_MAX_SM; j++)
975                         pr_cont("0x%x%s",
976                                fp->sb_running_index[j],
977                                (j == HC_SB_MAX_SM - 1) ? ")" : " ");
978
979                 BNX2X_ERR("     indexes (");
980                 for (j = 0; j < loop; j++)
981                         pr_cont("0x%x%s",
982                                fp->sb_index_values[j],
983                                (j == loop - 1) ? ")" : " ");
984                 /* fw sb data */
985                 data_size = CHIP_IS_E2(bp) ?
986                         sizeof(struct hc_status_block_data_e2) :
987                         sizeof(struct hc_status_block_data_e1x);
988                 data_size /= sizeof(u32);
989                 sb_data_p = CHIP_IS_E2(bp) ?
990                         (u32 *)&sb_data_e2 :
991                         (u32 *)&sb_data_e1x;
992                 /* copy sb data in here */
993                 for (j = 0; j < data_size; j++)
994                         *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
995                                 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
996                                 j * sizeof(u32));
997
998                 if (CHIP_IS_E2(bp)) {
999                         pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
1000                                 "vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
1001                                 sb_data_e2.common.p_func.pf_id,
1002                                 sb_data_e2.common.p_func.vf_id,
1003                                 sb_data_e2.common.p_func.vf_valid,
1004                                 sb_data_e2.common.p_func.vnic_id,
1005                                 sb_data_e2.common.same_igu_sb_1b);
1006                 } else {
1007                         pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
1008                                 "vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
1009                                 sb_data_e1x.common.p_func.pf_id,
1010                                 sb_data_e1x.common.p_func.vf_id,
1011                                 sb_data_e1x.common.p_func.vf_valid,
1012                                 sb_data_e1x.common.p_func.vnic_id,
1013                                 sb_data_e1x.common.same_igu_sb_1b);
1014                 }
1015
1016                 /* SB_SMs data */
1017                 for (j = 0; j < HC_SB_MAX_SM; j++) {
1018                         pr_cont("SM[%d] __flags (0x%x) "
1019                                "igu_sb_id (0x%x)  igu_seg_id(0x%x) "
1020                                "time_to_expire (0x%x) "
1021                                "timer_value(0x%x)\n", j,
1022                                hc_sm_p[j].__flags,
1023                                hc_sm_p[j].igu_sb_id,
1024                                hc_sm_p[j].igu_seg_id,
1025                                hc_sm_p[j].time_to_expire,
1026                                hc_sm_p[j].timer_value);
1027                 }
1028
1029                 /* Indices data */
1030                 for (j = 0; j < loop; j++) {
1031                         pr_cont("INDEX[%d] flags (0x%x) "
1032                                          "timeout (0x%x)\n", j,
1033                                hc_index_p[j].flags,
1034                                hc_index_p[j].timeout);
1035                 }
1036         }
1037
1038 #ifdef BNX2X_STOP_ON_ERROR
1039         /* Rings */
1040         /* Rx */
1041         for_each_rx_queue(bp, i) {
1042                 struct bnx2x_fastpath *fp = &bp->fp[i];
1043
1044                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1045                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
1046                 for (j = start; j != end; j = RX_BD(j + 1)) {
1047                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
1048                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
1049
1050                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
1051                                   i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
1052                 }
1053
1054                 start = RX_SGE(fp->rx_sge_prod);
1055                 end = RX_SGE(fp->last_max_sge);
1056                 for (j = start; j != end; j = RX_SGE(j + 1)) {
1057                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
1058                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
1059
1060                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
1061                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
1062                 }
1063
1064                 start = RCQ_BD(fp->rx_comp_cons - 10);
1065                 end = RCQ_BD(fp->rx_comp_cons + 503);
1066                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
1067                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
1068
1069                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1070                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
1071                 }
1072         }
1073
1074         /* Tx */
1075         for_each_tx_queue(bp, i) {
1076                 struct bnx2x_fastpath *fp = &bp->fp[i];
1077
1078                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
1079                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
1080                 for (j = start; j != end; j = TX_BD(j + 1)) {
1081                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
1082
1083                         BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
1084                                   i, j, sw_bd->skb, sw_bd->first_bd);
1085                 }
1086
1087                 start = TX_BD(fp->tx_bd_cons - 10);
1088                 end = TX_BD(fp->tx_bd_cons + 254);
1089                 for (j = start; j != end; j = TX_BD(j + 1)) {
1090                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
1091
1092                         BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
1093                                   i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
1094                 }
1095         }
1096 #endif
1097         bnx2x_fw_dump(bp);
1098         bnx2x_mc_assert(bp);
1099         BNX2X_ERR("end crash dump -----------------\n");
1100 }
1101
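/* Interrupt enable/disable.  The HC (or, on newer chips, the IGU) is
 * programmed for one of three modes: MSI-X (per-queue vectors, single
 * ISR mode off), MSI, or legacy INTx.  Attention bits are enabled in
 * all modes.
 */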
1102 static void bnx2x_hc_int_enable(struct bnx2x *bp)
1103 {
1104         int port = BP_PORT(bp);
1105         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1106         u32 val = REG_RD(bp, addr);
1107         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1108         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1109
1110         if (msix) {
1111                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1112                          HC_CONFIG_0_REG_INT_LINE_EN_0);
1113                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1114                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1115         } else if (msi) {
1116                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1117                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1118                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1119                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1120         } else {
1121                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1122                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1123                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
1124                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1125
1126                 if (!CHIP_IS_E1(bp)) {
1127                         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1128                            val, port, addr);
1129
1130                         REG_WR(bp, addr, val);
1131
1132                         val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1133                 }
1134         }
1135
1136         if (CHIP_IS_E1(bp))
1137                 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1138
1139         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
1140            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1141
1142         REG_WR(bp, addr, val);
1143         /*
1144          * Ensure that HC_CONFIG is written before leading/trailing edge config
1145          */
1146         mmiowb();
1147         barrier();
1148
1149         if (!CHIP_IS_E1(bp)) {
1150                 /* init leading/trailing edge */
1151                 if (IS_MF(bp)) {
1152                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1153                         if (bp->port.pmf)
1154                                 /* enable nig and gpio3 attention */
1155                                 val |= 0x1100;
1156                 } else
1157                         val = 0xffff;
1158
1159                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1160                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1161         }
1162
1163         /* Make sure that interrupts are indeed enabled from here on */
1164         mmiowb();
1165 }
1166
1167 static void bnx2x_igu_int_enable(struct bnx2x *bp)
1168 {
1169         u32 val;
1170         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1171         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1172
1173         val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1174
1175         if (msix) {
1176                 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1177                          IGU_PF_CONF_SINGLE_ISR_EN);
1178                 val |= (IGU_PF_CONF_FUNC_EN |
1179                         IGU_PF_CONF_MSI_MSIX_EN |
1180                         IGU_PF_CONF_ATTN_BIT_EN);
1181         } else if (msi) {
1182                 val &= ~IGU_PF_CONF_INT_LINE_EN;
1183                 val |= (IGU_PF_CONF_FUNC_EN |
1184                         IGU_PF_CONF_MSI_MSIX_EN |
1185                         IGU_PF_CONF_ATTN_BIT_EN |
1186                         IGU_PF_CONF_SINGLE_ISR_EN);
1187         } else {
1188                 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1189                 val |= (IGU_PF_CONF_FUNC_EN |
1190                         IGU_PF_CONF_INT_LINE_EN |
1191                         IGU_PF_CONF_ATTN_BIT_EN |
1192                         IGU_PF_CONF_SINGLE_ISR_EN);
1193         }
1194
1195         DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
1196            val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1197
1198         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1199
1200         barrier();
1201
1202         /* init leading/trailing edge */
1203         if (IS_MF(bp)) {
1204                 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1205                 if (bp->port.pmf)
1206                         /* enable nig and gpio3 attention */
1207                         val |= 0x1100;
1208         } else
1209                 val = 0xffff;
1210
1211         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1212         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1213
1214         /* Make sure that interrupts are indeed enabled from here on */
1215         mmiowb();
1216 }
1217
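/* Dispatch on the interrupt controller block: E1x chips use the HC,
 * while E2 normally runs with the IGU (bp->common.int_block is
 * determined once at probe time).
 */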
1218 void bnx2x_int_enable(struct bnx2x *bp)
1219 {
1220         if (bp->common.int_block == INT_BLOCK_HC)
1221                 bnx2x_hc_int_enable(bp);
1222         else
1223                 bnx2x_igu_int_enable(bp);
1224 }
1225
1226 static void bnx2x_hc_int_disable(struct bnx2x *bp)
1227 {
1228         int port = BP_PORT(bp);
1229         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1230         u32 val = REG_RD(bp, addr);
1231
1232         /*
1233          * In E1 we must use only the PCI configuration space to disable
1234          * the MSI/MSI-X capability; it is forbidden to clear
1235          * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
1236          */
1237         if (CHIP_IS_E1(bp)) {
1238                 /* Since IGU_PF_CONF_MSI_MSIX_EN stays always on, use the
1239                  * mask register to prevent the HC from sending interrupts
1240                  * after we exit this function.
1241                  */
1242                 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
1243
1244                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1245                          HC_CONFIG_0_REG_INT_LINE_EN_0 |
1246                          HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1247         } else
1248                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1249                          HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1250                          HC_CONFIG_0_REG_INT_LINE_EN_0 |
1251                          HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1252
1253         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1254            val, port, addr);
1255
1256         /* flush all outstanding writes */
1257         mmiowb();
1258
1259         REG_WR(bp, addr, val);
1260         if (REG_RD(bp, addr) != val)
1261                 BNX2X_ERR("BUG! proper val not read from HC!\n");
1262 }
1263
1264 static void bnx2x_igu_int_disable(struct bnx2x *bp)
1265 {
1266         u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1267
1268         val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
1269                  IGU_PF_CONF_INT_LINE_EN |
1270                  IGU_PF_CONF_ATTN_BIT_EN);
1271
1272         DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
1273
1274         /* flush all outstanding writes */
1275         mmiowb();
1276
1277         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1278         if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
1279                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1280 }
1281
1282 static void bnx2x_int_disable(struct bnx2x *bp)
1283 {
1284         if (bp->common.int_block == INT_BLOCK_HC)
1285                 bnx2x_hc_int_disable(bp);
1286         else
1287                 bnx2x_igu_int_disable(bp);
1288 }
1289
1290 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1291 {
1292         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1293         int i, offset;
1294
1295         /* disable interrupt handling */
1296         atomic_inc(&bp->intr_sem);
1297         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1298
1299         if (disable_hw)
1300                 /* prevent the HW from sending interrupts */
1301                 bnx2x_int_disable(bp);
1302
1303         /* make sure all ISRs are done */
1304         if (msix) {
1305                 synchronize_irq(bp->msix_table[0].vector);
1306                 offset = 1;
1307 #ifdef BCM_CNIC
1308                 offset++;
1309 #endif
1310                 for_each_eth_queue(bp, i)
1311                         synchronize_irq(bp->msix_table[i + offset].vector);
1312         } else
1313                 synchronize_irq(bp->pdev->irq);
1314
1315         /* make sure sp_task is not running */
1316         cancel_delayed_work(&bp->sp_task);
1317         flush_workqueue(bnx2x_wq);
1318 }
1319
1320 /* fast path */
1321
1322 /*
1323  * General service functions
1324  */
1325
1326 /* Return true if the lock was successfully acquired */
1327 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1328 {
1329         u32 lock_status;
1330         u32 resource_bit = (1 << resource);
1331         int func = BP_FUNC(bp);
1332         u32 hw_lock_control_reg;
1333
1334         DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1335
1336         /* Validating that the resource is within range */
1337         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1338                 DP(NETIF_MSG_HW,
1339                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1340                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1341                 return false;
1342         }
1343
1344         if (func <= 5)
1345                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1346         else
1347                 hw_lock_control_reg =
1348                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1349
1350         /* Try to acquire the lock */
1351         REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1352         lock_status = REG_RD(bp, hw_lock_control_reg);
1353         if (lock_status & resource_bit)
1354                 return true;
1355
1356         DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1357         return false;
1358 }
1359
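/* Illustrative use of the trylock (the resource id is an example only):
 *
 *	if (bnx2x_trylock_hw_lock(bp, HW_LOCK_RESOURCE_SPIO)) {
 *		... touch the shared resource ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 *	}
 *
 * The lock is taken by writing the resource bit to the set-register at
 * hw_lock_control_reg + 4 and owned only if it reads back as set.
 */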
1360 #ifdef BCM_CNIC
1361 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1362 #endif
1363
1364 void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1365                            union eth_rx_cqe *rr_cqe)
1366 {
1367         struct bnx2x *bp = fp->bp;
1368         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1369         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1370
1371         DP(BNX2X_MSG_SP,
1372            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
1373            fp->index, cid, command, bp->state,
1374            rr_cqe->ramrod_cqe.ramrod_type);
1375
1376         switch (command | fp->state) {
1377         case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1378                 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1379                 fp->state = BNX2X_FP_STATE_OPEN;
1380                 break;
1381
1382         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1383                 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
1384                 fp->state = BNX2X_FP_STATE_HALTED;
1385                 break;
1386
1387         case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1388                 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
1389                 fp->state = BNX2X_FP_STATE_TERMINATED;
1390                 break;
1391
1392         default:
1393                 BNX2X_ERR("unexpected MC reply (%d)  "
1394                           "fp[%d] state is %x\n",
1395                           command, fp->index, fp->state);
1396                 break;
1397         }
1398
1399         smp_mb__before_atomic_inc();
1400         atomic_inc(&bp->spq_left);
1401         /* push the change in fp->state towards the memory */
1402         smp_wmb();
1403
1404         return;
1405 }
1406
1407 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1408 {
1409         struct bnx2x *bp = netdev_priv(dev_instance);
1410         u16 status = bnx2x_ack_int(bp);
1411         u16 mask;
1412         int i;
1413
1414         /* Return here if interrupt is shared and it's not for us */
1415         if (unlikely(status == 0)) {
1416                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1417                 return IRQ_NONE;
1418         }
1419         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1420
1421         /* Return here if interrupt is disabled */
1422         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1423                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1424                 return IRQ_HANDLED;
1425         }
1426
1427 #ifdef BNX2X_STOP_ON_ERROR
1428         if (unlikely(bp->panic))
1429                 return IRQ_HANDLED;
1430 #endif
1431
1432         for_each_eth_queue(bp, i) {
1433                 struct bnx2x_fastpath *fp = &bp->fp[i];
1434
1435                 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
1436                 if (status & mask) {
1437                         /* Handle Rx and Tx according to SB id */
1438                         prefetch(fp->rx_cons_sb);
1439                         prefetch(fp->tx_cons_sb);
1440                         prefetch(&fp->sb_running_index[SM_RX_ID]);
1441                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1442                         status &= ~mask;
1443                 }
1444         }
1445
1446 #ifdef BCM_CNIC
1447         mask = 0x2;
1448         if (status & (mask | 0x1)) {
1449                 struct cnic_ops *c_ops = NULL;
1450
1451                 rcu_read_lock();
1452                 c_ops = rcu_dereference(bp->cnic_ops);
1453                 if (c_ops)
1454                         c_ops->cnic_handler(bp->cnic_data, NULL);
1455                 rcu_read_unlock();
1456
1457                 status &= ~mask;
1458         }
1459 #endif
1460
1461         if (unlikely(status & 0x1)) {
1462                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1463
1464                 status &= ~0x1;
1465                 if (!status)
1466                         return IRQ_HANDLED;
1467         }
1468
1469         if (unlikely(status))
1470                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1471                    status);
1472
1473         return IRQ_HANDLED;
1474 }
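
/*
 * A sketch of the INTA status word decoded above: bit 0 is the default
 * (slow-path) status block, and ethernet queue i maps to bit
 * (1 + CNIC_CONTEXT_USE + i), i.e. mask 0x2 << (i + CNIC_CONTEXT_USE).
 * With BCM_CNIC compiled in, one extra status block therefore sits
 * between the slow path and the first ethernet queue.
 */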
1475
1476 /* end of fast path */
1477
1478
1479 /* Link */
1480
1481 /*
1482  * General service functions
1483  */
1484
1485 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1486 {
1487         u32 lock_status;
1488         u32 resource_bit = (1 << resource);
1489         int func = BP_FUNC(bp);
1490         u32 hw_lock_control_reg;
1491         int cnt;
1492
1493         /* Validating that the resource is within range */
1494         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1495                 DP(NETIF_MSG_HW,
1496                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1497                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1498                 return -EINVAL;
1499         }
1500
1501         if (func <= 5) {
1502                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1503         } else {
1504                 hw_lock_control_reg =
1505                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1506         }
1507
1508         /* Validating that the resource is not already taken */
1509         lock_status = REG_RD(bp, hw_lock_control_reg);
1510         if (lock_status & resource_bit) {
1511                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1512                    lock_status, resource_bit);
1513                 return -EEXIST;
1514         }
1515
1516         /* Try for 5 seconds every 5ms */
1517         for (cnt = 0; cnt < 1000; cnt++) {
1518                 /* Try to acquire the lock */
1519                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1520                 lock_status = REG_RD(bp, hw_lock_control_reg);
1521                 if (lock_status & resource_bit)
1522                         return 0;
1523
1524                 msleep(5);
1525         }
1526         DP(NETIF_MSG_HW, "Timeout\n");
1527         return -EAGAIN;
1528 }
1529
1530 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1531 {
1532         u32 lock_status;
1533         u32 resource_bit = (1 << resource);
1534         int func = BP_FUNC(bp);
1535         u32 hw_lock_control_reg;
1536
1537         DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1538
1539         /* Validating that the resource is within range */
1540         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1541                 DP(NETIF_MSG_HW,
1542                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1543                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1544                 return -EINVAL;
1545         }
1546
1547         if (func <= 5) {
1548                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1549         } else {
1550                 hw_lock_control_reg =
1551                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1552         }
1553
1554         /* Validating that the resource is currently taken */
1555         lock_status = REG_RD(bp, hw_lock_control_reg);
1556         if (!(lock_status & resource_bit)) {
1557                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1558                    lock_status, resource_bit);
1559                 return -EFAULT;
1560         }
1561
1562         REG_WR(bp, hw_lock_control_reg, resource_bit);
1563         return 0;
1564 }
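
/*
 * The per-function lock words used above have an 8-byte stride:
 * functions 0-5 start at MISC_REG_DRIVER_CONTROL_1 and functions 6-7
 * at MISC_REG_DRIVER_CONTROL_7, so e.g. function 7 uses
 * MISC_REG_DRIVER_CONTROL_7 + 8. Acquire and release are expected to
 * pair, as in (illustrative):
 *
 *	if (!bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO)) {
 *		... program the shared SPIO block ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 *	}
 */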
1565
1566
1567 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1568 {
1569         /* The GPIO should be swapped if swap register is set and active */
1570         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1571                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1572         int gpio_shift = gpio_num +
1573                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1574         u32 gpio_mask = (1 << gpio_shift);
1575         u32 gpio_reg;
1576         int value;
1577
1578         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1579                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1580                 return -EINVAL;
1581         }
1582
1583         /* read GPIO value */
1584         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1585
1586         /* get the requested pin value */
1587         if ((gpio_reg & gpio_mask) == gpio_mask)
1588                 value = 1;
1589         else
1590                 value = 0;
1591
1592         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1593
1594         return value;
1595 }
1596
1597 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1598 {
1599         /* The GPIO should be swapped if swap register is set and active */
1600         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1601                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1602         int gpio_shift = gpio_num +
1603                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1604         u32 gpio_mask = (1 << gpio_shift);
1605         u32 gpio_reg;
1606
1607         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1608                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1609                 return -EINVAL;
1610         }
1611
1612         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1613         /* read GPIO and mask except the float bits */
1614         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1615
1616         switch (mode) {
1617         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1618                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1619                    gpio_num, gpio_shift);
1620                 /* clear FLOAT and set CLR */
1621                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1622                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1623                 break;
1624
1625         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1626                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1627                    gpio_num, gpio_shift);
1628                 /* clear FLOAT and set SET */
1629                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1630                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1631                 break;
1632
1633         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1634                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1635                    gpio_num, gpio_shift);
1636                 /* set FLOAT */
1637                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1638                 break;
1639
1640         default:
1641                 break;
1642         }
1643
1644         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1645         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1646
1647         return 0;
1648 }
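
/*
 * Worked example (assuming MISC_REGISTERS_GPIO_PORT_SHIFT is 4, as the
 * per-port GPIO banks imply): setting GPIO 2 to output-high on the
 * swapped port gives gpio_shift = 2 + 4 = 6, so bit 6 is cleared in the
 * FLOAT field and set in the SET field of MISC_REG_GPIO.
 */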
1649
1650 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1651 {
1652         /* The GPIO should be swapped if swap register is set and active */
1653         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1654                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1655         int gpio_shift = gpio_num +
1656                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1657         u32 gpio_mask = (1 << gpio_shift);
1658         u32 gpio_reg;
1659
1660         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1661                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1662                 return -EINVAL;
1663         }
1664
1665         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1666         /* read GPIO int */
1667         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1668
1669         switch (mode) {
1670         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1671                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1672                                    "output low\n", gpio_num, gpio_shift);
1673                 /* clear SET and set CLR */
1674                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1675                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1676                 break;
1677
1678         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1679                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1680                                    "output high\n", gpio_num, gpio_shift);
1681                 /* clear CLR and set SET */
1682                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1683                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1684                 break;
1685
1686         default:
1687                 break;
1688         }
1689
1690         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1691         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1692
1693         return 0;
1694 }
1695
1696 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1697 {
1698         u32 spio_mask = (1 << spio_num);
1699         u32 spio_reg;
1700
1701         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1702             (spio_num > MISC_REGISTERS_SPIO_7)) {
1703                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1704                 return -EINVAL;
1705         }
1706
1707         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1708         /* read SPIO and mask except the float bits */
1709         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1710
1711         switch (mode) {
1712         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1713                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1714                 /* clear FLOAT and set CLR */
1715                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1716                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1717                 break;
1718
1719         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1720                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1721                 /* clear FLOAT and set SET */
1722                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1723                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1724                 break;
1725
1726         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1727                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1728                 /* set FLOAT */
1729                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1730                 break;
1731
1732         default:
1733                 break;
1734         }
1735
1736         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1737         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1738
1739         return 0;
1740 }
1741
1742 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1743 {
1744         u32 sel_phy_idx = 0;
1745         if (bp->link_vars.link_up) {
1746                 sel_phy_idx = EXT_PHY1;
1747                 /* In case the link is SERDES, check if EXT_PHY2 is the one */
1748                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1749                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1750                         sel_phy_idx = EXT_PHY2;
1751         } else {
1752
1753                 switch (bnx2x_phy_selection(&bp->link_params)) {
1754                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1755                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1756                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1757                         sel_phy_idx = EXT_PHY1;
1758                         break;
1759                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1760                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1761                         sel_phy_idx = EXT_PHY2;
1762                         break;
1763                 }
1764         }
1765         /*
1766          * The selected active PHY is always the one after swapping (in case
1767          * PHY swapping is enabled). So when swapping is enabled, we need to
1768          * reverse the configuration.
1769          */
1770
1771         if (bp->link_params.multi_phy_config &
1772             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1773                 if (sel_phy_idx == EXT_PHY1)
1774                         sel_phy_idx = EXT_PHY2;
1775                 else if (sel_phy_idx == EXT_PHY2)
1776                         sel_phy_idx = EXT_PHY1;
1777         }
1778         return LINK_CONFIG_IDX(sel_phy_idx);
1779 }
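
/*
 * Example: on a board with PHY swapping enabled where the logic above
 * selects EXT_PHY1, the function returns LINK_CONFIG_IDX(EXT_PHY2),
 * because the configuration arrays are indexed by the pre-swap
 * position while the selection reflects the post-swap one.
 */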
1780
1781 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1782 {
1783         u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
1784         switch (bp->link_vars.ieee_fc &
1785                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1786         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1787                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1788                                                    ADVERTISED_Pause);
1789                 break;
1790
1791         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1792                 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
1793                                                   ADVERTISED_Pause);
1794                 break;
1795
1796         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1797                 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
1798                 break;
1799
1800         default:
1801                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1802                                                    ADVERTISED_Pause);
1803                 break;
1804         }
1805 }
1806
1807 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1808 {
1809         if (!BP_NOMCP(bp)) {
1810                 u8 rc;
1811                 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1812                 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
1813                 /* Initialize link parameters structure variables */
1814                 /* It is recommended to turn off RX FC for jumbo frames
1815                    for better performance */
1816                 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
1817                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1818                 else
1819                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1820
1821                 bnx2x_acquire_phy_lock(bp);
1822
1823                 if (load_mode == LOAD_DIAG) {
1824                         bp->link_params.loopback_mode = LOOPBACK_XGXS;
1825                         bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1826                 }
1827
1828                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1829
1830                 bnx2x_release_phy_lock(bp);
1831
1832                 bnx2x_calc_fc_adv(bp);
1833
1834                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1835                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1836                         bnx2x_link_report(bp);
1837                 }
1838                 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
1839                 return rc;
1840         }
1841         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
1842         return -EINVAL;
1843 }
1844
1845 void bnx2x_link_set(struct bnx2x *bp)
1846 {
1847         if (!BP_NOMCP(bp)) {
1848                 bnx2x_acquire_phy_lock(bp);
1849                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1850                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1851                 bnx2x_release_phy_lock(bp);
1852
1853                 bnx2x_calc_fc_adv(bp);
1854         } else
1855                 BNX2X_ERR("Bootcode is missing - can not set link\n");
1856 }
1857
1858 static void bnx2x__link_reset(struct bnx2x *bp)
1859 {
1860         if (!BP_NOMCP(bp)) {
1861                 bnx2x_acquire_phy_lock(bp);
1862                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1863                 bnx2x_release_phy_lock(bp);
1864         } else
1865                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1866 }
1867
1868 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
1869 {
1870         u8 rc = 0;
1871
1872         if (!BP_NOMCP(bp)) {
1873                 bnx2x_acquire_phy_lock(bp);
1874                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1875                                      is_serdes);
1876                 bnx2x_release_phy_lock(bp);
1877         } else
1878                 BNX2X_ERR("Bootcode is missing - can not test link\n");
1879
1880         return rc;
1881 }
1882
1883 static void bnx2x_init_port_minmax(struct bnx2x *bp)
1884 {
1885         u32 r_param = bp->link_vars.line_speed / 8;
1886         u32 fair_periodic_timeout_usec;
1887         u32 t_fair;
1888
1889         memset(&(bp->cmng.rs_vars), 0,
1890                sizeof(struct rate_shaping_vars_per_port));
1891         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
1892
1893         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1894         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1895
1896         /* this is the threshold below which no timer arming will occur.
1897            The 1.25 coefficient makes the threshold a little bigger
1898            than the real time, to compensate for timer inaccuracy */
1899         bp->cmng.rs_vars.rs_threshold =
1900                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1901
1902         /* resolution of fairness timer */
1903         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1904         /* for 10G it is 1000usec. for 1G it is 10000usec. */
1905         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
1906
1907         /* this is the threshold below which we won't arm the timer anymore */
1908         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1909
1910         /* we multiply by 1e3/8 to get bytes/msec.
1911            We don't want the credits to exceed the bytes
1912            of t_fair*FAIR_MEM (the algorithm resolution) */
1913         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1914         /* since each tick is 4 usec */
1915         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
1916 }
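
/*
 * Worked example at 10 Gbps (the initial line_speed set in
 * bnx2x_pf_init below): r_param = 10000 / 8 = 1250 bytes/usec, so with
 * the 100 usec period noted above the rate-shaping threshold is
 * 100 * 1250 * 5 / 4 = 156250 bytes, i.e. 1.25x one period's worth of
 * bytes, and t_fair = 1000 usec as the comment above states.
 */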
1917
1918 /* Calculates the sum of vn_min_rates.
1919    It's needed for further normalizing of the min_rates.
1920    Returns:
1921      sum of vn_min_rates.
1922        or
1923      0 - if all the min_rates are 0.
1924      In the latter case the fairness algorithm should be deactivated.
1925      If not all min_rates are zero, then those that are will be set to 1.
1926  */
1927 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1928 {
1929         int all_zero = 1;
1930         int vn;
1931
1932         bp->vn_weight_sum = 0;
1933         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1934                 u32 vn_cfg = bp->mf_config[vn];
1935                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1936                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1937
1938                 /* Skip hidden vns */
1939                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1940                         continue;
1941
1942                 /* If min rate is zero - set it to 1 */
1943                 if (!vn_min_rate)
1944                         vn_min_rate = DEF_MIN_RATE;
1945                 else
1946                         all_zero = 0;
1947
1948                 bp->vn_weight_sum += vn_min_rate;
1949         }
1950
1951         /* ... only if all min rates are zeros - disable fairness */
1952         if (all_zero) {
1953                 bp->cmng.flags.cmng_enables &=
1954                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1955                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1956                    "  fairness will be disabled\n");
1957         } else
1958                 bp->cmng.flags.cmng_enables |=
1959                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1960 }
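
/*
 * Example (hypothetical mf_config contents): min BW fields of
 * 0/20/30/50 percent yield vn_min_rate values of
 * DEF_MIN_RATE/2000/3000/5000, so vn_weight_sum is 10000 plus
 * DEF_MIN_RATE and fairness stays enabled; only when every configured
 * min rate is zero is fairness disabled.
 */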
1961
1962 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1963 {
1964         struct rate_shaping_vars_per_vn m_rs_vn;
1965         struct fairness_vars_per_vn m_fair_vn;
1966         u32 vn_cfg = bp->mf_config[vn];
1967         int func = 2*vn + BP_PORT(bp);
1968         u16 vn_min_rate, vn_max_rate;
1969         int i;
1970
1971         /* If function is hidden - set min and max to zeroes */
1972         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1973                 vn_min_rate = 0;
1974                 vn_max_rate = 0;
1975
1976         } else {
1977                 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
1978
1979                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1980                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1981                 /* If fairness is enabled (not all min rates are zeroes) and
1982                    if current min rate is zero - set it to 1.
1983                    This is a requirement of the algorithm. */
1984                 if (bp->vn_weight_sum && (vn_min_rate == 0))
1985                         vn_min_rate = DEF_MIN_RATE;
1986
1987                 if (IS_MF_SI(bp))
1988                         /* maxCfg in percents of linkspeed */
1989                         vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
1990                 else
1991                         /* maxCfg is absolute in 100Mb units */
1992                         vn_max_rate = maxCfg * 100;
1993         }
1994
1995         DP(NETIF_MSG_IFUP,
1996            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
1997            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1998
1999         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2000         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2001
2002         /* global vn counter - maximal Mbps for this vn */
2003         m_rs_vn.vn_counter.rate = vn_max_rate;
2004
2005         /* quota - number of bytes transmitted in this period */
2006         m_rs_vn.vn_counter.quota =
2007                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2008
2009         if (bp->vn_weight_sum) {
2010                 /* credit for each period of the fairness algorithm:
2011                    number of bytes in T_FAIR (the vns share the port rate).
2012                    vn_weight_sum should not be larger than 10000, thus
2013                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2014                    than zero */
2015                 m_fair_vn.vn_credit_delta =
2016                         max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2017                                                    (8 * bp->vn_weight_sum))),
2018                               (bp->cmng.fair_vars.fair_threshold +
2019                                                         MIN_ABOVE_THRESH));
2020                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2021                    m_fair_vn.vn_credit_delta);
2022         }
2023
2024         /* Store it to internal memory */
2025         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2026                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2027                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2028                        ((u32 *)(&m_rs_vn))[i]);
2029
2030         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2031                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2032                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2033                        ((u32 *)(&m_fair_vn))[i]);
2034 }
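
/*
 * Worked example for the quota above: a vn capped at vn_max_rate =
 * 10000 Mbps with a 100 usec rate-shaping period gets
 * 10000 * 100 / 8 = 125000 bytes per period, i.e. 1250 bytes/usec
 * over 100 usec.
 */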
2035
2036 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2037 {
2038         if (CHIP_REV_IS_SLOW(bp))
2039                 return CMNG_FNS_NONE;
2040         if (IS_MF(bp))
2041                 return CMNG_FNS_MINMAX;
2042
2043         return CMNG_FNS_NONE;
2044 }
2045
2046 static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2047 {
2048         int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2049
2050         if (BP_NOMCP(bp))
2051                 return; /* what should the default value be in this case? */
2052
2053         /* For 2 port configuration the absolute function number formula
2054          * is:
2055          *      abs_func = 2 * vn + BP_PORT + BP_PATH
2056          *
2057          *      and there are 4 functions per port
2058          *
2059          * For 4 port configuration it is
2060          *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2061          *
2062          *      and there are 2 functions per port
2063          */
2064         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2065                 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2066
2067                 if (func >= E1H_FUNC_MAX)
2068                         break;
2069
2070                 bp->mf_config[vn] =
2071                         MF_CFG_RD(bp, func_mf_config[func].config);
2072         }
2073 }
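
/*
 * Worked example of the abs_func formula: on a 2-port chip (n = 1),
 * path 0, port 1, vn 2 gives func = 1 * (2*2 + 1) + 0 = 5, while on a
 * 4-port chip (n = 2) the same vn gives func = 2 * (2*2 + 1) + 0 = 10,
 * which is beyond E1H_FUNC_MAX and terminates the loop.
 */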
2074
2075 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2076 {
2077
2078         if (cmng_type == CMNG_FNS_MINMAX) {
2079                 int vn;
2080
2081                 /* clear cmng_enables */
2082                 bp->cmng.flags.cmng_enables = 0;
2083
2084                 /* read mf conf from shmem */
2085                 if (read_cfg)
2086                         bnx2x_read_mf_cfg(bp);
2087
2088                 /* Init rate shaping and fairness contexts */
2089                 bnx2x_init_port_minmax(bp);
2090
2091                 /* calculate vn_weight_sum and enable fairness if it is not 0 */
2092                 bnx2x_calc_vn_weight_sum(bp);
2093
2094                 /* calculate and set min-max rate for each vn */
2095                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2096                         bnx2x_init_vn_minmax(bp, vn);
2097
2098                 /* always enable rate shaping and fairness */
2099                 bp->cmng.flags.cmng_enables |=
2100                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2101                 if (!bp->vn_weight_sum)
2102                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2103                                    "  fairness will be disabled\n");
2104                 return;
2105         }
2106
2107         /* rate shaping and fairness are disabled */
2108         DP(NETIF_MSG_IFUP,
2109            "rate shaping and fairness are disabled\n");
2110 }
2111
2112 static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2113 {
2114         int port = BP_PORT(bp);
2115         int func;
2116         int vn;
2117
2118         /* Set the attention towards other drivers on the same port */
2119         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2120                 if (vn == BP_E1HVN(bp))
2121                         continue;
2122
2123                 func = ((vn << 1) | port);
2124                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2125                        (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2126         }
2127 }
2128
2129 /* This function is called upon link interrupt */
2130 static void bnx2x_link_attn(struct bnx2x *bp)
2131 {
2132         u32 prev_link_status = bp->link_vars.link_status;
2133         /* Make sure that we are synced with the current statistics */
2134         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2135
2136         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2137
2138         if (bp->link_vars.link_up) {
2139
2140                 /* dropless flow control */
2141                 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
2142                         int port = BP_PORT(bp);
2143                         u32 pause_enabled = 0;
2144
2145                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2146                                 pause_enabled = 1;
2147
2148                         REG_WR(bp, BAR_USTRORM_INTMEM +
2149                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2150                                pause_enabled);
2151                 }
2152
2153                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2154                         struct host_port_stats *pstats;
2155
2156                         pstats = bnx2x_sp(bp, port_stats);
2157                         /* reset old bmac stats */
2158                         memset(&(pstats->mac_stx[0]), 0,
2159                                sizeof(struct mac_stx));
2160                 }
2161                 if (bp->state == BNX2X_STATE_OPEN)
2162                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2163         }
2164
2165         /* indicate link status only if link status actually changed */
2166         if (prev_link_status != bp->link_vars.link_status)
2167                 bnx2x_link_report(bp);
2168
2169         if (IS_MF(bp))
2170                 bnx2x_link_sync_notify(bp);
2171
2172         if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2173                 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2174
2175                 if (cmng_fns != CMNG_FNS_NONE) {
2176                         bnx2x_cmng_fns_init(bp, false, cmng_fns);
2177                         storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2178                 } else
2179                         /* rate shaping and fairness are disabled */
2180                         DP(NETIF_MSG_IFUP,
2181                            "single function mode without fairness\n");
2182         }
2183 }
2184
2185 void bnx2x__link_status_update(struct bnx2x *bp)
2186 {
2187         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2188                 return;
2189
2190         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2191
2192         if (bp->link_vars.link_up)
2193                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2194         else
2195                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2196
2197         /* the link status update could be the result of a DCC event,
2198            hence re-read the shmem mf configuration */
2199         bnx2x_read_mf_cfg(bp);
2200
2201         /* indicate link status */
2202         bnx2x_link_report(bp);
2203 }
2204
2205 static void bnx2x_pmf_update(struct bnx2x *bp)
2206 {
2207         int port = BP_PORT(bp);
2208         u32 val;
2209
2210         bp->port.pmf = 1;
2211         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2212
2213         /* enable nig attention */
2214         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2215         if (bp->common.int_block == INT_BLOCK_HC) {
2216                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2217                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2218         } else if (CHIP_IS_E2(bp)) {
2219                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2220                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2221         }
2222
2223         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2224 }
2225
2226 /* end of Link */
2227
2228 /* slow path */
2229
2230 /*
2231  * General service functions
2232  */
2233
2234 /* send the MCP a request, block until there is a reply */
2235 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2236 {
2237         int mb_idx = BP_FW_MB_IDX(bp);
2238         u32 seq = ++bp->fw_seq;
2239         u32 rc = 0;
2240         u32 cnt = 1;
2241         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2242
2243         mutex_lock(&bp->fw_mb_mutex);
2244         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2245         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2246
2247         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2248
2249         do {
2250                 /* let the FW do its magic ... */
2251                 msleep(delay);
2252
2253                 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2254
2255                 /* Give the FW up to 5 seconds (500*10ms) */
2256         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2257
2258         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2259            cnt*delay, rc, seq);
2260
2261         /* is this a reply to our command? */
2262         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2263                 rc &= FW_MSG_CODE_MASK;
2264         else {
2265                 /* FW BUG! */
2266                 BNX2X_ERR("FW failed to respond!\n");
2267                 bnx2x_fw_dump(bp);
2268                 rc = 0;
2269         }
2270         mutex_unlock(&bp->fw_mb_mutex);
2271
2272         return rc;
2273 }
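
/*
 * Usage sketch (illustrative): callers pass a DRV_MSG_CODE_* opcode and
 * receive the masked FW_MSG_CODE_* reply, e.g.
 *
 *	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
 *
 * A return of 0 means the FW never echoed the sequence number within
 * the 5 second window and should be treated as a failure.
 */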
2274
2275 static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2276 {
2277 #ifdef BCM_CNIC
2278         if (IS_FCOE_FP(fp) && IS_MF(bp))
2279                 return false;
2280 #endif
2281         return true;
2282 }
2283
2284 /* must be called under rtnl_lock */
2285 static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2286 {
2287         u32 mask = (1 << cl_id);
2288
2289         /* initial setting is BNX2X_ACCEPT_NONE */
2290         u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2291         u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2292         u8 unmatched_unicast = 0;
2293
2294         if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2295                 unmatched_unicast = 1;
2296
2297         if (filters & BNX2X_PROMISCUOUS_MODE) {
2298                 /* promiscuous - accept all, drop none */
2299                 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2300                 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2301                 if (IS_MF_SI(bp)) {
2302                         /*
2303                          * In SI mode, promiscuous mode accepts
2304                          * only unmatched packets
2305                          */
2306                         unmatched_unicast = 1;
2307                         accp_all_ucast = 0;
2308                 }
2309         }
2310         if (filters & BNX2X_ACCEPT_UNICAST) {
2311                 /* accept matched ucast */
2312                 drop_all_ucast = 0;
2313         }
2314         if (filters & BNX2X_ACCEPT_MULTICAST)
2315                 /* accept matched mcast */
2316                 drop_all_mcast = 0;
2317
2318         if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2319                 /* accept all ucast */
2320                 drop_all_ucast = 0;
2321                 accp_all_ucast = 1;
2322         }
2323         if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2324                 /* accept all mcast */
2325                 drop_all_mcast = 0;
2326                 accp_all_mcast = 1;
2327         }
2328         if (filters & BNX2X_ACCEPT_BROADCAST) {
2329                 /* accept (all) bcast */
2330                 drop_all_bcast = 0;
2331                 accp_all_bcast = 1;
2332         }
2333
2334         bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2335                 bp->mac_filters.ucast_drop_all | mask :
2336                 bp->mac_filters.ucast_drop_all & ~mask;
2337
2338         bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2339                 bp->mac_filters.mcast_drop_all | mask :
2340                 bp->mac_filters.mcast_drop_all & ~mask;
2341
2342         bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2343                 bp->mac_filters.bcast_drop_all | mask :
2344                 bp->mac_filters.bcast_drop_all & ~mask;
2345
2346         bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2347                 bp->mac_filters.ucast_accept_all | mask :
2348                 bp->mac_filters.ucast_accept_all & ~mask;
2349
2350         bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2351                 bp->mac_filters.mcast_accept_all | mask :
2352                 bp->mac_filters.mcast_accept_all & ~mask;
2353
2354         bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2355                 bp->mac_filters.bcast_accept_all | mask :
2356                 bp->mac_filters.bcast_accept_all & ~mask;
2357
2358         bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2359                 bp->mac_filters.unmatched_unicast | mask :
2360                 bp->mac_filters.unmatched_unicast & ~mask;
2361 }
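
/*
 * Example filter combination: filters = BNX2X_ACCEPT_UNICAST |
 * BNX2X_ACCEPT_BROADCAST leaves matched ucast and all bcast flowing
 * (drop_all_ucast = drop_all_bcast = 0, accp_all_bcast = 1) while
 * mcast remains dropped, as in the BNX2X_ACCEPT_NONE baseline.
 */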
2362
2363 static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2364 {
2365         struct tstorm_eth_function_common_config tcfg = {0};
2366         u16 rss_flgs;
2367
2368         /* tpa */
2369         if (p->func_flgs & FUNC_FLG_TPA)
2370                 tcfg.config_flags |=
2371                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2372
2373         /* set rss flags */
2374         rss_flgs = (p->rss->mode <<
2375                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2376
2377         if (p->rss->cap & RSS_IPV4_CAP)
2378                 rss_flgs |= RSS_IPV4_CAP_MASK;
2379         if (p->rss->cap & RSS_IPV4_TCP_CAP)
2380                 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2381         if (p->rss->cap & RSS_IPV6_CAP)
2382                 rss_flgs |= RSS_IPV6_CAP_MASK;
2383         if (p->rss->cap & RSS_IPV6_TCP_CAP)
2384                 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2385
2386         tcfg.config_flags |= rss_flgs;
2387         tcfg.rss_result_mask = p->rss->result_mask;
2388
2389         storm_memset_func_cfg(bp, &tcfg, p->func_id);
2390
2391         /* Enable the function in the FW */
2392         storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2393         storm_memset_func_en(bp, p->func_id, 1);
2394
2395         /* statistics */
2396         if (p->func_flgs & FUNC_FLG_STATS) {
2397                 struct stats_indication_flags stats_flags = {0};
2398                 stats_flags.collect_eth = 1;
2399
2400                 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2401                 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2402
2403                 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2404                 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2405
2406                 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2407                 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2408
2409                 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2410                 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2411         }
2412
2413         /* spq */
2414         if (p->func_flgs & FUNC_FLG_SPQ) {
2415                 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2416                 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2417                        XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2418         }
2419 }
2420
2421 static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2422                                      struct bnx2x_fastpath *fp)
2423 {
2424         u16 flags = 0;
2425
2426         /* calculate queue flags */
2427         flags |= QUEUE_FLG_CACHE_ALIGN;
2428         flags |= QUEUE_FLG_HC;
2429         flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
2430
2431         flags |= QUEUE_FLG_VLAN;
2432         DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2433
2434         if (!fp->disable_tpa)
2435                 flags |= QUEUE_FLG_TPA;
2436
2437         flags = stat_counter_valid(bp, fp) ?
2438                         (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
2439
2440         return flags;
2441 }
2442
2443 static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2444         struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2445         struct bnx2x_rxq_init_params *rxq_init)
2446 {
2447         u16 max_sge = 0;
2448         u16 sge_sz = 0;
2449         u16 tpa_agg_size = 0;
2450
2451         /* calculate queue flags */
2452         u16 flags = bnx2x_get_cl_flags(bp, fp);
2453
2454         if (!fp->disable_tpa) {
2455                 pause->sge_th_hi = 250;
2456                 pause->sge_th_lo = 150;
2457                 tpa_agg_size = min_t(u32,
2458                         (min_t(u32, 8, MAX_SKB_FRAGS) *
2459                         SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2460                 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2461                         SGE_PAGE_SHIFT;
2462                 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2463                           (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2464                 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2465                                     0xffff);
2466         }
2467
2468         /* pause - not for e1 */
2469         if (!CHIP_IS_E1(bp)) {
2470                 pause->bd_th_hi = 350;
2471                 pause->bd_th_lo = 250;
2472                 pause->rcq_th_hi = 350;
2473                 pause->rcq_th_lo = 250;
2474                 pause->sge_th_hi = 0;
2475                 pause->sge_th_lo = 0;
2476                 pause->pri_map = 1;
2477         }
2478
2479         /* rxq setup */
2480         rxq_init->flags = flags;
2481         rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2482         rxq_init->dscr_map = fp->rx_desc_mapping;
2483         rxq_init->sge_map = fp->rx_sge_mapping;
2484         rxq_init->rcq_map = fp->rx_comp_mapping;
2485         rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2486         rxq_init->mtu = bp->dev->mtu;
2487         rxq_init->buf_sz = bp->rx_buf_size;
2488         rxq_init->cl_qzone_id = fp->cl_qzone_id;
2489         rxq_init->cl_id = fp->cl_id;
2490         rxq_init->spcl_id = fp->cl_id;
2491         rxq_init->stat_id = fp->cl_id;
2492         rxq_init->tpa_agg_sz = tpa_agg_size;
2493         rxq_init->sge_buf_sz = sge_sz;
2494         rxq_init->max_sges_pkt = max_sge;
2495         rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2496         rxq_init->fw_sb_id = fp->fw_sb_id;
2497
2498         if (IS_FCOE_FP(fp))
2499                 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2500         else
2501                 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2502
2503         rxq_init->cid = HW_CID(bp, fp->cid);
2504
2505         rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2506 }
2507
2508 static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2509         struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2510 {
2511         u16 flags = bnx2x_get_cl_flags(bp, fp);
2512
2513         txq_init->flags = flags;
2514         txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2515         txq_init->dscr_map = fp->tx_desc_mapping;
2516         txq_init->stat_id = fp->cl_id;
2517         txq_init->cid = HW_CID(bp, fp->cid);
2518         txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2519         txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2520         txq_init->fw_sb_id = fp->fw_sb_id;
2521
2522         if (IS_FCOE_FP(fp)) {
2523                 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2524                 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2525         }
2526
2527         txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2528 }
2529
2530 static void bnx2x_pf_init(struct bnx2x *bp)
2531 {
2532         struct bnx2x_func_init_params func_init = {0};
2533         struct bnx2x_rss_params rss = {0};
2534         struct event_ring_data eq_data = { {0} };
2535         u16 flags;
2536
2537         /* pf specific setups */
2538         if (!CHIP_IS_E1(bp))
2539                 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
2540
2541         if (CHIP_IS_E2(bp)) {
2542                 /* reset IGU PF statistics: MSIX + ATTN */
2543                 /* PF */
2544                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2545                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2546                            (CHIP_MODE_IS_4_PORT(bp) ?
2547                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2548                 /* ATTN */
2549                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2550                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2551                            BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2552                            (CHIP_MODE_IS_4_PORT(bp) ?
2553                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2554         }
2555
2556         /* function setup flags */
2557         flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2558
2559         if (CHIP_IS_E1x(bp))
2560                 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2561         else
2562                 flags |= FUNC_FLG_TPA;
2563
2564         /* function setup */
2565
2566         /*
2567          * Although RSS is meaningless when there is a single HW queue, we
2568          * still need it enabled in order to have the HW Rx hash generated.
2569          */
2570         rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2571                    RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2572         rss.mode = bp->multi_mode;
2573         rss.result_mask = MULTI_MASK;
2574         func_init.rss = &rss;
2575
2576         func_init.func_flgs = flags;
2577         func_init.pf_id = BP_FUNC(bp);
2578         func_init.func_id = BP_FUNC(bp);
2579         func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2580         func_init.spq_map = bp->spq_mapping;
2581         func_init.spq_prod = bp->spq_prod_idx;
2582
2583         bnx2x_func_init(bp, &func_init);
2584
2585         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2586
2587         /*
2588          * Congestion management values depend on the link rate.
2589          * There is no active link yet, so the initial link rate is set
2590          * to 10 Gbps. When the link comes up, the congestion management
2591          * values are re-calculated according to the actual link rate.
2592          */
2593         bp->link_vars.line_speed = SPEED_10000;
2594         bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2595
2596         /* Only the PMF sets the HW */
2597         if (bp->port.pmf)
2598                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2599
2600         /* no rx until link is up */
2601         bp->rx_mode = BNX2X_RX_MODE_NONE;
2602         bnx2x_set_storm_rx_mode(bp);
2603
2604         /* init Event Queue */
2605         eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2606         eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2607         eq_data.producer = bp->eq_prod;
2608         eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2609         eq_data.sb_id = DEF_SB_ID;
2610         storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2611 }
2612
2613
2614 static void bnx2x_e1h_disable(struct bnx2x *bp)
2615 {
2616         int port = BP_PORT(bp);
2617
2618         netif_tx_disable(bp->dev);
2619
2620         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2621
2622         netif_carrier_off(bp->dev);
2623 }
2624
2625 static void bnx2x_e1h_enable(struct bnx2x *bp)
2626 {
2627         int port = BP_PORT(bp);
2628
2629         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2630
2631         /* Only the Tx queues need to be re-enabled */
2632         netif_tx_wake_all_queues(bp->dev);
2633
2634         /*
2635          * netif_carrier_on is deliberately not called here, since it
2636          * will be called by the link state check if the link is up
2637          */
2638 }
2639
2640 /* called due to MCP event (on pmf):
2641  *      reread new bandwidth configuration
2642  *      configure FW
2643  *      notify other functions about the change
2644  */
2645 static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
2646 {
2647         if (bp->link_vars.link_up) {
2648                 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2649                 bnx2x_link_sync_notify(bp);
2650         }
2651         storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2652 }
2653
2654 static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
2655 {
2656         bnx2x_config_mf_bw(bp);
2657         bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
2658 }
2659
2660 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2661 {
2662         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2663
2664         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2665
2666                 /*
2667                  * This is the only place besides the function initialization
2668                  * where the bp->flags can change so it is done without any
2669                  * locks
2670                  */
2671                 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2672                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2673                         bp->flags |= MF_FUNC_DIS;
2674
2675                         bnx2x_e1h_disable(bp);
2676                 } else {
2677                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2678                         bp->flags &= ~MF_FUNC_DIS;
2679
2680                         bnx2x_e1h_enable(bp);
2681                 }
2682                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2683         }
2684         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2685                 bnx2x_config_mf_bw(bp);
2686                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2687         }
2688
2689         /* Report results to MCP */
2690         if (dcc_event)
2691                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2692         else
2693                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2694 }
2695
2696 /* must be called under the spq lock */
2697 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2698 {
2699         struct eth_spe *next_spe = bp->spq_prod_bd;
2700
2701         if (bp->spq_prod_bd == bp->spq_last_bd) {
2702                 bp->spq_prod_bd = bp->spq;
2703                 bp->spq_prod_idx = 0;
2704                 DP(NETIF_MSG_TIMER, "end of spq\n");
2705         } else {
2706                 bp->spq_prod_bd++;
2707                 bp->spq_prod_idx++;
2708         }
2709         return next_spe;
2710 }
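
/*
 * Ring-wrap sketch: producing at the last BD resets spq_prod_bd to the
 * base of the ring and spq_prod_idx to 0, so the producer value written
 * to the XSTORM (see bnx2x_sp_prod_update below) always stays within
 * the ring. This is why both helpers must run under the spq lock.
 */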
2711
2712 /* must be called under the spq lock */
2713 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2714 {
2715         int func = BP_FUNC(bp);
2716
2717         /* Make sure that BD data is updated before writing the producer */
2718         wmb();
2719
2720         REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2721                  bp->spq_prod_idx);
2722         mmiowb();
2723 }
2724
2725 /* the slow path queue is odd since completions arrive on the fastpath ring */
2726 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2727                   u32 data_hi, u32 data_lo, int common)
2728 {
2729         struct eth_spe *spe;
2730         u16 type;
2731
2732 #ifdef BNX2X_STOP_ON_ERROR
2733         if (unlikely(bp->panic))
2734                 return -EIO;
2735 #endif
2736
2737         spin_lock_bh(&bp->spq_lock);
2738
2739         if (!atomic_read(&bp->spq_left)) {
2740                 BNX2X_ERR("BUG! SPQ ring full!\n");
2741                 spin_unlock_bh(&bp->spq_lock);
2742                 bnx2x_panic();
2743                 return -EBUSY;
2744         }
2745
2746         spe = bnx2x_sp_get_next(bp);
2747
2748         /* CID needs port number to be encoded in it */
2749         spe->hdr.conn_and_cmd_data =
2750                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2751                                     HW_CID(bp, cid));
2752
2753         if (common)
2754                 /* Common ramrods:
2755                  *      FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2756                  *      TRAFFIC_STOP, TRAFFIC_START
2757                  */
2758                 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2759                         & SPE_HDR_CONN_TYPE;
2760         else
2761                 /* ETH ramrods: SETUP, HALT */
2762                 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2763                         & SPE_HDR_CONN_TYPE;
2764
2765         type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2766                  SPE_HDR_FUNCTION_ID);
2767
2768         spe->hdr.type = cpu_to_le16(type);
2769
2770         spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2771         spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2772
2773         /* stats ramrod has its own slot on the spq */
2774         if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2775                 /* It's ok if the actual decrement is issued towards the memory
2776                  * somewhere between the spin_lock and spin_unlock. Thus no
2777                  * more explicit memory barrier is needed.
2778                  */
2779                 atomic_dec(&bp->spq_left);
2780
2781         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2782            "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x) "
2783            "type(0x%x) left %x\n",
2784            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2785            (u32)(U64_LO(bp->spq_mapping) +
2786            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2787            HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
2788
2789         bnx2x_sp_prod_update(bp);
2790         spin_unlock_bh(&bp->spq_lock);
2791         return 0;
2792 }
2793
2794 /* acquire split MCP access lock register */
2795 static int bnx2x_acquire_alr(struct bnx2x *bp)
2796 {
2797         u32 j, val;
2798         int rc = 0;
2799
2800         might_sleep();
2801         for (j = 0; j < 1000; j++) {
2802                 val = (1UL << 31);
2803                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2804                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2805                 if (val & (1UL << 31))
2806                         break;
2807
2808                 msleep(5);
2809         }
2810         if (!(val & (1UL << 31))) {
2811                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2812                 rc = -EBUSY;
2813         }
2814
2815         return rc;
2816 }
2817
2818 /* release split MCP access lock register */
2819 static void bnx2x_release_alr(struct bnx2x *bp)
2820 {
2821         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2822 }
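/* Hedged usage sketch: the ALR brackets register accesses that the MCP
 * (or the other port) may also perform; bnx2x_acquire_alr() polls for
 * roughly 5 seconds (1000 iterations of msleep(5)) before giving up.
 */
#if 0
static void bnx2x_alr_usage_example(struct bnx2x *bp)
{
        if (bnx2x_acquire_alr(bp))
                return;         /* -EBUSY: lock was never granted */
        /* ... touch registers shared with the MCP here ... */
        bnx2x_release_alr(bp);
}
#endif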
2823
2824 #define BNX2X_DEF_SB_ATT_IDX    0x0001
2825 #define BNX2X_DEF_SB_IDX        0x0002
2826
2827 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2828 {
2829         struct host_sp_status_block *def_sb = bp->def_status_blk;
2830         u16 rc = 0;
2831
2832         barrier(); /* status block is written to by the chip */
2833         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2834                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2835                 rc |= BNX2X_DEF_SB_ATT_IDX;
2836         }
2837
2838         if (bp->def_idx != def_sb->sp_sb.running_index) {
2839                 bp->def_idx = def_sb->sp_sb.running_index;
2840                 rc |= BNX2X_DEF_SB_IDX;
2841         }
2842
2843         /* Do not reorder: reading the indices must complete before handling */
2844         barrier();
2845         return rc;
2846 }
2847
2848 /*
2849  * slow path service functions
2850  */
2851
2852 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2853 {
2854         int port = BP_PORT(bp);
2855         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2856                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2857         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2858                                        NIG_REG_MASK_INTERRUPT_PORT0;
2859         u32 aeu_mask;
2860         u32 nig_mask = 0;
2861         u32 reg_addr;
2862
2863         if (bp->attn_state & asserted)
2864                 BNX2X_ERR("IGU ERROR\n");
2865
2866         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2867         aeu_mask = REG_RD(bp, aeu_addr);
2868
2869         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2870            aeu_mask, asserted);
2871         aeu_mask &= ~(asserted & 0x3ff);
2872         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2873
2874         REG_WR(bp, aeu_addr, aeu_mask);
2875         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2876
2877         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2878         bp->attn_state |= asserted;
2879         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2880
2881         if (asserted & ATTN_HARD_WIRED_MASK) {
2882                 if (asserted & ATTN_NIG_FOR_FUNC) {
2883
2884                         bnx2x_acquire_phy_lock(bp);
2885
2886                         /* save nig interrupt mask */
2887                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2888                         REG_WR(bp, nig_int_mask_addr, 0);
2889
2890                         bnx2x_link_attn(bp);
2891
2892                         /* handle unicore attn? */
2893                 }
2894                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2895                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2896
2897                 if (asserted & GPIO_2_FUNC)
2898                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2899
2900                 if (asserted & GPIO_3_FUNC)
2901                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2902
2903                 if (asserted & GPIO_4_FUNC)
2904                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2905
2906                 if (port == 0) {
2907                         if (asserted & ATTN_GENERAL_ATTN_1) {
2908                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2909                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2910                         }
2911                         if (asserted & ATTN_GENERAL_ATTN_2) {
2912                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2913                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2914                         }
2915                         if (asserted & ATTN_GENERAL_ATTN_3) {
2916                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2917                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2918                         }
2919                 } else {
2920                         if (asserted & ATTN_GENERAL_ATTN_4) {
2921                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2922                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2923                         }
2924                         if (asserted & ATTN_GENERAL_ATTN_5) {
2925                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2926                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2927                         }
2928                         if (asserted & ATTN_GENERAL_ATTN_6) {
2929                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2930                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2931                         }
2932                 }
2933
2934         } /* if hardwired */
2935
2936         if (bp->common.int_block == INT_BLOCK_HC)
2937                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2938                             COMMAND_REG_ATTN_BITS_SET);
2939         else
2940                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2941
2942         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2943            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2944         REG_WR(bp, reg_addr, asserted);
2945
2946         /* now set back the mask */
2947         if (asserted & ATTN_NIG_FOR_FUNC) {
2948                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2949                 bnx2x_release_phy_lock(bp);
2950         }
2951 }
2952
2953 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2954 {
2955         int port = BP_PORT(bp);
2956         u32 ext_phy_config;
2957         /* mark the failure */
2958         ext_phy_config =
2959                 SHMEM_RD(bp,
2960                          dev_info.port_hw_config[port].external_phy_config);
2961
2962         ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2963         ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2964         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2965                  ext_phy_config);
2966
2967         /* log the failure */
2968         netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2969                " the driver to shutdown the card to prevent permanent"
2970                " damage.  Please contact OEM Support for assistance\n");
2971 }
2972
2973 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2974 {
2975         int port = BP_PORT(bp);
2976         int reg_offset;
2977         u32 val;
2978
2979         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2980                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2981
2982         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2983
2984                 val = REG_RD(bp, reg_offset);
2985                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2986                 REG_WR(bp, reg_offset, val);
2987
2988                 BNX2X_ERR("SPIO5 hw attention\n");
2989
2990                 /* Fan failure attention */
2991                 bnx2x_hw_reset_phy(&bp->link_params);
2992                 bnx2x_fan_failure(bp);
2993         }
2994
2995         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2996                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2997                 bnx2x_acquire_phy_lock(bp);
2998                 bnx2x_handle_module_detect_int(&bp->link_params);
2999                 bnx2x_release_phy_lock(bp);
3000         }
3001
3002         if (attn & HW_INTERRUT_ASSERT_SET_0) {
3003
3004                 val = REG_RD(bp, reg_offset);
3005                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3006                 REG_WR(bp, reg_offset, val);
3007
3008                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3009                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3010                 bnx2x_panic();
3011         }
3012 }
3013
3014 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3015 {
3016         u32 val;
3017
3018         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3019
3020                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3021                 BNX2X_ERR("DB hw attention 0x%x\n", val);
3022                 /* DORQ discard attention */
3023                 if (val & 0x2)
3024                         BNX2X_ERR("FATAL error from DORQ\n");
3025         }
3026
3027         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3028
3029                 int port = BP_PORT(bp);
3030                 int reg_offset;
3031
3032                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3033                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3034
3035                 val = REG_RD(bp, reg_offset);
3036                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3037                 REG_WR(bp, reg_offset, val);
3038
3039                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3040                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3041                 bnx2x_panic();
3042         }
3043 }
3044
3045 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3046 {
3047         u32 val;
3048
3049         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3050
3051                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3052                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3053                 /* CFC error attention */
3054                 if (val & 0x2)
3055                         BNX2X_ERR("FATAL error from CFC\n");
3056         }
3057
3058         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3059
3060                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3061                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3062                 /* RQ_USDMDP_FIFO_OVERFLOW */
3063                 if (val & 0x18000)
3064                         BNX2X_ERR("FATAL error from PXP\n");
3065                 if (CHIP_IS_E2(bp)) {
3066                         val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3067                         BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3068                 }
3069         }
3070
3071         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3072
3073                 int port = BP_PORT(bp);
3074                 int reg_offset;
3075
3076                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3077                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3078
3079                 val = REG_RD(bp, reg_offset);
3080                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3081                 REG_WR(bp, reg_offset, val);
3082
3083                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3084                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3085                 bnx2x_panic();
3086         }
3087 }
3088
3089 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3090 {
3091         u32 val;
3092
3093         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3094
3095                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3096                         int func = BP_FUNC(bp);
3097
3098                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3099                         bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3100                                         func_mf_config[BP_ABS_FUNC(bp)].config);
3101                         val = SHMEM_RD(bp,
3102                                        func_mb[BP_FW_MB_IDX(bp)].drv_status);
3103                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3104                                 bnx2x_dcc_event(bp,
3105                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3106
3107                         if (val & DRV_STATUS_SET_MF_BW)
3108                                 bnx2x_set_mf_bw(bp);
3109
3110                         bnx2x__link_status_update(bp);
3111                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3112                                 bnx2x_pmf_update(bp);
3113
3114                         if (bp->port.pmf &&
3115                             (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3116                                 bp->dcbx_enabled > 0)
3117                                 /* start dcbx state machine */
3118                                 bnx2x_dcbx_set_params(bp,
3119                                         BNX2X_DCBX_STATE_NEG_RECEIVED);
3120                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3121
3122                         BNX2X_ERR("MC assert!\n");
3123                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3124                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3125                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3126                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3127                         bnx2x_panic();
3128
3129                 } else if (attn & BNX2X_MCP_ASSERT) {
3130
3131                         BNX2X_ERR("MCP assert!\n");
3132                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3133                         bnx2x_fw_dump(bp);
3134
3135                 } else
3136                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3137         }
3138
3139         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3140                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3141                 if (attn & BNX2X_GRC_TIMEOUT) {
3142                         val = CHIP_IS_E1(bp) ? 0 :
3143                                         REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
3144                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3145                 }
3146                 if (attn & BNX2X_GRC_RSV) {
3147                         val = CHIP_IS_E1(bp) ? 0 :
3148                                         REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
3149                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3150                 }
3151                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3152         }
3153 }
3154
3155 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
3156 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
3157 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3158 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
3159 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
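/*
 * Worked example of the layout above: bits [15:0] hold the load counter
 * and the bits above them form the reset flag (the setters below use
 * bit 16). A register value of 0x00010003 therefore means a reset is in
 * progress while three functions are loaded; 0x00000002 means no reset
 * is pending and two functions are loaded.
 */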
3160
3161 /*
3162  * should be run under rtnl lock
3163  */
3164 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3165 {
3166         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3167         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3168         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3169         barrier();
3170         mmiowb();
3171 }
3172
3173 /*
3174  * should be run under rtnl lock
3175  */
3176 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3177 {
3178         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3179         val |= (1 << 16);
3180         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3181         barrier();
3182         mmiowb();
3183 }
3184
3185 /*
3186  * should be run under rtnl lock
3187  */
3188 bool bnx2x_reset_is_done(struct bnx2x *bp)
3189 {
3190         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3191         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3192         return (val & RESET_DONE_FLAG_MASK) ? false : true;
3193 }
3194
3195 /*
3196  * should be run under rtnl lock
3197  */
3198 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3199 {
3200         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3201
3202         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3203
3204         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3205         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3206         barrier();
3207         mmiowb();
3208 }
3209
3210 /*
3211  * should be run under rtnl lock
3212  */
3213 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3214 {
3215         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3216
3217         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3218
3219         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3220         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3221         barrier();
3222         mmiowb();
3223
3224         return val1;
3225 }
3226
3227 /*
3228  * should be run under rtnl lock
3229  */
3230 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3231 {
3232         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3233 }
3234
3235 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3236 {
3237         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3238         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3239 }
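/* Hedged sketch of how the load counter helpers above are meant to be
 * used (assumed flow, all under the rtnl lock): the function that
 * decrements the counter to zero owns the final common cleanup.
 */
#if 0
static void bnx2x_load_cnt_example(struct bnx2x *bp)
{
        bnx2x_inc_load_cnt(bp);                 /* on nic load */
        /* ... device runs ... */
        if (bnx2x_dec_load_cnt(bp) == 0) {
                /* last function out: common HW cleanup would go here */
        }
}
#endif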
3240
3241 static inline void _print_next_block(int idx, const char *blk)
3242 {
3243         if (idx)
3244                 pr_cont(", ");
3245         pr_cont("%s", blk);
3246 }
3247
3248 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3249 {
3250         int i = 0;
3251         u32 cur_bit = 0;
3252         for (i = 0; sig; i++) {
3253                 cur_bit = ((u32)0x1 << i);
3254                 if (sig & cur_bit) {
3255                         switch (cur_bit) {
3256                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3257                                 _print_next_block(par_num++, "BRB");
3258                                 break;
3259                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3260                                 _print_next_block(par_num++, "PARSER");
3261                                 break;
3262                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3263                                 _print_next_block(par_num++, "TSDM");
3264                                 break;
3265                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3266                                 _print_next_block(par_num++, "SEARCHER");
3267                                 break;
3268                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3269                                 _print_next_block(par_num++, "TSEMI");
3270                                 break;
3271                         }
3272
3273                         /* Clear the bit */
3274                         sig &= ~cur_bit;
3275                 }
3276         }
3277
3278         return par_num;
3279 }
3280
3281 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3282 {
3283         int i = 0;
3284         u32 cur_bit = 0;
3285         for (i = 0; sig; i++) {
3286                 cur_bit = ((u32)0x1 << i);
3287                 if (sig & cur_bit) {
3288                         switch (cur_bit) {
3289                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3290                                 _print_next_block(par_num++, "PBCLIENT");
3291                                 break;
3292                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3293                                 _print_next_block(par_num++, "QM");
3294                                 break;
3295                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3296                                 _print_next_block(par_num++, "XSDM");
3297                                 break;
3298                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3299                                 _print_next_block(par_num++, "XSEMI");
3300                                 break;
3301                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3302                                 _print_next_block(par_num++, "DOORBELLQ");
3303                                 break;
3304                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3305                                 _print_next_block(par_num++, "VAUX PCI CORE");
3306                                 break;
3307                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3308                                 _print_next_block(par_num++, "DEBUG");
3309                                 break;
3310                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3311                                 _print_next_block(par_num++, "USDM");
3312                                 break;
3313                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3314                                 _print_next_block(par_num++, "USEMI");
3315                                 break;
3316                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3317                                 _print_next_block(par_num++, "UPB");
3318                                 break;
3319                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3320                                 _print_next_block(par_num++, "CSDM");
3321                                 break;
3322                         }
3323
3324                         /* Clear the bit */
3325                         sig &= ~cur_bit;
3326                 }
3327         }
3328
3329         return par_num;
3330 }
3331
3332 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3333 {
3334         int i = 0;
3335         u32 cur_bit = 0;
3336         for (i = 0; sig; i++) {
3337                 cur_bit = ((u32)0x1 << i);
3338                 if (sig & cur_bit) {
3339                         switch (cur_bit) {
3340                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3341                                 _print_next_block(par_num++, "CSEMI");
3342                                 break;
3343                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3344                                 _print_next_block(par_num++, "PXP");
3345                                 break;
3346                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3347                                 _print_next_block(par_num++,
3348                                         "PXPPCICLOCKCLIENT");
3349                                 break;
3350                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3351                                 _print_next_block(par_num++, "CFC");
3352                                 break;
3353                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3354                                 _print_next_block(par_num++, "CDU");
3355                                 break;
3356                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3357                                 _print_next_block(par_num++, "IGU");
3358                                 break;
3359                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3360                                 _print_next_block(par_num++, "MISC");
3361                                 break;
3362                         }
3363
3364                         /* Clear the bit */
3365                         sig &= ~cur_bit;
3366                 }
3367         }
3368
3369         return par_num;
3370 }
3371
3372 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3373 {
3374         int i = 0;
3375         u32 cur_bit = 0;
3376         for (i = 0; sig; i++) {
3377                 cur_bit = ((u32)0x1 << i);
3378                 if (sig & cur_bit) {
3379                         switch (cur_bit) {
3380                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3381                                 _print_next_block(par_num++, "MCP ROM");
3382                                 break;
3383                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3384                                 _print_next_block(par_num++, "MCP UMP RX");
3385                                 break;
3386                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3387                                 _print_next_block(par_num++, "MCP UMP TX");
3388                                 break;
3389                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3390                                 _print_next_block(par_num++, "MCP SCPAD");
3391                                 break;
3392                         }
3393
3394                         /* Clear the bit */
3395                         sig &= ~cur_bit;
3396                 }
3397         }
3398
3399         return par_num;
3400 }
3401
3402 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3403                                      u32 sig2, u32 sig3)
3404 {
3405         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3406             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3407                 int par_num = 0;
3408                 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3409                         "[0]:0x%08x [1]:0x%08x "
3410                         "[2]:0x%08x [3]:0x%08x\n",
3411                           sig0 & HW_PRTY_ASSERT_SET_0,
3412                           sig1 & HW_PRTY_ASSERT_SET_1,
3413                           sig2 & HW_PRTY_ASSERT_SET_2,
3414                           sig3 & HW_PRTY_ASSERT_SET_3);
3415                 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3416                        bp->dev->name);
3417                 par_num = bnx2x_print_blocks_with_parity0(
3418                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3419                 par_num = bnx2x_print_blocks_with_parity1(
3420                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3421                 par_num = bnx2x_print_blocks_with_parity2(
3422                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3423                 par_num = bnx2x_print_blocks_with_parity3(
3424                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3425                 printk("\n");
3426                 return true;
3427         } else
3428                 return false;
3429 }
3430
3431 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3432 {
3433         struct attn_route attn;
3434         int port = BP_PORT(bp);
3435
3436         attn.sig[0] = REG_RD(bp,
3437                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3438                              port*4);
3439         attn.sig[1] = REG_RD(bp,
3440                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3441                              port*4);
3442         attn.sig[2] = REG_RD(bp,
3443                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3444                              port*4);
3445         attn.sig[3] = REG_RD(bp,
3446                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3447                              port*4);
3448
3449         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3450                                         attn.sig[3]);
3451 }
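/* Example of the resulting console output (hypothetical block names),
 * assembled by the _print_next_block() calls above:
 *
 *   eth0: Parity errors detected in blocks: BRB, QM, MCP ROM
 */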
3452
3453
3454 static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3455 {
3456         u32 val;
3457         if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3458
3459                 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3460                 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3461                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3462                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3463                                   "ADDRESS_ERROR\n");
3464                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3465                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3466                                   "INCORRECT_RCV_BEHAVIOR\n");
3467                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3468                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3469                                   "WAS_ERROR_ATTN\n");
3470                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3471                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3472                                   "VF_LENGTH_VIOLATION_ATTN\n");
3473                 if (val &
3474                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3475                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3476                                   "VF_GRC_SPACE_VIOLATION_ATTN\n");
3477                 if (val &
3478                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3479                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3480                                   "VF_MSIX_BAR_VIOLATION_ATTN\n");
3481                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3482                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3483                                   "TCPL_ERROR_ATTN\n");
3484                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3485                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3486                                   "TCPL_IN_TWO_RCBS_ATTN\n");
3487                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3488                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3489                                   "CSSNOOP_FIFO_OVERFLOW\n");
3490         }
3491         if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3492                 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3493                 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3494                 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3495                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3496                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3497                         BNX2X_ERR("ATC_ATC_INT_STS_REG"
3498                                   "_ATC_TCPL_TO_NOT_PEND\n");
3499                 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3500                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3501                                   "ATC_GPA_MULTIPLE_HITS\n");
3502                 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3503                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3504                                   "ATC_RCPL_TO_EMPTY_CNT\n");
3505                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3506                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3507                 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3508                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3509                                   "ATC_IREQ_LESS_THAN_STU\n");
3510         }
3511
3512         if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3513                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3514                 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3515                 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3516                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3517         }
3518
3519 }
3520
3521 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3522 {
3523         struct attn_route attn, *group_mask;
3524         int port = BP_PORT(bp);
3525         int index;
3526         u32 reg_addr;
3527         u32 val;
3528         u32 aeu_mask;
3529
3530         /* need to take HW lock because MCP or other port might also
3531            try to handle this event */
3532         bnx2x_acquire_alr(bp);
3533
3534         if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
3535                 bp->recovery_state = BNX2X_RECOVERY_INIT;
3536                 bnx2x_set_reset_in_progress(bp);
3537                 schedule_delayed_work(&bp->reset_task, 0);
3538                 /* Disable HW interrupts */
3539                 bnx2x_int_disable(bp);
3540                 bnx2x_release_alr(bp);
3541                 /* In case of parity errors don't handle attentions so that
3542                  * the other function would also "see" the parity errors.
3543                  */
3544                 return;
3545         }
3546
3547         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3548         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3549         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3550         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3551         if (CHIP_IS_E2(bp))
3552                 attn.sig[4] =
3553                       REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3554         else
3555                 attn.sig[4] = 0;
3556
3557         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3558            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
3559
3560         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3561                 if (deasserted & (1 << index)) {
3562                         group_mask = &bp->attn_group[index];
3563
3564                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3565                                          "%08x %08x %08x\n",
3566                            index,
3567                            group_mask->sig[0], group_mask->sig[1],
3568                            group_mask->sig[2], group_mask->sig[3],
3569                            group_mask->sig[4]);
3570
3571                         bnx2x_attn_int_deasserted4(bp,
3572                                         attn.sig[4] & group_mask->sig[4]);
3573                         bnx2x_attn_int_deasserted3(bp,
3574                                         attn.sig[3] & group_mask->sig[3]);
3575                         bnx2x_attn_int_deasserted1(bp,
3576                                         attn.sig[1] & group_mask->sig[1]);
3577                         bnx2x_attn_int_deasserted2(bp,
3578                                         attn.sig[2] & group_mask->sig[2]);
3579                         bnx2x_attn_int_deasserted0(bp,
3580                                         attn.sig[0] & group_mask->sig[0]);
3581                 }
3582         }
3583
3584         bnx2x_release_alr(bp);
3585
3586         if (bp->common.int_block == INT_BLOCK_HC)
3587                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3588                             COMMAND_REG_ATTN_BITS_CLR);
3589         else
3590                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
3591
3592         val = ~deasserted;
3593         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3594            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3595         REG_WR(bp, reg_addr, val);
3596
3597         if (~bp->attn_state & deasserted)
3598                 BNX2X_ERR("IGU ERROR\n");
3599
3600         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3601                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3602
3603         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3604         aeu_mask = REG_RD(bp, reg_addr);
3605
3606         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3607            aeu_mask, deasserted);
3608         aeu_mask |= (deasserted & 0x3ff);
3609         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3610
3611         REG_WR(bp, reg_addr, aeu_mask);
3612         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3613
3614         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3615         bp->attn_state &= ~deasserted;
3616         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3617 }
3618
3619 static void bnx2x_attn_int(struct bnx2x *bp)
3620 {
3621         /* read local copy of bits */
3622         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3623                                                                 attn_bits);
3624         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3625                                                                 attn_bits_ack);
3626         u32 attn_state = bp->attn_state;
3627
3628         /* look for changed bits */
3629         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3630         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3631
3632         DP(NETIF_MSG_HW,
3633            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3634            attn_bits, attn_ack, asserted, deasserted);
3635
3636         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3637                 BNX2X_ERR("BAD attention state\n");
3638
3639         /* handle bits that were raised */
3640         if (asserted)
3641                 bnx2x_attn_int_asserted(bp, asserted);
3642
3643         if (deasserted)
3644                 bnx2x_attn_int_deasserted(bp, deasserted);
3645 }
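/*
 * Worked example of the edge detection above (hypothetical values):
 * with attn_bits = 0x5, attn_ack = 0x1 and attn_state = 0x1, only
 * bit 2 is newly raised (asserted = 0x4) and nothing is deasserted;
 * after the asserted handler records it, attn_state becomes 0x5.
 */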
3646
3647 static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3648 {
3649         /* No memory barriers */
3650         storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3651         mmiowb(); /* keep prod updates ordered */
3652 }
3653
3654 #ifdef BCM_CNIC
3655 static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3656                                       union event_ring_elem *elem)
3657 {
3658         if (!bp->cnic_eth_dev.starting_cid  ||
3659             cid < bp->cnic_eth_dev.starting_cid)
3660                 return 1;
3661
3662         DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3663
3664         if (unlikely(elem->message.data.cfc_del_event.error)) {
3665                 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3666                           cid);
3667                 bnx2x_panic_dump(bp);
3668         }
3669         bnx2x_cnic_cfc_comp(bp, cid);
3670         return 0;
3671 }
3672 #endif
3673
3674 static void bnx2x_eq_int(struct bnx2x *bp)
3675 {
3676         u16 hw_cons, sw_cons, sw_prod;
3677         union event_ring_elem *elem;
3678         u32 cid;
3679         u8 opcode;
3680         int spqe_cnt = 0;
3681
3682         hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3683
3684         /* The hw_cons range is 1-255, 257 and the sw_cons range is 0-254, 256.
3685          * When we get to the next-page we need to adjust so the loop
3686          * condition below will be met. The next element is the size of a
3687          * regular element and hence we increment by 1
3688          */
3689         if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3690                 hw_cons++;
3691
3692         /* This function may never run in parallel with itself for a
3693          * specific bp, thus there is no need for a "paired" read memory
3694          * barrier here.
3695          */
3696         sw_cons = bp->eq_cons;
3697         sw_prod = bp->eq_prod;
3698
3699         DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->spq_left %u\n",
3700                         hw_cons, sw_cons, atomic_read(&bp->spq_left));
3701
3702         for (; sw_cons != hw_cons;
3703               sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3704
3705
3706                 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3707
3708                 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3709                 opcode = elem->message.opcode;
3710
3711
3712                 /* handle eq element */
3713                 switch (opcode) {
3714                 case EVENT_RING_OPCODE_STAT_QUERY:
3715                         DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3716                         /* nothing to do with stats comp */
3717                         continue;
3718
3719                 case EVENT_RING_OPCODE_CFC_DEL:
3720                         /* handle according to cid range */
3721                         /*
3722                          * we may want to verify here that the bp state is
3723                          * HALTING
3724                          */
3725                         DP(NETIF_MSG_IFDOWN,
3726                            "got delete ramrod for MULTI[%d]\n", cid);
3727 #ifdef BCM_CNIC
3728                         if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3729                                 goto next_spqe;
3730                         if (cid == BNX2X_FCOE_ETH_CID)
3731                                 bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
3732                         else
3733 #endif
3734                                 bnx2x_fp(bp, cid, state) =
3735                                                 BNX2X_FP_STATE_CLOSED;
3736
3737                         goto next_spqe;
3738
3739                 case EVENT_RING_OPCODE_STOP_TRAFFIC:
3740                         DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
3741                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
3742                         goto next_spqe;
3743                 case EVENT_RING_OPCODE_START_TRAFFIC:
3744                         DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
3745                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
3746                         goto next_spqe;
3747                 }
3748
3749                 switch (opcode | bp->state) {
3750                 case (EVENT_RING_OPCODE_FUNCTION_START |
3751                       BNX2X_STATE_OPENING_WAIT4_PORT):
3752                         DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3753                         bp->state = BNX2X_STATE_FUNC_STARTED;
3754                         break;
3755
3756                 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3757                       BNX2X_STATE_CLOSING_WAIT4_HALT):
3758                         DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3759                         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3760                         break;
3761
3762                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3763                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3764                         DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3765                         bp->set_mac_pending = 0;
3766                         break;
3767
3768                 case (EVENT_RING_OPCODE_SET_MAC |
3769                       BNX2X_STATE_CLOSING_WAIT4_HALT):
3770                         DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3771                         bp->set_mac_pending = 0;
3772                         break;
3773                 default:
3774                         /* unknown event - log an error and continue */
3775                         BNX2X_ERR("Unknown EQ event %d\n",
3776                                   elem->message.opcode);
3777                 }
3778 next_spqe:
3779                 spqe_cnt++;
3780         } /* for */
3781
3782         smp_mb__before_atomic_inc();
3783         atomic_add(spqe_cnt, &bp->spq_left);
3784
3785         bp->eq_cons = sw_cons;
3786         bp->eq_prod = sw_prod;
3787         /* Make sure that the above memory writes were issued towards the memory */
3788         smp_wmb();
3789
3790         /* update producer */
3791         bnx2x_update_eq_prod(bp, bp->eq_prod);
3792 }
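/*
 * Note on the consumer adjustment at the top of bnx2x_eq_int(): when
 * hw_cons lands on the last (next-page) element of a page it is bumped
 * by one so it stays comparable with sw_cons, whose NEXT_EQ_IDX()
 * stepping skips that same element - hence the 1-255, 257 versus
 * 0-254, 256 ranges described above.
 */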
3793
3794 static void bnx2x_sp_task(struct work_struct *work)
3795 {
3796         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3797         u16 status;
3798
3799         /* Return here if interrupt is disabled */
3800         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3801                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3802                 return;
3803         }
3804
3805         status = bnx2x_update_dsb_idx(bp);
3806 /*      if (status == 0)                                     */
3807 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3808
3809         DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3810
3811         /* HW attentions */
3812         if (status & BNX2X_DEF_SB_ATT_IDX) {
3813                 bnx2x_attn_int(bp);
3814                 status &= ~BNX2X_DEF_SB_ATT_IDX;
3815         }
3816
3817         /* SP events: STAT_QUERY and others */
3818         if (status & BNX2X_DEF_SB_IDX) {
3819 #ifdef BCM_CNIC
3820                 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
3821
3822                 if ((!NO_FCOE(bp)) &&
3823                         (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
3824                         napi_schedule(&bnx2x_fcoe(bp, napi));
3825 #endif
3826                 /* Handle EQ completions */
3827                 bnx2x_eq_int(bp);
3828
3829                 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3830                         le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3831
3832                 status &= ~BNX2X_DEF_SB_IDX;
3833         }
3834
3835         if (unlikely(status))
3836                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3837                    status);
3838
3839         bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3840              le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
3841 }
3842
3843 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3844 {
3845         struct net_device *dev = dev_instance;
3846         struct bnx2x *bp = netdev_priv(dev);
3847
3848         /* Return here if interrupt is disabled */
3849         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3850                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3851                 return IRQ_HANDLED;
3852         }
3853
3854         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3855                      IGU_INT_DISABLE, 0);
3856
3857 #ifdef BNX2X_STOP_ON_ERROR
3858         if (unlikely(bp->panic))
3859                 return IRQ_HANDLED;
3860 #endif
3861
3862 #ifdef BCM_CNIC
3863         {
3864                 struct cnic_ops *c_ops;
3865
3866                 rcu_read_lock();
3867                 c_ops = rcu_dereference(bp->cnic_ops);
3868                 if (c_ops)
3869                         c_ops->cnic_handler(bp->cnic_data, NULL);
3870                 rcu_read_unlock();
3871         }
3872 #endif
3873         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3874
3875         return IRQ_HANDLED;
3876 }
3877
3878 /* end of slow path */
3879
3880 static void bnx2x_timer(unsigned long data)
3881 {
3882         struct bnx2x *bp = (struct bnx2x *) data;
3883
3884         if (!netif_running(bp->dev))
3885                 return;
3886
3887         if (atomic_read(&bp->intr_sem) != 0)
3888                 goto timer_restart;
3889
3890         if (poll) {
3891                 struct bnx2x_fastpath *fp = &bp->fp[0];
3892                 int rc;
3893
3894                 bnx2x_tx_int(fp);
3895                 rc = bnx2x_rx_int(fp, 1000);
3896         }
3897
3898         if (!BP_NOMCP(bp)) {
3899                 int mb_idx = BP_FW_MB_IDX(bp);
3900                 u32 drv_pulse;
3901                 u32 mcp_pulse;
3902
3903                 ++bp->fw_drv_pulse_wr_seq;
3904                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3905                 /* TBD - add SYSTEM_TIME */
3906                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3907                 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
3908
3909                 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
3910                              MCP_PULSE_SEQ_MASK);
3911                 /* The delta between driver pulse and mcp response
3912                  * should be 1 (before mcp response) or 0 (after mcp response)
3913                  */
3914                 if ((drv_pulse != mcp_pulse) &&
3915                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3916                         /* someone lost a heartbeat... */
3917                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3918                                   drv_pulse, mcp_pulse);
3919                 }
3920         }
3921
3922         if (bp->state == BNX2X_STATE_OPEN)
3923                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3924
3925 timer_restart:
3926         mod_timer(&bp->timer, jiffies + bp->current_interval);
3927 }
3928
3929 /* end of Statistics */
3930
3931 /* nic init */
3932
3933 /*
3934  * nic init service functions
3935  */
3936
3937 static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
3938 {
3939         u32 i;
3940         if (!(len%4) && !(addr%4))
3941                 for (i = 0; i < len; i += 4)
3942                         REG_WR(bp, addr + i, fill);
3943         else
3944                 for (i = 0; i < len; i++)
3945                         REG_WR8(bp, addr + i, fill);
3946
3947 }
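/* Usage sketch: bnx2x_fill() uses dword writes when both the address
 * and the length are dword-aligned and falls back to byte writes
 * otherwise; e.g. zeroing a status block as done in bnx2x_zero_fp_sb()
 * below:
 *
 *   bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
 *                  CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
 *                  CSTORM_STATUS_BLOCK_SIZE);
 */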
3948
3949 /* helper: writes FP SP data to FW - data_size in dwords */
3950 static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3951                                        int fw_sb_id,
3952                                        u32 *sb_data_p,
3953                                        u32 data_size)
3954 {
3955         int index;
3956         for (index = 0; index < data_size; index++)
3957                 REG_WR(bp, BAR_CSTRORM_INTMEM +
3958                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3959                         sizeof(u32)*index,
3960                         *(sb_data_p + index));
3961 }
3962
3963 static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3964 {
3965         u32 *sb_data_p;
3966         u32 data_size = 0;
3967         struct hc_status_block_data_e2 sb_data_e2;
3968         struct hc_status_block_data_e1x sb_data_e1x;
3969
3970         /* disable the function first */
3971         if (CHIP_IS_E2(bp)) {
3972                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3973                 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3974                 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3975                 sb_data_e2.common.p_func.vf_valid = false;
3976                 sb_data_p = (u32 *)&sb_data_e2;
3977                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3978         } else {
3979                 memset(&sb_data_e1x, 0,
3980                        sizeof(struct hc_status_block_data_e1x));
3981                 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3982                 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3983                 sb_data_e1x.common.p_func.vf_valid = false;
3984                 sb_data_p = (u32 *)&sb_data_e1x;
3985                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3986         }
3987         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3988
3989         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3990                         CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3991                         CSTORM_STATUS_BLOCK_SIZE);
3992         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3993                         CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3994                         CSTORM_SYNC_BLOCK_SIZE);
3995 }
3996
3997 /* helper:  writes SP SB data to FW */
3998 static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3999                 struct hc_sp_status_block_data *sp_sb_data)
4000 {
4001         int func = BP_FUNC(bp);
4002         int i;
4003         for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
4004                 REG_WR(bp, BAR_CSTRORM_INTMEM +
4005                         CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
4006                         i*sizeof(u32),
4007                         *((u32 *)sp_sb_data + i));
4008 }
4009
4010 static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
4011 {
4012         int func = BP_FUNC(bp);
4013         struct hc_sp_status_block_data sp_sb_data;
4014         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4015
4016         sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
4017         sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
4018         sp_sb_data.p_func.vf_valid = false;
4019
4020         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4021
4022         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4023                         CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
4024                         CSTORM_SP_STATUS_BLOCK_SIZE);
4025         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4026                         CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
4027                         CSTORM_SP_SYNC_BLOCK_SIZE);
4028
4029 }
4030
4031
4032 static inline
4033 void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4034                                            int igu_sb_id, int igu_seg_id)
4035 {
4036         hc_sm->igu_sb_id = igu_sb_id;
4037         hc_sm->igu_seg_id = igu_seg_id;
4038         hc_sm->timer_value = 0xFF;
4039         hc_sm->time_to_expire = 0xFFFFFFFF;
4040 }
4041
4042 static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4043                           u8 vf_valid, int fw_sb_id, int igu_sb_id)
4044 {
4045         int igu_seg_id;
4046
4047         struct hc_status_block_data_e2 sb_data_e2;
4048         struct hc_status_block_data_e1x sb_data_e1x;
4049         struct hc_status_block_sm  *hc_sm_p;
4050         struct hc_index_data *hc_index_p;
4051         int data_size;
4052         u32 *sb_data_p;
4053
4054         if (CHIP_INT_MODE_IS_BC(bp))
4055                 igu_seg_id = HC_SEG_ACCESS_NORM;
4056         else
4057                 igu_seg_id = IGU_SEG_ACCESS_NORM;
4058
4059         bnx2x_zero_fp_sb(bp, fw_sb_id);
4060
4061         if (CHIP_IS_E2(bp)) {
4062                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
4063                 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
4064                 sb_data_e2.common.p_func.vf_id = vfid;
4065                 sb_data_e2.common.p_func.vf_valid = vf_valid;
4066                 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
4067                 sb_data_e2.common.same_igu_sb_1b = true;
4068                 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
4069                 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
4070                 hc_sm_p = sb_data_e2.common.state_machine;
4071                 hc_index_p = sb_data_e2.index_data;
4072                 sb_data_p = (u32 *)&sb_data_e2;
4073                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
4074         } else {
4075                 memset(&sb_data_e1x, 0,
4076                        sizeof(struct hc_status_block_data_e1x));
4077                 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
4078                 sb_data_e1x.common.p_func.vf_id = 0xff;
4079                 sb_data_e1x.common.p_func.vf_valid = false;
4080                 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
4081                 sb_data_e1x.common.same_igu_sb_1b = true;
4082                 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
4083                 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
4084                 hc_sm_p = sb_data_e1x.common.state_machine;
4085                 hc_index_p = sb_data_e1x.index_data;
4086                 sb_data_p = (u32 *)&sb_data_e1x;
4087                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4088         }
4089
4090         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
4091                                        igu_sb_id, igu_seg_id);
4092         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
4093                                        igu_sb_id, igu_seg_id);
4094
4095         DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
4096
4097         /* write indices to HW */
4098         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
4099 }
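/* Hedged call sketch for bnx2x_init_sb() (the field names on 'fp' are
 * assumptions for illustration): a PF status block passes a dummy vfid
 * with vf_valid = false, mirroring the e1x branch above.
 */
#if 0
bnx2x_init_sb(bp, fp->status_blk_mapping, 0xff, false,
              fp->fw_sb_id, fp->igu_sb_id);
#endif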
4100
4101 static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
4102                                         u8 sb_index, u8 disable, u16 usec)
4103 {
4104         int port = BP_PORT(bp);
4105         u8 ticks = usec / BNX2X_BTR;
4106
4107         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4108
4109         disable = disable ? 1 : (usec ? 0 : 1);
4110         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4111 }
4112
4113 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4114                                      u16 tx_usec, u16 rx_usec)
4115 {
4116         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4117                                     false, rx_usec);
4118         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4119                                     false, tx_usec);
4120 }
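/* Worked example for the coalescing helpers above: usec is converted
 * to timer ticks as usec / BNX2X_BTR, and a zero usec value disables
 * the index even when 'disable' is not set. Assuming for illustration
 * BNX2X_BTR == 4, rx_usec = 25 yields 6 ticks, while rx_usec = 0 sets
 * the disable flag instead.
 */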
4121
4122 static void bnx2x_init_def_sb(struct bnx2x *bp)
4123 {
4124         struct host_sp_status_block *def_sb = bp->def_status_blk;
4125         dma_addr_t mapping = bp->def_status_blk_mapping;
4126         int igu_sp_sb_index;
4127         int igu_seg_id;
4128         int port = BP_PORT(bp);
4129         int func = BP_FUNC(bp);
4130         int reg_offset;
4131         u64 section;
4132         int index;
4133         struct hc_sp_status_block_data sp_sb_data;
4134         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4135
4136         if (CHIP_INT_MODE_IS_BC(bp)) {
4137                 igu_sp_sb_index = DEF_SB_IGU_ID;
4138                 igu_seg_id = HC_SEG_ACCESS_DEF;
4139         } else {
4140                 igu_sp_sb_index = bp->igu_dsb_id;
4141                 igu_seg_id = IGU_SEG_ACCESS_DEF;
4142         }
4143
4144         /* ATTN */
4145         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4146                                             atten_status_block);
4147         def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
4148
4149         bp->attn_state = 0;
4150
4151         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4152                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4153         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4154                 int sindex;
4155                 /* take care of sig[0]..sig[4] */
4156                 for (sindex = 0; sindex < 4; sindex++)
4157                         bp->attn_group[index].sig[sindex] =
4158                            REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
4159
4160                 if (CHIP_IS_E2(bp))
4161                         /*
4162                          * enable5 is separate from the rest of the registers,
4163                          * and therefore the address skip is 4
4164                          * and not 16 between the different groups
4165                          */
4166                         bp->attn_group[index].sig[4] = REG_RD(bp,
4167                                         reg_offset + 0x10 + 0x4*index);
4168                 else
4169                         bp->attn_group[index].sig[4] = 0;
4170         }
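
        /*
         * Editorial note, restating the arithmetic above: sig[0..3] of
         * attention group g are read at reg_offset + s*0x4 + g*0x10, while
         * the separate enable5 value for group g is read at
         * reg_offset + 0x10 + g*0x4, i.e. consecutive registers rather
         * than a 16-byte group stride.
         */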
4171
4172         if (bp->common.int_block == INT_BLOCK_HC) {
4173                 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4174                                      HC_REG_ATTN_MSG0_ADDR_L);
4175
4176                 REG_WR(bp, reg_offset, U64_LO(section));
4177                 REG_WR(bp, reg_offset + 4, U64_HI(section));
4178         } else if (CHIP_IS_E2(bp)) {
4179                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4180                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4181         }
4182
4183         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4184                                             sp_sb);
4185
4186         bnx2x_zero_sp_sb(bp);
4187
4188         sp_sb_data.host_sb_addr.lo      = U64_LO(section);
4189         sp_sb_data.host_sb_addr.hi      = U64_HI(section);
4190         sp_sb_data.igu_sb_id            = igu_sp_sb_index;
4191         sp_sb_data.igu_seg_id           = igu_seg_id;
4192         sp_sb_data.p_func.pf_id         = func;
4193         sp_sb_data.p_func.vnic_id       = BP_VN(bp);
4194         sp_sb_data.p_func.vf_id         = 0xff;
4195
4196         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4197
4198         bp->stats_pending = 0;
4199         bp->set_mac_pending = 0;
4200
4201         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
4202 }
4203
4204 void bnx2x_update_coalesce(struct bnx2x *bp)
4205 {
4206         int i;
4207
4208         for_each_eth_queue(bp, i)
4209                 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4210                                          bp->rx_ticks, bp->tx_ticks);
4211 }
4212
4213 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4214 {
4215         spin_lock_init(&bp->spq_lock);
4216         atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
4217
4218         bp->spq_prod_idx = 0;
4219         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4220         bp->spq_prod_bd = bp->spq;
4221         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4222 }
4223
4224 static void bnx2x_init_eq_ring(struct bnx2x *bp)
4225 {
4226         int i;
4227         for (i = 1; i <= NUM_EQ_PAGES; i++) {
4228                 union event_ring_elem *elem =
4229                         &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
4230
4231                 elem->next_page.addr.hi =
4232                         cpu_to_le32(U64_HI(bp->eq_mapping +
4233                                    BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4234                 elem->next_page.addr.lo =
4235                         cpu_to_le32(U64_LO(bp->eq_mapping +
4236                                    BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
4237         }
4238         bp->eq_cons = 0;
4239         bp->eq_prod = NUM_EQ_DESC;
4240         bp->eq_cons_sb = BNX2X_EQ_INDEX;
4241 }
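
/*
 * Editorial note: the loop above turns the EQ pages into a ring.  The last
 * descriptor of page i (1-based) holds the DMA address of page
 * (i % NUM_EQ_PAGES), so pages 1..N-1 each point at their successor and
 * the final page wraps back to page 0.
 */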
4242
4243 static void bnx2x_init_ind_table(struct bnx2x *bp)
4244 {
4245         int func = BP_FUNC(bp);
4246         int i;
4247
4248         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4249                 return;
4250
4251         DP(NETIF_MSG_IFUP,
4252            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4253         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4254                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4255                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4256                         bp->fp->cl_id + (i % (bp->num_queues -
4257                                 NONE_ETH_CONTEXT_USE)));
4258 }
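
/*
 * Editorial sketch (hypothetical helper, not driver code): each of the
 * TSTORM_INDIRECTION_TABLE_SIZE entries maps an RSS hash bucket to an L2
 * client id, spread round-robin over the ethernet queues exactly as in
 * the loop above:
 */
static inline u8 bnx2x_ind_table_entry(struct bnx2x *bp, int i)
{
        return bp->fp->cl_id +
               (i % (bp->num_queues - NONE_ETH_CONTEXT_USE));
}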
4259
4260 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4261 {
4262         int mode = bp->rx_mode;
4263         int port = BP_PORT(bp);
4264         u16 cl_id;
4265         u32 def_q_filters = 0;
4266
4267         /* All but management unicast packets should pass to the host as well */
4268         u32 llh_mask =
4269                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4270                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4271                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4272                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
4273
4274         switch (mode) {
4275         case BNX2X_RX_MODE_NONE: /* no Rx */
4276                 def_q_filters = BNX2X_ACCEPT_NONE;
4277 #ifdef BCM_CNIC
4278                 if (!NO_FCOE(bp)) {
4279                         cl_id = bnx2x_fcoe(bp, cl_id);
4280                         bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4281                 }
4282 #endif
4283                 break;
4284
4285         case BNX2X_RX_MODE_NORMAL:
4286                 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4287                                 BNX2X_ACCEPT_MULTICAST;
4288 #ifdef BCM_CNIC
4289                 if (!NO_FCOE(bp)) {
4290                         cl_id = bnx2x_fcoe(bp, cl_id);
4291                         bnx2x_rxq_set_mac_filters(bp, cl_id,
4292                                                   BNX2X_ACCEPT_UNICAST |
4293                                                   BNX2X_ACCEPT_MULTICAST);
4294                 }
4295 #endif
4296                 break;
4297
4298         case BNX2X_RX_MODE_ALLMULTI:
4299                 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4300                                 BNX2X_ACCEPT_ALL_MULTICAST;
4301 #ifdef BCM_CNIC
4302                 /*
4303                  *  Prevent duplication of multicast packets by configuring FCoE
4304                  *  L2 Client to receive only matched unicast frames.
4305                  */
4306                 if (!NO_FCOE(bp)) {
4307                         cl_id = bnx2x_fcoe(bp, cl_id);
4308                         bnx2x_rxq_set_mac_filters(bp, cl_id,
4309                                                   BNX2X_ACCEPT_UNICAST);
4310                 }
4311 #endif
4312                 break;
4313
4314         case BNX2X_RX_MODE_PROMISC:
4315                 def_q_filters |= BNX2X_PROMISCUOUS_MODE;
4316 #ifdef BCM_CNIC
4317                 /*
4318                  *  Prevent packet duplication by configuring DROP_ALL for
4319                  *  the FCoE L2 Client.
4320                  */
4321                 if (!NO_FCOE(bp)) {
4322                         cl_id = bnx2x_fcoe(bp, cl_id);
4323                         bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4324                 }
4325 #endif
4326                 /* pass management unicast packets as well */
4327                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
4328                 break;
4329
4330         default:
4331                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4332                 break;
4333         }
4334
4335         cl_id = BP_L_ID(bp);
4336         bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
4337
4338         REG_WR(bp,
4339                (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
4340                        NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
4341
4342         DP(NETIF_MSG_IFUP, "rx mode %d\n"
4343                 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4344                 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
4345                 "unmatched_ucast 0x%x\n", mode,
4346                 bp->mac_filters.ucast_drop_all,
4347                 bp->mac_filters.mcast_drop_all,
4348                 bp->mac_filters.bcast_drop_all,
4349                 bp->mac_filters.ucast_accept_all,
4350                 bp->mac_filters.mcast_accept_all,
4351                 bp->mac_filters.bcast_accept_all,
4352                 bp->mac_filters.unmatched_unicast
4353         );
4354
4355         storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
4356 }
4357
4358 static void bnx2x_init_internal_common(struct bnx2x *bp)
4359 {
4360         int i;
4361
4362         if (!CHIP_IS_E1(bp)) {
4363
4364                 /* xstorm needs to know whether to add ovlan to packets or not;
4365                  * in switch-independent mode we write 0 here... */
4366                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4367                         bp->mf_mode);
4368                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4369                         bp->mf_mode);
4370                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4371                         bp->mf_mode);
4372                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4373                         bp->mf_mode);
4374         }
4375
4376         if (IS_MF_SI(bp))
4377                 /*
4378                  * In switch independent mode, the TSTORM needs to accept
4379                  * packets that failed classification, since approximate match
4380                  * mac addresses aren't written to NIG LLH
4381                  */
4382                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4383                             TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
4384
4385         /* Zero this manually as its initialization is
4386            currently missing in the initTool */
4387         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4388                 REG_WR(bp, BAR_USTRORM_INTMEM +
4389                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4390         if (CHIP_IS_E2(bp)) {
4391                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4392                         CHIP_INT_MODE_IS_BC(bp) ?
4393                         HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4394         }
4395 }
4396
4397 static void bnx2x_init_internal_port(struct bnx2x *bp)
4398 {
4399         /* port */
4400         bnx2x_dcb_init_intmem_pfc(bp);
4401 }
4402
4403 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4404 {
4405         switch (load_code) {
4406         case FW_MSG_CODE_DRV_LOAD_COMMON:
4407         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4408                 bnx2x_init_internal_common(bp);
4409                 /* no break */
4410
4411         case FW_MSG_CODE_DRV_LOAD_PORT:
4412                 bnx2x_init_internal_port(bp);
4413                 /* no break */
4414
4415         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4416                 /* internal memory per function is
4417                    initialized inside bnx2x_pf_init */
4418                 break;
4419
4420         default:
4421                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4422                 break;
4423         }
4424 }
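
/*
 * Editorial note: the deliberate fall-through in the switch above makes
 * the init scope cumulative: a COMMON load also runs the PORT stage, and
 * a PORT load also reaches the FUNCTION case.
 */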
4425
4426 static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4427 {
4428         struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4429
4430         fp->state = BNX2X_FP_STATE_CLOSED;
4431
4432         fp->index = fp->cid = fp_idx;
4433         fp->cl_id = BP_L_ID(bp) + fp_idx;
4434         fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4435         fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4436         /* qZone id equals the FW (per-path) client id */
4437         fp->cl_qzone_id  = fp->cl_id +
4438                            BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4439                                 ETH_MAX_RX_CLIENTS_E1H);
4440         /* init shortcut */
4441         fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4442                             USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
4443                             USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4444         /* Set up SB indices */
4445         fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4446         fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4447
4448         DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  "
4449                                    "cl_id %d  fw_sb %d  igu_sb %d\n",
4450                    fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4451                    fp->igu_sb_id);
4452         bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4453                       fp->fw_sb_id, fp->igu_sb_id);
4454
4455         bnx2x_update_fpsb_idx(fp);
4456 }
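
/*
 * Editorial note: all per-queue ids above derive from the queue index:
 * cl_id = BP_L_ID(bp) + fp_idx, while fw_sb_id and igu_sb_id additionally
 * skip the CNIC_CONTEXT_USE slot(s) reserved for the CNIC status block.
 */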
4457
4458 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4459 {
4460         int i;
4461
4462         for_each_eth_queue(bp, i)
4463                 bnx2x_init_fp_sb(bp, i);
4464 #ifdef BCM_CNIC
4465         if (!NO_FCOE(bp))
4466                 bnx2x_init_fcoe_fp(bp);
4467
4468         bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4469                       BNX2X_VF_ID_INVALID, false,
4470                       CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4471
4472 #endif
4473
4474         /* ensure status block indices were read */
4475         rmb();
4476
4477         bnx2x_init_def_sb(bp);
4478         bnx2x_update_dsb_idx(bp);
4479         bnx2x_init_rx_rings(bp);
4480         bnx2x_init_tx_rings(bp);
4481         bnx2x_init_sp_ring(bp);
4482         bnx2x_init_eq_ring(bp);
4483         bnx2x_init_internal(bp, load_code);
4484         bnx2x_pf_init(bp);
4485         bnx2x_init_ind_table(bp);
4486         bnx2x_stats_init(bp);
4487
4488         /* At this point, we are ready for interrupts */
4489         atomic_set(&bp->intr_sem, 0);
4490
4491         /* flush all before enabling interrupts */
4492         mb();
4493         mmiowb();
4494
4495         bnx2x_int_enable(bp);
4496
4497         /* Check for SPIO5 */
4498         bnx2x_attn_int_deasserted0(bp,
4499                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4500                                    AEU_INPUTS_ATTN_BITS_SPIO5);
4501 }
4502
4503 /* end of nic init */
4504
4505 /*
4506  * gzip service functions
4507  */
4508
4509 static int bnx2x_gunzip_init(struct bnx2x *bp)
4510 {
4511         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4512                                             &bp->gunzip_mapping, GFP_KERNEL);
4513         if (bp->gunzip_buf  == NULL)
4514                 goto gunzip_nomem1;
4515
4516         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4517         if (bp->strm  == NULL)
4518                 goto gunzip_nomem2;
4519
4520         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4521                                       GFP_KERNEL);
4522         if (bp->strm->workspace == NULL)
4523                 goto gunzip_nomem3;
4524
4525         return 0;
4526
4527 gunzip_nomem3:
4528         kfree(bp->strm);
4529         bp->strm = NULL;
4530
4531 gunzip_nomem2:
4532         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4533                           bp->gunzip_mapping);
4534         bp->gunzip_buf = NULL;
4535
4536 gunzip_nomem1:
4537         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4538                " decompression\n");
4539         return -ENOMEM;
4540 }
4541
4542 static void bnx2x_gunzip_end(struct bnx2x *bp)
4543 {
4544         kfree(bp->strm->workspace);
4545         kfree(bp->strm);
4546         bp->strm = NULL;
4547
4548         if (bp->gunzip_buf) {
4549                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4550                                   bp->gunzip_mapping);
4551                 bp->gunzip_buf = NULL;
4552         }
4553 }
4554
4555 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
4556 {
4557         int n, rc;
4558
4559         /* check gzip header */
4560         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4561                 BNX2X_ERR("Bad gzip header\n");
4562                 return -EINVAL;
4563         }
4564
4565         n = 10;
4566
4567 #define FNAME                           0x8
4568
4569         if (zbuf[3] & FNAME)
4570                 while ((zbuf[n++] != 0) && (n < len));
4571
4572         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
4573         bp->strm->avail_in = len - n;
4574         bp->strm->next_out = bp->gunzip_buf;
4575         bp->strm->avail_out = FW_BUF_SIZE;
4576
4577         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4578         if (rc != Z_OK)
4579                 return rc;
4580
4581         rc = zlib_inflate(bp->strm, Z_FINISH);
4582         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4583                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4584                            bp->strm->msg);
4585
4586         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4587         if (bp->gunzip_outlen & 0x3)
4588                 netdev_err(bp->dev, "Firmware decompression error:"
4589                                     " gunzip_outlen (%d) not aligned\n",
4590                                 bp->gunzip_outlen);
4591         bp->gunzip_outlen >>= 2;
4592
4593         zlib_inflateEnd(bp->strm);
4594
4595         if (rc == Z_STREAM_END)
4596                 return 0;
4597
4598         return rc;
4599 }
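
/*
 * Editorial note (standard gzip framing per RFC 1952, not driver-specific):
 * the header skipped above is laid out as
 *
 *   bytes 0-1:  magic 0x1f 0x8b
 *   byte  2:    compression method (Z_DEFLATED == 8)
 *   byte  3:    flags (FNAME == 0x8)
 *   bytes 4-9:  mtime, XFL, OS
 *   [NUL-terminated original file name, only if FNAME is set]
 *
 * after which the raw deflate stream begins; -MAX_WBITS tells zlib to
 * expect that raw stream with no zlib wrapper.
 */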
4600
4601 /* nic load/unload */
4602
4603 /*
4604  * General service functions
4605  */
4606
4607 /* send a NIG loopback debug packet */
4608 static void bnx2x_lb_pckt(struct bnx2x *bp)
4609 {
4610         u32 wb_write[3];
4611
4612         /* Ethernet source and destination addresses */
4613         wb_write[0] = 0x55555555;
4614         wb_write[1] = 0x55555555;
4615         wb_write[2] = 0x20;             /* SOP */
4616         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4617
4618         /* NON-IP protocol */
4619         wb_write[0] = 0x09000000;
4620         wb_write[1] = 0x55555555;
4621         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4622         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4623 }
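
/*
 * Editorial note: each REG_WR_DMAE() above writes two 32-bit data words
 * plus a control word (wb_write[2], carrying the SOP/EOP flags) to the
 * NIG debug-packet interface, so the two writes emit one minimal 16-byte
 * frame (0x10 octets, the size bnx2x_int_mem_test() below waits for).
 */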
4624
4625 /* Some of the internal memories are not directly readable from the
4626  * driver; to test them we send debug packets.
4627  */
4629 static int bnx2x_int_mem_test(struct bnx2x *bp)
4630 {
4631         int factor;
4632         int count, i;
4633         u32 val = 0;
4634
4635         if (CHIP_REV_IS_FPGA(bp))
4636                 factor = 120;
4637         else if (CHIP_REV_IS_EMUL(bp))
4638                 factor = 200;
4639         else
4640                 factor = 1;
4641
4642         /* Disable inputs of parser neighbor blocks */
4643         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4644         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4645         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4646         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4647
4648         /*  Write 0 to parser credits for CFC search request */
4649         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4650
4651         /* send Ethernet packet */
4652         bnx2x_lb_pckt(bp);
4653
4654         /* TODO: do we need to reset the NIG statistics? */
4655         /* Wait until NIG register shows 1 packet of size 0x10 */
4656         count = 1000 * factor;
4657         while (count) {
4658
4659                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4660                 val = *bnx2x_sp(bp, wb_data[0]);
4661                 if (val == 0x10)
4662                         break;
4663
4664                 msleep(10);
4665                 count--;
4666         }
4667         if (val != 0x10) {
4668                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4669                 return -1;
4670         }
4671
4672         /* Wait until PRS register shows 1 packet */
4673         count = 1000 * factor;
4674         while (count) {
4675                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4676                 if (val == 1)
4677                         break;
4678
4679                 msleep(10);
4680                 count--;
4681         }
4682         if (val != 0x1) {
4683                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4684                 return -2;
4685         }
4686
4687         /* Reset and init BRB, PRS */
4688         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4689         msleep(50);
4690         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4691         msleep(50);
4692         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4693         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4694
4695         DP(NETIF_MSG_HW, "part2\n");
4696
4697         /* Disable inputs of parser neighbor blocks */
4698         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4699         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4700         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4701         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4702
4703         /* Write 0 to parser credits for CFC search request */
4704         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4705
4706         /* send 10 Ethernet packets */
4707         for (i = 0; i < 10; i++)
4708                 bnx2x_lb_pckt(bp);
4709
4710         /* Wait until NIG register shows 10 + 1
4711            packets of size 11*0x10 = 0xb0 */
4712         count = 1000 * factor;
4713         while (count) {
4714
4715                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4716                 val = *bnx2x_sp(bp, wb_data[0]);
4717                 if (val == 0xb0)
4718                         break;
4719
4720                 msleep(10);
4721                 count--;
4722         }
4723         if (val != 0xb0) {
4724                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4725                 return -3;
4726         }
4727
4728         /* Wait until PRS register shows 2 packets */
4729         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4730         if (val != 2)
4731                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
4732
4733         /* Write 1 to parser credits for CFC search request */
4734         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4735
4736         /* Wait until PRS register shows 3 packets */
4737         msleep(10 * factor);
4739         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4740         if (val != 3)
4741                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
4742
4743         /* clear NIG EOP FIFO */
4744         for (i = 0; i < 11; i++)
4745                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4746         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4747         if (val != 1) {
4748                 BNX2X_ERR("clear of NIG failed\n");
4749                 return -4;
4750         }
4751
4752         /* Reset and init BRB, PRS, NIG */
4753         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4754         msleep(50);
4755         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4756         msleep(50);
4757         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4758         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4759 #ifndef BCM_CNIC
4760         /* set NIC mode */
4761         REG_WR(bp, PRS_REG_NIC_MODE, 1);
4762 #endif
4763
4764         /* Enable inputs of parser neighbor blocks */
4765         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4766         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4767         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
4768         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
4769
4770         DP(NETIF_MSG_HW, "done\n");
4771
4772         return 0; /* OK */
4773 }
4774
4775 static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
4776 {
4777         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4778         if (CHIP_IS_E2(bp))
4779                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4780         else
4781                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
4782         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4783         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4784         /*
4785          * Mask read-length-error interrupts in BRB for the parser
4786          * (parsing unit and 'checksum and crc' unit); these errors are
4787          * legal (the PU reads a fixed length and the CAC can cause a
4788          * read-length error on truncated packets).
4789          */
4790         REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
4791         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4792         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4793         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4794         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4795         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
4796 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4797 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
4798         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4799         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4800         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
4801 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4802 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
4803         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4804         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4805         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4806         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
4807 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4808 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4809
4810         if (CHIP_REV_IS_FPGA(bp))
4811                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4812         else if (CHIP_IS_E2(bp))
4813                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4814                            (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4815                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4816                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4817                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4818                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
4819         else
4820                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
4821         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4822         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4823         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
4824 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4825 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
4826         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4827         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
4828 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4829         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bit 3,4 masked */
4830 }
4831
4832 static void bnx2x_reset_common(struct bnx2x *bp)
4833 {
4834         /* reset_common */
4835         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4836                0xd3ffff7f);
4837         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4838 }
4839
4840 static void bnx2x_init_pxp(struct bnx2x *bp)
4841 {
4842         u16 devctl;
4843         int r_order, w_order;
4844
4845         pci_read_config_word(bp->pdev,
4846                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4847         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4848         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4849         if (bp->mrrs == -1)
4850                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4851         else {
4852                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4853                 r_order = bp->mrrs;
4854         }
4855
4856         bnx2x_init_pxp_arb(bp, r_order, w_order);
4857 }
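
/*
 * Editorial sketch (standard PCIe Device Control layout; hypothetical
 * helper, not driver code): bits 7:5 of DEVCTL encode Max_Payload_Size and
 * bits 14:12 Max_Read_Request_Size, each as a power-of-two exponent over
 * 128 bytes, hence the >> 5 and >> 12 above:
 */
static inline int bnx2x_order_to_bytes(int order)
{
        return 128 << order;    /* order 0 -> 128B, 1 -> 256B, ... */
}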
4858
4859 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4860 {
4861         int is_required;
4862         u32 val;
4863         int port;
4864
4865         if (BP_NOMCP(bp))
4866                 return;
4867
4868         is_required = 0;
4869         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4870               SHARED_HW_CFG_FAN_FAILURE_MASK;
4871
4872         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4873                 is_required = 1;
4874
4875         /*
4876          * The fan failure mechanism is usually related to the PHY type since
4877          * the power consumption of the board is affected by the PHY. Currently,
4878          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4879          */
4880         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4881                 for (port = PORT_0; port < PORT_MAX; port++) {
4882                         is_required |=
4883                                 bnx2x_fan_failure_det_req(
4884                                         bp,
4885                                         bp->common.shmem_base,
4886                                         bp->common.shmem2_base,
4887                                         port);
4888                 }
4889
4890         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4891
4892         if (is_required == 0)
4893                 return;
4894
4895         /* Fan failure is indicated by SPIO 5 */
4896         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4897                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
4898
4899         /* set to active low mode */
4900         val = REG_RD(bp, MISC_REG_SPIO_INT);
4901         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
4902                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
4903         REG_WR(bp, MISC_REG_SPIO_INT, val);
4904
4905         /* enable interrupt to signal the IGU */
4906         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4907         val |= (1 << MISC_REGISTERS_SPIO_5);
4908         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4909 }
4910
4911 static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4912 {
4913         u32 offset = 0;
4914
4915         if (CHIP_IS_E1(bp))
4916                 return;
4917         if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4918                 return;
4919
4920         switch (BP_ABS_FUNC(bp)) {
4921         case 0:
4922                 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4923                 break;
4924         case 1:
4925                 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4926                 break;
4927         case 2:
4928                 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4929                 break;
4930         case 3:
4931                 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4932                 break;
4933         case 4:
4934                 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4935                 break;
4936         case 5:
4937                 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4938                 break;
4939         case 6:
4940                 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4941                 break;
4942         case 7:
4943                 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4944                 break;
4945         default:
4946                 return;
4947         }
4948
4949         REG_WR(bp, offset, pretend_func_num);
4950         REG_RD(bp, offset);
4951         DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4952 }
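
/*
 * Editorial note on usage (mirrors the callers below): "pretending" makes
 * subsequent GRC accesses execute on behalf of another function and must
 * always be undone:
 *
 *      bnx2x_pretend_func(bp, other_func);
 *      ...access per-function registers of other_func...
 *      bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 *
 * The dummy REG_RD() above reads the pretend register back so the write is
 * known to have taken effect before any pretended access follows.
 */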
4953
4954 static void bnx2x_pf_disable(struct bnx2x *bp)
4955 {
4956         u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4957         val &= ~IGU_PF_CONF_FUNC_EN;
4958
4959         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4960         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4961         REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4962 }
4963
4964 static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4965 {
4966         u32 val, i;
4967
4968         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_ABS_FUNC(bp));
4969
4970         bnx2x_reset_common(bp);
4971         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4972         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4973
4974         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
4975         if (!CHIP_IS_E1(bp))
4976                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
4977
4978         if (CHIP_IS_E2(bp)) {
4979                 u8 fid;
4980
4981                 /*
4982                  * In 4-port or 2-port mode we need to turn off master-enable
4983                  * for everyone and then turn it back on for ourselves. So we
4984                  * always disable it for all functions on the given path,
4985                  * regardless of multi-function mode: functions 0,2,4,6 on
4986                  * path 0 and 1,3,5,7 on path 1.
4987                  */
4988                 for (fid = BP_PATH(bp); fid  < E2_FUNC_MAX*2; fid += 2) {
4989                         if (fid == BP_ABS_FUNC(bp)) {
4990                                 REG_WR(bp,
4991                                     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4992                                     1);
4993                                 continue;
4994                         }
4995
4996                         bnx2x_pretend_func(bp, fid);
4997                         /* clear pf enable */
4998                         bnx2x_pf_disable(bp);
4999                         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5000                 }
5001         }
5002
5003         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5004         if (CHIP_IS_E1(bp)) {
5005                 /* enable HW interrupt from PXP on USDM overflow
5006                    bit 16 on INT_MASK_0 */
5007                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5008         }
5009
5010         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5011         bnx2x_init_pxp(bp);
5012
5013 #ifdef __BIG_ENDIAN
5014         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5015         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5016         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5017         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5018         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5019         /* make sure this value is 0 */
5020         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5021
5022 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5023         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5024         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5025         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5026         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5027 #endif
5028
5029         bnx2x_ilt_init_page_size(bp, INITOP_SET);
5030
5031         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5032                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5033
5034         /* let the HW do its magic ... */
5035         msleep(100);
5036         /* finish PXP init */
5037         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5038         if (val != 1) {
5039                 BNX2X_ERR("PXP2 CFG failed\n");
5040                 return -EBUSY;
5041         }
5042         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5043         if (val != 1) {
5044                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5045                 return -EBUSY;
5046         }
5047
5048         /* Timers bug workaround, E2 only. We need to set the entire ILT to
5049          * have entries with value "0" and the valid bit on.
5050          * This needs to be done by the first PF that is loaded in a path
5051          * (i.e. the common phase).
5052          */
5053         if (CHIP_IS_E2(bp)) {
5054                 struct ilt_client_info ilt_cli;
5055                 struct bnx2x_ilt ilt;
5056                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
5057                 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
5058
5059                 /* initialize dummy TM client */
5060                 ilt_cli.start = 0;
5061                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
5062                 ilt_cli.client_num = ILT_CLIENT_TM;
5063
5064                 /* Step 1: set zeroes to all ilt page entries with valid bit on
5065                  * Step 2: set the timers first/last ilt entry to point
5066                  * to the entire range to prevent an ILT range error for the
5067                  * 3rd/4th vnic (this code assumes existence of the vnic)
5068                  *
5069                  * Both steps are performed by the call to
5070                  * bnx2x_ilt_client_init_op() with the dummy TM client.
5071                  *
5072                  * We must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
5073                  * and its counterpart are split registers.
5074                  */
5075                 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
5076                 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
5077                 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5078
5079                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
5080                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
5081                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
5082         }
5083
5084
5085         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5086         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5087
5088         if (CHIP_IS_E2(bp)) {
5089                 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
5090                                 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
5091                 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
5092
5093                 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
5094
5095                 /* let the HW do its magic ... */
5096                 do {
5097                         msleep(200);
5098                         val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
5099                 } while (factor-- && (val != 1));
5100
5101                 if (val != 1) {
5102                         BNX2X_ERR("ATC_INIT failed\n");
5103                         return -EBUSY;
5104                 }
5105         }
5106
5107         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5108
5109         /* clean the DMAE memory */
5110         bp->dmae_ready = 1;
5111         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5112
5113         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5114         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5115         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5116         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5117
5118         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5119         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5120         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5121         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5122
5123         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5124
5125         if (CHIP_MODE_IS_4_PORT(bp))
5126                 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
5127
5128         /* QM queues pointers table */
5129         bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
5130
5131         /* soft reset pulse */
5132         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5133         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5134
5135 #ifdef BCM_CNIC
5136         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5137 #endif
5138
5139         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5140         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5141
5142         if (!CHIP_REV_IS_SLOW(bp)) {
5143                 /* enable hw interrupt from doorbell Q */
5144                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5145         }
5146
5147         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5148         if (CHIP_MODE_IS_4_PORT(bp)) {
5149                 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5150                 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5151         }
5152
5153         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5154         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5155 #ifndef BCM_CNIC
5156         /* set NIC mode */
5157         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5158 #endif
5159         if (!CHIP_IS_E1(bp))
5160                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
5161
5162         if (CHIP_IS_E2(bp)) {
5163                 /* Bit-map indicating which L2 hdrs may appear after the
5164                    basic Ethernet header */
5165                 int has_ovlan = IS_MF_SD(bp);
5166                 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5167                 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5168         }
5169
5170         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5171         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5172         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5173         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5174
5175         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5176         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5177         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5178         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5179
5180         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5181         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5182         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5183         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5184
5185         if (CHIP_MODE_IS_4_PORT(bp))
5186                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5187
5188         /* sync semi rtc */
5189         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5190                0x80000000);
5191         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5192                0x80000000);
5193
5194         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5195         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5196         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5197
5198         if (CHIP_IS_E2(bp)) {
5199                 int has_ovlan = IS_MF_SD(bp);
5200                 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5201                 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5202         }
5203
5204         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5205         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5206                 REG_WR(bp, i, random32());
5207
5208         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5209 #ifdef BCM_CNIC
5210         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5211         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5212         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5213         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5214         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5215         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5216         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5217         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5218         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5219         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5220 #endif
5221         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5222
5223         if (sizeof(union cdu_context) != 1024)
5224                 /* we currently assume that a context is 1024 bytes */
5225                 dev_alert(&bp->pdev->dev, "please adjust the size "
5226                                           "of cdu_context(%ld)\n",
5227                          (long)sizeof(union cdu_context));
5228
5229         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5230         val = (4 << 24) + (0 << 12) + 1024;
5231         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5232
5233         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5234         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5235         /* enable context validation interrupt from CFC */
5236         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5237
5238         /* set the thresholds to prevent CFC/CDU race */
5239         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5240
5241         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5242
5243         if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5244                 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5245
5246         bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
5247         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5248
5249         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5250         /* Reset PCIE errors for debug */
5251         REG_WR(bp, 0x2814, 0xffffffff);
5252         REG_WR(bp, 0x3820, 0xffffffff);
5253
5254         if (CHIP_IS_E2(bp)) {
5255                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5256                            (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5257                                 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5258                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5259                            (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5260                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5261                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5262                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5263                            (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5264                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5265                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5266         }
5267
5268         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5269         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5270         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5271         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5272
5273         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5274         if (!CHIP_IS_E1(bp)) {
5275                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5276                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
5277         }
5278         if (CHIP_IS_E2(bp)) {
5279                 /* Bit-map indicating which L2 hdrs may appear after the
5280                    basic Ethernet header */
5281                 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
5282         }
5283
5284         if (CHIP_REV_IS_SLOW(bp))
5285                 msleep(200);
5286
5287         /* finish CFC init */
5288         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5289         if (val != 1) {
5290                 BNX2X_ERR("CFC LL_INIT failed\n");
5291                 return -EBUSY;
5292         }
5293         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5294         if (val != 1) {
5295                 BNX2X_ERR("CFC AC_INIT failed\n");
5296                 return -EBUSY;
5297         }
5298         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5299         if (val != 1) {
5300                 BNX2X_ERR("CFC CAM_INIT failed\n");
5301                 return -EBUSY;
5302         }
5303         REG_WR(bp, CFC_REG_DEBUG0, 0);
5304
5305         if (CHIP_IS_E1(bp)) {
5306                 /* read the NIG statistic
5307                    to see if this is our first load since power-up */
5308                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5309                 val = *bnx2x_sp(bp, wb_data[0]);
5310
5311                 /* do internal memory self test */
5312                 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5313                         BNX2X_ERR("internal mem self test failed\n");
5314                         return -EBUSY;
5315                 }
5316         }
5317
5318         bnx2x_setup_fan_failure_detection(bp);
5319
5320         /* clear PXP2 attentions */
5321         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5322
5323         bnx2x_enable_blocks_attention(bp);
5324         if (CHIP_PARITY_ENABLED(bp))
5325                 bnx2x_enable_blocks_parity(bp);
5326
5327         if (!BP_NOMCP(bp)) {
5328                 /* In E2 2-PORT mode, the same ext phy is used for both paths */
5329                 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5330                     CHIP_IS_E1x(bp)) {
5331                         u32 shmem_base[2], shmem2_base[2];
5332                         shmem_base[0] =  bp->common.shmem_base;
5333                         shmem2_base[0] = bp->common.shmem2_base;
5334                         if (CHIP_IS_E2(bp)) {
5335                                 shmem_base[1] =
5336                                         SHMEM2_RD(bp, other_shmem_base_addr);
5337                                 shmem2_base[1] =
5338                                         SHMEM2_RD(bp, other_shmem2_base_addr);
5339                         }
5340                         bnx2x_acquire_phy_lock(bp);
5341                         bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5342                                               bp->common.chip_id);
5343                         bnx2x_release_phy_lock(bp);
5344                 }
5345         } else
5346                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5347
5348         return 0;
5349 }
5350
5351 static int bnx2x_init_hw_port(struct bnx2x *bp)
5352 {
5353         int port = BP_PORT(bp);
5354         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5355         u32 low, high;
5356         u32 val;
5357
5358         DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
5359
5360         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5361
5362         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5363         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5364
5365         /* Timers bug workaround: the common phase disables the pf_master
5366          * bit in pglue; we need to enable it here before any dmae accesses
5367          * are attempted. Therefore we manually add the enable-master in the
5368          * port phase (it also happens in the function phase).
5369          */
5370         if (CHIP_IS_E2(bp))
5371                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5372
5373         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5374         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5375         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
5376         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5377
5378         /* QM cid (connection) count */
5379         bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
5380
5381 #ifdef BCM_CNIC
5382         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5383         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5384         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
5385 #endif
5386
5387         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5388
5389         if (CHIP_MODE_IS_4_PORT(bp))
5390                 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
5391
5392         if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5393                 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5394                 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5395                         /* no pause for emulation and FPGA */
5396                         low = 0;
5397                         high = 513;
5398                 } else {
5399                         if (IS_MF(bp))
5400                                 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5401                         else if (bp->dev->mtu > 4096) {
5402                                 if (bp->flags & ONE_PORT_FLAG)
5403                                         low = 160;
5404                                 else {
5405                                         val = bp->dev->mtu;
5406                                         /* (24*1024 + val*4)/256 */
5407                                         low = 96 + (val/64) +
5408                                                         ((val % 64) ? 1 : 0);
5409                                 }
5410                         } else
5411                                 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5412                         high = low + 56;        /* 14*1024/256 */
5413                 }
5414                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5415                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5416         }
5417
5418         if (CHIP_MODE_IS_4_PORT(bp)) {
5419                 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5420                 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5421                 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5422                                           BRB1_REG_MAC_GUARANTIED_0), 40);
5423         }
5424
5425         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5426
5427         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5428         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5429         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5430         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5431
5432         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5433         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5434         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5435         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5436         if (CHIP_MODE_IS_4_PORT(bp))
5437                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
5438
5439         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5440         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5441
5442         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5443
5444         if (!CHIP_IS_E2(bp)) {
5445                 /* configure PBF to work without PAUSE for mtu 9000 */
5446                 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5447
5448                 /* update threshold */
5449                 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5450                 /* update init credit */
5451                 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5452
5453                 /* probe changes */
5454                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5455                 udelay(50);
5456                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5457         }
5458
5459 #ifdef BCM_CNIC
5460         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
5461 #endif
5462         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5463         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5464
5465         if (CHIP_IS_E1(bp)) {
5466                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5467                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5468         }
5469         bnx2x_init_block(bp, HC_BLOCK, init_stage);
5470
5471         bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5472
5473         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5474         /* init aeu_mask_attn_func_0/1:
5475          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5476          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5477          *             bits 4-7 are used for "per vn group attention" */
5478         val = IS_MF(bp) ? 0xF7 : 0x7;
5479         /* Enable DCBX attention for all but E1 */
5480         val |= CHIP_IS_E1(bp) ? 0 : 0x10;
5481         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
5482
5483         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5484         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5485         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5486         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5487         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5488
5489         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5490
5491         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5492
5493         if (!CHIP_IS_E1(bp)) {
5494                 /* 0x2 disable mf_ov, 0x1 enable */
5495                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5496                        (IS_MF_SD(bp) ? 0x1 : 0x2));
5497
5498                 if (CHIP_IS_E2(bp)) {
5499                         val = 0;
5500                         switch (bp->mf_mode) {
5501                         case MULTI_FUNCTION_SD:
5502                                 val = 1;
5503                                 break;
5504                         case MULTI_FUNCTION_SI:
5505                                 val = 2;
5506                                 break;
5507                         }
5508
5509                         REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5510                                                   NIG_REG_LLH0_CLS_TYPE), val);
5511                 }
5512                 {
5513                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5514                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5515                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5516                 }
5517         }
5518
5519         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5520         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5521         if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
5522                                       bp->common.shmem2_base, port)) {
5523                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5524                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5525                 val = REG_RD(bp, reg_addr);
5526                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5527                 REG_WR(bp, reg_addr, val);
5528         }
5529         bnx2x__link_reset(bp);
5530
5531         return 0;
5532 }
5533
5534 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5535 {
5536         int reg;
5537
5538         if (CHIP_IS_E1(bp))
5539                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5540         else
5541                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5542
5543         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5544 }
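/*
 * Usage sketch (hypothetical values): a caller that has DMA-mapped a
 * page, e.g.
 *
 *	dma_addr_t page_map = ...;
 *	bnx2x_ilt_wr(bp, line, page_map);
 *
 * relies on ONCHIP_ADDR1()/ONCHIP_ADDR2() to encode the 64-bit DMA
 * address into the two 32-bit words consumed by the wide-bus write.
 */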
5545
5546 static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5547 {
5548         bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5549 }
5550
5551 static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5552 {
5553         u32 i, base = FUNC_ILT_BASE(func);
5554         for (i = base; i < base + ILT_PER_FUNC; i++)
5555                 bnx2x_ilt_wr(bp, i, 0);
5556 }
5557
5558 static int bnx2x_init_hw_func(struct bnx2x *bp)
5559 {
5560         int port = BP_PORT(bp);
5561         int func = BP_FUNC(bp);
5562         struct bnx2x_ilt *ilt = BP_ILT(bp);
5563         u16 cdu_ilt_start;
5564         u32 addr, val;
5565         u32 main_mem_base, main_mem_size, main_mem_prty_clr;
5566         int i, main_mem_width;
5567
5568         DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
5569
5570         /* set MSI reconfigure capability */
5571         if (bp->common.int_block == INT_BLOCK_HC) {
5572                 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5573                 val = REG_RD(bp, addr);
5574                 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5575                 REG_WR(bp, addr, val);
5576         }
5577
5578         ilt = BP_ILT(bp);
5579         cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
5580
5581         for (i = 0; i < L2_ILT_LINES(bp); i++) {
5582                 ilt->lines[cdu_ilt_start + i].page =
5583                         bp->context.vcxt + (ILT_PAGE_CIDS * i);
5584                 ilt->lines[cdu_ilt_start + i].page_mapping =
5585                         bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5586                 /* cdu ilt pages are allocated manually so there's no need
5587                  * to set the size */
5588         }
5589         bnx2x_ilt_init_op(bp, INITOP_SET);
5590
5591 #ifdef BCM_CNIC
5592         bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
5593
5594         /* T1 hash bits value determines the T1 number of entries */
5595         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
5596 #endif
5597
5598 #ifndef BCM_CNIC
5599         /* set NIC mode */
5600         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5601 #endif  /* BCM_CNIC */
5602
5603         if (CHIP_IS_E2(bp)) {
5604                 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5605
5606                 /* Turn on a single ISR mode in IGU if driver is going to use
5607                  * INT#x or MSI
5608                  */
5609                 if (!(bp->flags & USING_MSIX_FLAG))
5610                         pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5611                 /*
5612                  * Timers bug workaround: function init part.
5613                  * Wait 20 msec after initializing the ILT to make
5614                  * sure there are no requests in one of the PXP
5615                  * internal queues with "old" ILT addresses.
5616                  */
5617                 msleep(20);
5618                 /*
5619                  * Master enable - must be set here because WB DMAE writes
5620                  * are performed before this register is re-initialized as
5621                  * part of the regular function init.
5622                  */
5623                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5624                 /* Enable the function in IGU */
5625                 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5626         }
5627
5628         bp->dmae_ready = 1;
5629
5630         bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5631
5632         if (CHIP_IS_E2(bp))
5633                 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5634
5635         bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5636         bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5637         bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5638         bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5639         bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5640         bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5641         bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5642         bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5643         bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5644
5645         if (CHIP_IS_E2(bp)) {
5646                 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5647                                                                 BP_PATH(bp));
5648                 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5649                                                                 BP_PATH(bp));
5650         }
5651
5652         if (CHIP_MODE_IS_4_PORT(bp))
5653                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5654
5655         if (CHIP_IS_E2(bp))
5656                 REG_WR(bp, QM_REG_PF_EN, 1);
5657
5658         bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
5659
5660         if (CHIP_MODE_IS_4_PORT(bp))
5661                 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5662
5663         bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5664         bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5665         bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5666         bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5667         bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5668         bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5669         bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5670         bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5671         bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5672         bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5673         bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
5674         if (CHIP_IS_E2(bp))
5675                 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5676
5677         bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5678
5679         bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
5680
5681         if (CHIP_IS_E2(bp))
5682                 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5683
5684         if (IS_MF(bp)) {
5685                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5686                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
5687         }
5688
5689         bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5690
5691         /* HC init per function */
5692         if (bp->common.int_block == INT_BLOCK_HC) {
5693                 if (CHIP_IS_E1H(bp)) {
5694                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5695
5696                         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5697                         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5698                 }
5699                 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5700
5701         } else {
5702                 int num_segs, sb_idx, prod_offset;
5703
5704                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5705
5706                 if (CHIP_IS_E2(bp)) {
5707                         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5708                         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5709                 }
5710
5711                 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5712
5713                 if (CHIP_IS_E2(bp)) {
5714                         int dsb_idx = 0;
5715                         /*
5716                          * Producer memory:
5717                          * E2 mode: addresses 0-135 correspond to the mapping
5718                          * memory; 136 - PF0 default prod; 137 - PF1 default
5719                          * prod; 138 - PF2 default prod; 139 - PF3 default
5720                          * prod; 140 - PF0 attn prod; 141 - PF1 attn prod;
5721                          * 142 - PF2 attn prod; 143 - PF3 attn prod;
5722                          * 144-147 reserved.
5723                          *
5724                          * E1.5 mode - in backward compatible mode:
5725                          * for non-default SBs, each even line in the memory
5726                          * holds the U producer and each odd line holds
5727                          * the C producer. The first 128 producers are for
5728                          * NDSBs (PF0 - 0-31; PF1 - 32-63 and so on). The
5729                          * last 20 producers are for the DSB of each PF.
5730                          * Each PF has five segments (the order inside each
5731                          * segment is PF0; PF1; PF2; PF3): 128-131 U prods;
5732                          * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5733                          * 144-147 attn prods.
5734                          */
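                        /*
                         * Informal example of the normal (non-BC) layout
                         * handled below: with igu_base_sb = 16 and
                         * sb_idx = 2, the producers of that NDSB start at
                         * line (16 + 2) * IGU_NORM_NDSB_NUM_SEGS of
                         * IGU_REG_PROD_CONS_MEMORY; the loop zeroes each
                         * of its num_segs 4-byte lines in turn.
                         */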
5735                         /* non-default-status-blocks */
5736                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5737                                 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5738                         for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5739                                 prod_offset = (bp->igu_base_sb + sb_idx) *
5740                                         num_segs;
5741
5742                                 for (i = 0; i < num_segs; i++) {
5743                                         addr = IGU_REG_PROD_CONS_MEMORY +
5744                                                         (prod_offset + i) * 4;
5745                                         REG_WR(bp, addr, 0);
5746                                 }
5747                                 /* send consumer update with value 0 */
5748                                 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5749                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5750                                 bnx2x_igu_clear_sb(bp,
5751                                                    bp->igu_base_sb + sb_idx);
5752                         }
5753
5754                         /* default-status-blocks */
5755                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5756                                 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5757
5758                         if (CHIP_MODE_IS_4_PORT(bp))
5759                                 dsb_idx = BP_FUNC(bp);
5760                         else
5761                                 dsb_idx = BP_E1HVN(bp);
5762
5763                         prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5764                                        IGU_BC_BASE_DSB_PROD + dsb_idx :
5765                                        IGU_NORM_BASE_DSB_PROD + dsb_idx);
5766
5767                         for (i = 0; i < (num_segs * E1HVN_MAX);
5768                              i += E1HVN_MAX) {
5769                                 addr = IGU_REG_PROD_CONS_MEMORY +
5770                                                         (prod_offset + i)*4;
5771                                 REG_WR(bp, addr, 0);
5772                         }
5773                         /* send consumer update with 0 */
5774                         if (CHIP_INT_MODE_IS_BC(bp)) {
5775                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5776                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5777                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5778                                              CSTORM_ID, 0, IGU_INT_NOP, 1);
5779                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5780                                              XSTORM_ID, 0, IGU_INT_NOP, 1);
5781                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5782                                              TSTORM_ID, 0, IGU_INT_NOP, 1);
5783                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5784                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
5785                         } else {
5786                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5787                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5788                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5789                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
5790                         }
5791                         bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5792
5793                         /* !!! these should become driver const once
5794                            rf-tool supports split-68 const */
5795                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5796                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5797                         REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5798                         REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5799                         REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5800                         REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5801                 }
5802         }
5803
5804         /* Reset PCIE errors for debug */
5805         REG_WR(bp, 0x2114, 0xffffffff);
5806         REG_WR(bp, 0x2120, 0xffffffff);
5807
5808         bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5809         bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5810         bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5811         bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5812         bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5813         bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5814
5815         if (CHIP_IS_E1x(bp)) {
5816                 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
5817                 main_mem_base = HC_REG_MAIN_MEMORY +
5818                                 BP_PORT(bp) * (main_mem_size * 4);
5819                 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
5820                 main_mem_width = 8;
5821
5822                 val = REG_RD(bp, main_mem_prty_clr);
5823                 if (val)
5824                         DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
5825                                           "block during "
5826                                           "function init (0x%x)!\n", val);
5827
5828                 /* Clear "false" parity errors in MSI-X table */
5829                 for (i = main_mem_base;
5830                      i < main_mem_base + main_mem_size * 4;
5831                      i += main_mem_width) {
5832                         bnx2x_read_dmae(bp, i, main_mem_width / 4);
5833                         bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
5834                                          i, main_mem_width / 4);
5835                 }
5836                 /* Clear HC parity attention */
5837                 REG_RD(bp, main_mem_prty_clr);
5838         }
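        /*
         * The loop above acts as a scrub: each row of the HC main
         * memory is read and then written back through DMAE, which
         * clears the stale ("false") parity errors noted in the
         * comments, and the final REG_RD() of the parity status-clear
         * register acknowledges any remaining HC parity attention.
         */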
5839
5840         bnx2x_phy_probe(&bp->link_params);
5841
5842         return 0;
5843 }
5844
5845 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5846 {
5847         int rc = 0;
5848
5849         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5850            BP_ABS_FUNC(bp), load_code);
5851
5852         bp->dmae_ready = 0;
5853         mutex_init(&bp->dmae_mutex);
5854         rc = bnx2x_gunzip_init(bp);
5855         if (rc)
5856                 return rc;
5857
5858         switch (load_code) {
5859         case FW_MSG_CODE_DRV_LOAD_COMMON:
5860         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5861                 rc = bnx2x_init_hw_common(bp, load_code);
5862                 if (rc)
5863                         goto init_hw_err;
5864                 /* no break */
5865
5866         case FW_MSG_CODE_DRV_LOAD_PORT:
5867                 rc = bnx2x_init_hw_port(bp);
5868                 if (rc)
5869                         goto init_hw_err;
5870                 /* no break */
5871
5872         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5873                 rc = bnx2x_init_hw_func(bp);
5874                 if (rc)
5875                         goto init_hw_err;
5876                 break;
5877
5878         default:
5879                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5880                 break;
5881         }
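        /*
         * Note the deliberate fall-through marked "no break" above: a
         * COMMON load also runs the PORT and FUNCTION stages, and a
         * PORT load also runs the FUNCTION stage.
         */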
5882
5883         if (!BP_NOMCP(bp)) {
5884                 int mb_idx = BP_FW_MB_IDX(bp);
5885
5886                 bp->fw_drv_pulse_wr_seq =
5887                                 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
5888                                  DRV_PULSE_SEQ_MASK);
5889                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5890         }
5891
5892 init_hw_err:
5893         bnx2x_gunzip_end(bp);
5894
5895         return rc;
5896 }
5897
5898 void bnx2x_free_mem(struct bnx2x *bp)
5899 {
5900
5901 #define BNX2X_PCI_FREE(x, y, size) \
5902         do { \
5903                 if (x) { \
5904                         dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
5905                         x = NULL; \
5906                         y = 0; \
5907                 } \
5908         } while (0)
5909
5910 #define BNX2X_FREE(x) \
5911         do { \
5912                 if (x) { \
5913                         kfree((void *)x); \
5914                         x = NULL; \
5915                 } \
5916         } while (0)
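        /* Note: these helpers are macros rather than functions so that
         * the freed pointer can be tested and reset to NULL directly in
         * the caller's scope; BNX2X_PCI_FREE also clears the paired DMA
         * handle. */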
5917
5918         int i;
5919
5920         /* fastpath */
5921         /* Common */
5922         for_each_queue(bp, i) {
5923 #ifdef BCM_CNIC
5924                 /* FCoE client uses default status block */
5925                 if (IS_FCOE_IDX(i)) {
5926                         union host_hc_status_block *sb =
5927                                 &bnx2x_fp(bp, i, status_blk);
5928                         memset(sb, 0, sizeof(union host_hc_status_block));
5929                         bnx2x_fp(bp, i, status_blk_mapping) = 0;
5930                 } else {
5931 #endif
5932                 /* status blocks */
5933                 if (CHIP_IS_E2(bp))
5934                         BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5935                                        bnx2x_fp(bp, i, status_blk_mapping),
5936                                        sizeof(struct host_hc_status_block_e2));
5937                 else
5938                         BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5939                                        bnx2x_fp(bp, i, status_blk_mapping),
5940                                        sizeof(struct host_hc_status_block_e1x));
5941 #ifdef BCM_CNIC
5942                 }
5943 #endif
5944         }
5945         /* Rx */
5946         for_each_rx_queue(bp, i) {
5947
5948                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5949                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5950                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5951                                bnx2x_fp(bp, i, rx_desc_mapping),
5952                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5953
5954                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5955                                bnx2x_fp(bp, i, rx_comp_mapping),
5956                                sizeof(struct eth_fast_path_rx_cqe) *
5957                                NUM_RCQ_BD);
5958
5959                 /* SGE ring */
5960                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5961                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5962                                bnx2x_fp(bp, i, rx_sge_mapping),
5963                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5964         }
5965         /* Tx */
5966         for_each_tx_queue(bp, i) {
5967
5968                 /* fastpath tx rings: tx_buf tx_desc */
5969                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5970                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5971                                bnx2x_fp(bp, i, tx_desc_mapping),
5972                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5973         }
5974         /* end of fastpath */
5975
5976         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5977                        sizeof(struct host_sp_status_block));
5978
5979         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5980                        sizeof(struct bnx2x_slowpath));
5981
5982         BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5983                        bp->context.size);
5984
5985         bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5986
5987         BNX2X_FREE(bp->ilt->lines);
5988
5989 #ifdef BCM_CNIC
5990         if (CHIP_IS_E2(bp))
5991                 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5992                                sizeof(struct host_hc_status_block_e2));
5993         else
5994                 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5995                                sizeof(struct host_hc_status_block_e1x));
5996
5997         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
5998 #endif
5999
6000         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6001
6002         BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
6003                        BCM_PAGE_SIZE * NUM_EQ_PAGES);
6004
6005 #undef BNX2X_PCI_FREE
6006 #undef BNX2X_FREE
6007 }
6008
6009 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
6010 {
6011         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
6012         if (CHIP_IS_E2(bp)) {
6013                 bnx2x_fp(bp, index, sb_index_values) =
6014                         (__le16 *)status_blk.e2_sb->sb.index_values;
6015                 bnx2x_fp(bp, index, sb_running_index) =
6016                         (__le16 *)status_blk.e2_sb->sb.running_index;
6017         } else {
6018                 bnx2x_fp(bp, index, sb_index_values) =
6019                         (__le16 *)status_blk.e1x_sb->sb.index_values;
6020                 bnx2x_fp(bp, index, sb_running_index) =
6021                         (__le16 *)status_blk.e1x_sb->sb.running_index;
6022         }
6023 }
6024
6025 int bnx2x_alloc_mem(struct bnx2x *bp)
6026 {
6027 #define BNX2X_PCI_ALLOC(x, y, size) \
6028         do { \
6029                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
6030                 if (x == NULL) \
6031                         goto alloc_mem_err; \
6032                 memset(x, 0, size); \
6033         } while (0)
6034
6035 #define BNX2X_ALLOC(x, size) \
6036         do { \
6037                 x = kzalloc(size, GFP_KERNEL); \
6038                 if (x == NULL) \
6039                         goto alloc_mem_err; \
6040         } while (0)
6041
6042         int i;
6043
6044         /* fastpath */
6045         /* Common */
6046         for_each_queue(bp, i) {
6047                 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
6048                 bnx2x_fp(bp, i, bp) = bp;
6049                 /* status blocks */
6050 #ifdef BCM_CNIC
6051                 if (!IS_FCOE_IDX(i)) {
6052 #endif
6053                         if (CHIP_IS_E2(bp))
6054                                 BNX2X_PCI_ALLOC(sb->e2_sb,
6055                                     &bnx2x_fp(bp, i, status_blk_mapping),
6056                                     sizeof(struct host_hc_status_block_e2));
6057                         else
6058                                 BNX2X_PCI_ALLOC(sb->e1x_sb,
6059                                     &bnx2x_fp(bp, i, status_blk_mapping),
6060                                     sizeof(struct host_hc_status_block_e1x));
6061 #ifdef BCM_CNIC
6062                 }
6063 #endif
6064                 set_sb_shortcuts(bp, i);
6065         }
6066         /* Rx */
6067         for_each_queue(bp, i) {
6068
6069                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6070                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6071                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6072                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6073                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6074                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6075
6076                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6077                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6078                                 sizeof(struct eth_fast_path_rx_cqe) *
6079                                 NUM_RCQ_BD);
6080
6081                 /* SGE ring */
6082                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6083                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6084                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6085                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6086                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6087         }
6088         /* Tx */
6089         for_each_queue(bp, i) {
6090
6091                 /* fastpath tx rings: tx_buf tx_desc */
6092                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6093                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6094                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6095                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6096                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6097         }
6098         /* end of fastpath */
6099
6100 #ifdef BCM_CNIC
6101         if (CHIP_IS_E2(bp))
6102                 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
6103                                 sizeof(struct host_hc_status_block_e2));
6104         else
6105                 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
6106                                 sizeof(struct host_hc_status_block_e1x));
6107
6108         /* allocate searcher T2 table */
6109         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
6110 #endif
6111
6112
6113         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6114                         sizeof(struct host_sp_status_block));
6115
6116         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6117                         sizeof(struct bnx2x_slowpath));
6118
6119         bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
6120
6121         BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
6122                         bp->context.size);
6123
6124         BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
6125
6126         if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
6127                 goto alloc_mem_err;
6128
6129         /* Slow path ring */
6130         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6131
6132         /* EQ */
6133         BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
6134                         BCM_PAGE_SIZE * NUM_EQ_PAGES);
6135         return 0;
6136
6137 alloc_mem_err:
6138         bnx2x_free_mem(bp);
6139         return -ENOMEM;
6140
6141 #undef BNX2X_PCI_ALLOC
6142 #undef BNX2X_ALLOC
6143 }
6144
6145 /*
6146  * Init service functions
6147  */
6148 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6149                              int *state_p, int flags);
6150
6151 int bnx2x_func_start(struct bnx2x *bp)
6152 {
6153         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
6154
6155         /* Wait for completion */
6156         return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
6157                                  WAIT_RAMROD_COMMON);
6158 }
6159
6160 static int bnx2x_func_stop(struct bnx2x *bp)
6161 {
6162         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
6163
6164         /* Wait for completion */
6165         return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6166                                       0, &(bp->state), WAIT_RAMROD_COMMON);
6167 }
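/*
 * Both helpers above follow the driver's usual slowpath pattern: post a
 * ramrod on the slowpath queue, then block in bnx2x_wait_ramrod() until
 * bnx2x_sp_event() moves bp->state to the expected value.
 */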
6168
6169 /**
6170  * Sets a MAC in a CAM for a few L2 Clients for E1x chips
6171  *
6172  * @param bp driver descriptor
6173  * @param set set or clear an entry (1 or 0)
6174  * @param mac pointer to a buffer containing a MAC
6175  * @param cl_bit_vec bit vector of clients to register a MAC for
6176  * @param cam_offset offset in a CAM to use
6177  * @param is_bcast is the set MAC a broadcast address (for E1 only)
6178  */
6179 static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
6180                                    u32 cl_bit_vec, u8 cam_offset,
6181                                    u8 is_bcast)
6182 {
6183         struct mac_configuration_cmd *config =
6184                 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6185         int ramrod_flags = WAIT_RAMROD_COMMON;
6186
6187         bp->set_mac_pending = 1;
6188         smp_wmb();
6189
6190         config->hdr.length = 1;
6191         config->hdr.offset = cam_offset;
6192         config->hdr.client_id = 0xff;
6193         config->hdr.reserved1 = 0;
6194
6195         /* primary MAC */
6196         config->config_table[0].msb_mac_addr =
6197                                         swab16(*(u16 *)&mac[0]);
6198         config->config_table[0].middle_mac_addr =
6199                                         swab16(*(u16 *)&mac[2]);
6200         config->config_table[0].lsb_mac_addr =
6201                                         swab16(*(u16 *)&mac[4]);
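        /*
         * Packing example (assumes a little-endian host): for the MAC
         * 00:11:22:33:44:55 the three swab16() calls above produce
         * msb = 0x0011, middle = 0x2233 and lsb = 0x4455, i.e. each
         * 16-bit word is byte-swapped from the host's little-endian
         * load into big-endian order.
         */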
6202         config->config_table[0].clients_bit_vector =
6203                                         cpu_to_le32(cl_bit_vec);
6204         config->config_table[0].vlan_id = 0;
6205         config->config_table[0].pf_id = BP_FUNC(bp);
6206         if (set)
6207                 SET_FLAG(config->config_table[0].flags,
6208                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6209                         T_ETH_MAC_COMMAND_SET);
6210         else
6211                 SET_FLAG(config->config_table[0].flags,
6212                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6213                         T_ETH_MAC_COMMAND_INVALIDATE);
6214
6215         if (is_bcast)
6216                 SET_FLAG(config->config_table[0].flags,
6217                         MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6218
6219         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  PF_ID %d  CLID mask %d\n",
6220            (set ? "setting" : "clearing"),
6221            config->config_table[0].msb_mac_addr,
6222            config->config_table[0].middle_mac_addr,
6223            config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
6224
6225         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6226                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6227                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6228
6229         /* Wait for a completion */
6230         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
6231 }
6232
6233 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6234                              int *state_p, int flags)
6235 {
6236         /* can take a while if any port is running */
6237         int cnt = 5000;
6238         u8 poll = flags & WAIT_RAMROD_POLL;
6239         u8 common = flags & WAIT_RAMROD_COMMON;
6240
6241         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6242            poll ? "polling" : "waiting", state, idx);
6243
6244         might_sleep();
6245         while (cnt--) {
6246                 if (poll) {
6247                         if (common)
6248                                 bnx2x_eq_int(bp);
6249                         else {
6250                                 bnx2x_rx_int(bp->fp, 10);
6251                                 /* if index is different from 0
6252                                  * the reply for some commands will
6253                                  * be on the non default queue
6254                                  */
6255                                 if (idx)
6256                                         bnx2x_rx_int(&bp->fp[idx], 10);
6257                         }
6258                 }
6259
6260                 mb(); /* state is changed by bnx2x_sp_event() */
6261                 if (*state_p == state) {
6262 #ifdef BNX2X_STOP_ON_ERROR
6263                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
6264 #endif
6265                         return 0;
6266                 }
6267
6268                 msleep(1);
6269
6270                 if (bp->panic)
6271                         return -EIO;
6272         }
6273
6274         /* timeout! */
6275         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6276                   poll ? "polling" : "waiting", state, idx);
6277 #ifdef BNX2X_STOP_ON_ERROR
6278         bnx2x_panic();
6279 #endif
6280
6281         return -EBUSY;
6282 }
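/*
 * Timing note (approximate): 5000 iterations with an msleep(1) per pass
 * bound the wait at roughly five seconds (longer in practice, since
 * msleep() may oversleep) before -EBUSY is returned.
 */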
6283
6284 static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6285 {
6286         if (CHIP_IS_E1H(bp))
6287                 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6288         else if (CHIP_MODE_IS_4_PORT(bp))
6289                 return BP_FUNC(bp) * 32  + rel_offset;
6290         else
6291                 return BP_VN(bp) * 32  + rel_offset;
6292 }
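/*
 * Offset example (illustrative; assumes E1H_FUNC_MAX == 8): on an E1H
 * chip, function 3 asking for relative line 1 maps to CAM entry
 * 8 * 1 + 3 = 11, i.e. the CAM is laid out line-major with one slot
 * per function inside each line.
 */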
6293
6294 /**
6295  *  LLH CAM line allocations: currently only iSCSI and ETH macs are
6296  *  relevant. In addition, the current implementation is tuned for a
6297  *  single ETH MAC.
6298  *
6299  *  When PF configuration of multiple unicast ETH MACs in switch
6300  *  independent mode is required (NetQ, multiple netdev MACs,
6301  *  etc.), consider better utilisation of the 16 per-function MAC
6302  *  entries in the LLH memory.
6303  */
6304 enum {
6305         LLH_CAM_ISCSI_ETH_LINE = 0,
6306         LLH_CAM_ETH_LINE,
6307         LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
6308 };
6309
6310 static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
6311                           int set,
6312                           unsigned char *dev_addr,
6313                           int index)
6314 {
6315         u32 wb_data[2];
6316         u32 mem_offset, ena_offset, mem_index;
6317         /*
6318          * index mapping:
6319          * 0..7  - go to MEM
6320          * 8..15 - go to MEM2
6321          */
6322
6323         if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
6324                 return;
6325
6326         /* calculate memory start offset according to the mapping
6327          * and index in the memory */
6328         if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
6329                 mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
6330                                            NIG_REG_LLH0_FUNC_MEM;
6331                 ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
6332                                            NIG_REG_LLH0_FUNC_MEM_ENABLE;
6333                 mem_index = index;
6334         } else {
6335                 mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
6336                                            NIG_REG_P0_LLH_FUNC_MEM2;
6337                 ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
6338                                            NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
6339                 mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
6340         }
6341
6342         if (set) {
6343                 /* LLH_FUNC_MEM is a u64 WB register */
6344                 mem_offset += 8*mem_index;
6345
6346                 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
6347                               (dev_addr[4] <<  8) |  dev_addr[5]);
6348                 wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
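                /*
                 * Packing example (illustrative MAC): for
                 * 00:11:22:33:44:55 the lines above yield
                 * wb_data[0] = 0x22334455 (four low bytes) and
                 * wb_data[1] = 0x00000011 (two high bytes).
                 */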
6349
6350                 REG_WR_DMAE(bp, mem_offset, wb_data, 2);
6351         }
6352
6353         /* enable/disable the entry */
6354         REG_WR(bp, ena_offset + 4*mem_index, set);
6355
6356 }
6357
6358 void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6359 {
6360         u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6361                          bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6362
6363         /* networking MAC */
6364         bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6365                                (1 << bp->fp->cl_id), cam_offset, 0);
6366
6367         bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
6368
6369         if (CHIP_IS_E1(bp)) {
6370                 /* broadcast MAC */
6371                 static const u8 bcast[ETH_ALEN] = {
6372                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff
6373                 };
6374                 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6375         }
6376 }
6377 static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6378 {
6379         int i = 0, old;
6380         struct net_device *dev = bp->dev;
6381         struct netdev_hw_addr *ha;
6382         struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6383         dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6384
6385         netdev_for_each_mc_addr(ha, dev) {
6386                 /* copy mac */
6387                 config_cmd->config_table[i].msb_mac_addr =
6388                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6389                 config_cmd->config_table[i].middle_mac_addr =
6390                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6391                 config_cmd->config_table[i].lsb_mac_addr =
6392                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
6393
6394                 config_cmd->config_table[i].vlan_id = 0;
6395                 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6396                 config_cmd->config_table[i].clients_bit_vector =
6397                         cpu_to_le32(1 << BP_L_ID(bp));
6398
6399                 SET_FLAG(config_cmd->config_table[i].flags,
6400                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6401                         T_ETH_MAC_COMMAND_SET);
6402
6403                 DP(NETIF_MSG_IFUP,
6404                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6405                    config_cmd->config_table[i].msb_mac_addr,
6406                    config_cmd->config_table[i].middle_mac_addr,
6407                    config_cmd->config_table[i].lsb_mac_addr);
6408                 i++;
6409         }
6410         old = config_cmd->hdr.length;
6411         if (old > i) {
6412                 for (; i < old; i++) {
6413                         if (CAM_IS_INVALID(config_cmd->
6414                                            config_table[i])) {
6415                                 /* already invalidated */
6416                                 break;
6417                         }
6418                         /* invalidate */
6419                         SET_FLAG(config_cmd->config_table[i].flags,
6420                                 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6421                                 T_ETH_MAC_COMMAND_INVALIDATE);
6422                 }
6423         }
6424
6425         config_cmd->hdr.length = i;
6426         config_cmd->hdr.offset = offset;
6427         config_cmd->hdr.client_id = 0xff;
6428         config_cmd->hdr.reserved1 = 0;
6429
6430         bp->set_mac_pending = 1;
6431         smp_wmb();
6432
6433         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6434                    U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6435 }
6436 static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
6437 {
6438         int i;
6439         struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6440         dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6441         int ramrod_flags = WAIT_RAMROD_COMMON;
6442
6443         bp->set_mac_pending = 1;
6444         smp_wmb();
6445
6446         for (i = 0; i < config_cmd->hdr.length; i++)
6447                 SET_FLAG(config_cmd->config_table[i].flags,
6448                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6449                         T_ETH_MAC_COMMAND_INVALIDATE);
6450
6451         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6452                       U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6453
6454         /* Wait for a completion */
6455         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6456                                 ramrod_flags);
6457
6458 }
6459
6460 #ifdef BCM_CNIC
6461 /**
6462  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
6463  * MAC(s). This function will wait until the ramrod completion
6464  * returns.
6465  *
6466  * @param bp driver handle
6467  * @param set set or clear the CAM entry
6468  *
6469  * @return 0 on success, -ENODEV if the ramrod doesn't complete.
6470  */
6471 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6472 {
6473         u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6474                          bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
6475         u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
6476                 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
6477         u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
6478
6479         /* Send a SET_MAC ramrod */
6480         bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6481                                cam_offset, 0);
6482
6483         bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
6484
6485         return 0;
6486 }
6487
6488 /**
6489  * Set FCoE L2 MAC(s) at the next entries in the CAM after the
6490  * ETH MAC(s). This function will wait until the ramrod
6491  * completion returns.
6492  *
6493  * @param bp driver handle
6494  * @param set set or clear the CAM entry
6495  *
6496  * @return 0 on success, -ENODEV if the ramrod doesn't complete.
6497  */
6498 int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
6499 {
6500         u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6501         /**
6502          * CAM allocation for E1H
6503          * eth unicasts: by func number
6504          * iscsi: by func number
6505          * fip unicast: by func number
6506          * fip multicast: by func number
6507          */
6508         bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
6509                 cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
6510
6511         return 0;
6512 }
6513
6514 int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
6515 {
6516         u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6517
6518         /**
6519          * CAM allocation for E1H
6520          * eth unicasts: by func number
6521          * iscsi: by func number
6522          * fip unicast: by func number
6523          * fip multicast: by func number
6524          */
6525         bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
6526                 bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
6527
6528         return 0;
6529 }
6530 #endif
6531
6532 static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6533                                     struct bnx2x_client_init_params *params,
6534                                     u8 activate,
6535                                     struct client_init_ramrod_data *data)
6536 {
6537         /* Clear the buffer */
6538         memset(data, 0, sizeof(*data));
6539
6540         /* general */
6541         data->general.client_id = params->rxq_params.cl_id;
6542         data->general.statistics_counter_id = params->rxq_params.stat_id;
6543         data->general.statistics_en_flg =
6544                 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6545         data->general.is_fcoe_flg =
6546                 (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
6547         data->general.activate_flg = activate;
6548         data->general.sp_client_id = params->rxq_params.spcl_id;
6549
6550         /* Rx data */
6551         data->rx.tpa_en_flg =
6552                 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6553         data->rx.vmqueue_mode_en_flg = 0;
6554         data->rx.cache_line_alignment_log_size =
6555                 params->rxq_params.cache_line_log;
6556         data->rx.enable_dynamic_hc =
6557                 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6558         data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6559         data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6560         data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6561
6562         /* We don't set drop flags */
6563         data->rx.drop_ip_cs_err_flg = 0;
6564         data->rx.drop_tcp_cs_err_flg = 0;
6565         data->rx.drop_ttl0_flg = 0;
6566         data->rx.drop_udp_cs_err_flg = 0;
6567
6568         data->rx.inner_vlan_removal_enable_flg =
6569                 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6570         data->rx.outer_vlan_removal_enable_flg =
6571                 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6572         data->rx.status_block_id = params->rxq_params.fw_sb_id;
6573         data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6574         data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6575         data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6576         data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6577         data->rx.bd_page_base.lo =
6578                 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6579         data->rx.bd_page_base.hi =
6580                 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6581         data->rx.sge_page_base.lo =
6582                 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6583         data->rx.sge_page_base.hi =
6584                 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6585         data->rx.cqe_page_base.lo =
6586                 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6587         data->rx.cqe_page_base.hi =
6588                 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6589         data->rx.is_leading_rss =
6590                 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6591         data->rx.is_approx_mcast = data->rx.is_leading_rss;
6592
6593         /* Tx data */
6594         data->tx.enforce_security_flg = 0; /* VF specific */
6595         data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6596         data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6597         data->tx.mtu = 0; /* VF specific */
6598         data->tx.tx_bd_page_base.lo =
6599                 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6600         data->tx.tx_bd_page_base.hi =
6601                 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6602
6603         /* flow control data */
6604         data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6605         data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6606         data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6607         data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6608         data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6609         data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6610         data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6611
6612         data->fc.safc_group_num = params->txq_params.cos;
6613         data->fc.safc_group_en_flg =
6614                 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6615         data->fc.traffic_type =
6616                 (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
6617                 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
6618 }
6619
6620 static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6621 {
6622         /* ustorm cxt validation */
6623         cxt->ustorm_ag_context.cdu_usage =
6624                 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6625                                        ETH_CONNECTION_TYPE);
6626         /* xcontext validation */
6627         cxt->xstorm_ag_context.cdu_reserved =
6628                 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6629                                        ETH_CONNECTION_TYPE);
6630 }
6631
6632 static int bnx2x_setup_fw_client(struct bnx2x *bp,
6633                                  struct bnx2x_client_init_params *params,
6634                                  u8 activate,
6635                                  struct client_init_ramrod_data *data,
6636                                  dma_addr_t data_mapping)
6637 {
6638         u16 hc_usec;
6639         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6640         int ramrod_flags = 0, rc;
6641
6642         /* HC and context validation values */
6643         hc_usec = params->txq_params.hc_rate ?
6644                 1000000 / params->txq_params.hc_rate : 0;
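        /*
         * Informal example: an hc_rate of 50000 interrupts/sec gives
         * hc_usec = 1000000 / 50000 = 20, i.e. a 20 usec coalescing
         * period for this status-block index; hc_rate == 0 leaves the
         * timeout disabled.
         */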
6645         bnx2x_update_coalesce_sb_index(bp,
6646                         params->txq_params.fw_sb_id,
6647                         params->txq_params.sb_cq_index,
6648                         !(params->txq_params.flags & QUEUE_FLG_HC),
6649                         hc_usec);
6650
6651         *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6652
6653         hc_usec = params->rxq_params.hc_rate ?
6654                 1000000 / params->rxq_params.hc_rate : 0;
6655         bnx2x_update_coalesce_sb_index(bp,
6656                         params->rxq_params.fw_sb_id,
6657                         params->rxq_params.sb_cq_index,
6658                         !(params->rxq_params.flags & QUEUE_FLG_HC),
6659                         hc_usec);
6660
6661         bnx2x_set_ctx_validation(params->rxq_params.cxt,
6662                                  params->rxq_params.cid);
6663
6664         /* zero stats */
6665         if (params->txq_params.flags & QUEUE_FLG_STATS)
6666                 storm_memset_xstats_zero(bp, BP_PORT(bp),
6667                                          params->txq_params.stat_id);
6668
6669         if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6670                 storm_memset_ustats_zero(bp, BP_PORT(bp),
6671                                          params->rxq_params.stat_id);
6672                 storm_memset_tstats_zero(bp, BP_PORT(bp),
6673                                          params->rxq_params.stat_id);
6674         }
6675
6676         /* Fill the ramrod data */
6677         bnx2x_fill_cl_init_data(bp, params, activate, data);
6678
6679         /* SETUP ramrod.
6680          *
6681          * bnx2x_sp_post() takes a spin_lock, thus no explicit memory
6682          * barrier other than mmiowb() is needed to impose a
6683          * proper ordering of memory operations.
6684          */
6685         mmiowb();
6686
6687
6688         bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6689                       U64_HI(data_mapping), U64_LO(data_mapping), 0);
6690
6691         /* Wait for completion */
6692         rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6693                                  params->ramrod_params.index,
6694                                  params->ramrod_params.pstate,
6695                                  ramrod_flags);
6696         return rc;
6697 }
6698
6699 /**
6700  * Configure interrupt mode according to current configuration.
6701  * In case of MSI-X it will also try to enable MSI-X.
6702  *
6703  * @param bp
6704  *
6705  * @return int
6706  */
6707 static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
6708 {
6709         int rc = 0;
6710
6711         switch (bp->int_mode) {
6712         case INT_MODE_MSI:
6713                 bnx2x_enable_msi(bp);
6714                 /* falling through... */
6715         case INT_MODE_INTx:
6716                 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
6717                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
6718                 break;
6719         default:
6720                 /* Set number of queues according to bp->multi_mode value */
6721                 bnx2x_set_num_queues(bp);
6722
6723                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6724                    bp->num_queues);
6725
6726                 /* if we can't use MSI-X we only need one fp,
6727                  * so try to enable MSI-X with the requested number of fp's
6728                  * and fall back to MSI or legacy INTx with one fp
6729                  */
6730                 rc = bnx2x_enable_msix(bp);
6731                 if (rc) {
6732                         /* failed to enable MSI-X */
6733                         if (bp->multi_mode)
6734                                 DP(NETIF_MSG_IFUP,
6735                                           "Multi requested but failed to "
6736                                           "enable MSI-X (%d), "
6737                                           "set number of queues to %d\n",
6738                                    bp->num_queues,
6739                                    1 + NONE_ETH_CONTEXT_USE);
6740                         bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
6741
6742                         if (!(bp->flags & DISABLE_MSI_FLAG))
6743                                 bnx2x_enable_msi(bp);
6744                 }
6745
6746                 break;
6747         }
6748
6749         return rc;
6750 }
6751
6752 /* must be called prior to any HW initializations */
6753 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6754 {
6755         return L2_ILT_LINES(bp);
6756 }
6757
6758 void bnx2x_ilt_set_info(struct bnx2x *bp)
6759 {
6760         struct ilt_client_info *ilt_client;
6761         struct bnx2x_ilt *ilt = BP_ILT(bp);
6762         u16 line = 0;
6763
6764         ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6765         DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6766
6767         /* CDU */
6768         ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6769         ilt_client->client_num = ILT_CLIENT_CDU;
6770         ilt_client->page_size = CDU_ILT_PAGE_SZ;
6771         ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6772         ilt_client->start = line;
6773         line += L2_ILT_LINES(bp);
6774 #ifdef BCM_CNIC
6775         line += CNIC_ILT_LINES;
6776 #endif
6777         ilt_client->end = line - 1;
6778
6779         DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6780                                          "flags 0x%x, hw psz %d\n",
6781            ilt_client->start,
6782            ilt_client->end,
6783            ilt_client->page_size,
6784            ilt_client->flags,
6785            ilog2(ilt_client->page_size >> 12));
6786
6787         /* QM */
6788         if (QM_INIT(bp->qm_cid_count)) {
6789                 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6790                 ilt_client->client_num = ILT_CLIENT_QM;
6791                 ilt_client->page_size = QM_ILT_PAGE_SZ;
6792                 ilt_client->flags = 0;
6793                 ilt_client->start = line;
6794
6795                 /* 4 bytes for each cid */
6796                 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6797                                                          QM_ILT_PAGE_SZ);
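                /*
                 * Worked example (illustrative numbers only): with
                 * 1024 QM cids, 16 queues per function and 4K ILT
                 * pages, the 1024 * 16 * 4 = 65536 bytes of queue
                 * descriptors need DIV_ROUND_UP(65536, 4096) = 16
                 * ILT lines.
                 */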
6798
6799                 ilt_client->end = line - 1;
6800
6801                 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6802                                                  "flags 0x%x, hw psz %d\n",
6803                    ilt_client->start,
6804                    ilt_client->end,
6805                    ilt_client->page_size,
6806                    ilt_client->flags,
6807                    ilog2(ilt_client->page_size >> 12));
6808
6809         }
6810         /* SRC */
6811         ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6812 #ifdef BCM_CNIC
6813         ilt_client->client_num = ILT_CLIENT_SRC;
6814         ilt_client->page_size = SRC_ILT_PAGE_SZ;
6815         ilt_client->flags = 0;
6816         ilt_client->start = line;
6817         line += SRC_ILT_LINES;
6818         ilt_client->end = line - 1;
6819
6820         DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6821                                          "flags 0x%x, hw psz %d\n",
6822            ilt_client->start,
6823            ilt_client->end,
6824            ilt_client->page_size,
6825            ilt_client->flags,
6826            ilog2(ilt_client->page_size >> 12));
6827
6828 #else
6829         ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6830 #endif
6831
6832         /* TM */
6833         ilt_client = &ilt->clients[ILT_CLIENT_TM];
6834 #ifdef BCM_CNIC
6835         ilt_client->client_num = ILT_CLIENT_TM;
6836         ilt_client->page_size = TM_ILT_PAGE_SZ;
6837         ilt_client->flags = 0;
6838         ilt_client->start = line;
6839         line += TM_ILT_LINES;
6840         ilt_client->end = line - 1;
6841
6842         DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6843                                          "flags 0x%x, hw psz %d\n",
6844            ilt_client->start,
6845            ilt_client->end,
6846            ilt_client->page_size,
6847            ilt_client->flags,
6848            ilog2(ilt_client->page_size >> 12));
6849
6850 #else
6851         ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6852 #endif
6853 }
6854
6855 int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6856                        int is_leading)
6857 {
6858         struct bnx2x_client_init_params params = { {0} };
6859         int rc;
6860
6861         /* reset IGU state (skipped for the FCoE L2 queue) */
6862         if (!IS_FCOE_FP(fp))
6863                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6864                              IGU_INT_ENABLE, 0);
6865
6866         params.ramrod_params.pstate = &fp->state;
6867         params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6868         params.ramrod_params.index = fp->index;
6869         params.ramrod_params.cid = fp->cid;
6870
6871 #ifdef BCM_CNIC
6872         if (IS_FCOE_FP(fp))
6873                 params.ramrod_params.flags |= CLIENT_IS_FCOE;
6874
6875 #endif
6876
6877         if (is_leading)
6878                 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6879
6880         bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6881
6882         bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6883
6884         rc = bnx2x_setup_fw_client(bp, &params, 1,
6885                                      bnx2x_sp(bp, client_init_data),
6886                                      bnx2x_sp_mapping(bp, client_init_data));
6887         return rc;
6888 }
6889
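/*
 * Client teardown sequence: each step posts a ramrod to the FW and waits
 * synchronously for its completion via the fastpath state:
 *
 *	1. ETH_HALT      -> wait for BNX2X_FP_STATE_HALTED
 *	2. ETH_TERMINATE -> wait for BNX2X_FP_STATE_TERMINATED
 *	3. CFC_DEL       -> wait for BNX2X_FP_STATE_CLOSED
 *
 * A timeout in any step aborts the sequence and is returned to the caller.
 */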
6890 static int bnx2x_stop_fw_client(struct bnx2x *bp,
6891                                 struct bnx2x_client_ramrod_params *p)
6892 {
6893         int rc;
6894
6895         int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
6896
6897         /* halt the connection */
6898         *p->pstate = BNX2X_FP_STATE_HALTING;
6899         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6900                                                   p->cl_id, 0);
6901
6902         /* Wait for completion */
6903         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6904                                p->pstate, poll_flag);
6905         if (rc) /* timeout */
6906                 return rc;
6907
6908         *p->pstate = BNX2X_FP_STATE_TERMINATING;
6909         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6910                                                        p->cl_id, 0);
6911         /* Wait for completion */
6912         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6913                                p->pstate, poll_flag);
6914         if (rc) /* timeout */
6915                 return rc;
6916
6918         /* delete cfc entry */
6919         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
6920
6921         /* Wait for completion */
6922         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6923                                p->pstate, WAIT_RAMROD_COMMON);
6924         return rc;
6925 }
6926
6927 static int bnx2x_stop_client(struct bnx2x *bp, int index)
6928 {
6929         struct bnx2x_client_ramrod_params client_stop = {0};
6930         struct bnx2x_fastpath *fp = &bp->fp[index];
6931
6932         client_stop.index = index;
6933         client_stop.cid = fp->cid;
6934         client_stop.cl_id = fp->cl_id;
6935         client_stop.pstate = &(fp->state);
6936         client_stop.poll = 0;
6937
6938         return bnx2x_stop_fw_client(bp, &client_stop);
6939 }
6940
6941
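/*
 * bnx2x_reset_func() takes the PCI function out of service from the FW's
 * point of view: it clears the per-function enable bits in all four STORM
 * processors, marks the fastpath and slowpath status blocks as disabled,
 * zeroes the SPQ data, clears the IGU/HC edge registers, waits for a
 * possible CNIC timer scan to finish and finally clears this function's
 * ILT window.
 */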
6942 static void bnx2x_reset_func(struct bnx2x *bp)
6943 {
6944         int port = BP_PORT(bp);
6945         int func = BP_FUNC(bp);
6946         int i;
6947         int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
6948                         (CHIP_IS_E2(bp) ?
6949                          offsetof(struct hc_status_block_data_e2, common) :
6950                          offsetof(struct hc_status_block_data_e1x, common));
6951         int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6952         int pfid_offset = offsetof(struct pci_entity, pf_id);
6953
6954         /* Disable the function in the FW */
6955         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6956         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6957         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6958         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6959
6960         /* FP SBs */
6961         for_each_eth_queue(bp, i) {
6962                 struct bnx2x_fastpath *fp = &bp->fp[i];
6963                 REG_WR8(bp,
6964                         BAR_CSTRORM_INTMEM +
6965                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6966                         + pfunc_offset_fp + pfid_offset,
6967                         HC_FUNCTION_DISABLED);
6968         }
6969
6970         /* SP SB */
6971         REG_WR8(bp,
6972                 BAR_CSTRORM_INTMEM +
6973                 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6974                 pfunc_offset_sp + pfid_offset,
6975                 HC_FUNCTION_DISABLED);
6976
6977         /* Clear this function's slow-path queue (SPQ) data in the XSTORM */
6978         for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6979                 REG_WR(bp, BAR_XSTRORM_INTMEM +
6980                        XSTORM_SPQ_DATA_OFFSET(func) + i*4, 0);
6981
6982         /* Configure IGU */
6983         if (bp->common.int_block == INT_BLOCK_HC) {
6984                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6985                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6986         } else {
6987                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6988                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6989         }
6990
6991 #ifdef BCM_CNIC
6992         /* Disable Timer scan */
6993         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6994         /*
6995          * Wait for at least 10ms and up to 2 second for the timers scan to
6996          * complete
6997          */
6998         for (i = 0; i < 200; i++) {
6999                 msleep(10);
7000                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7001                         break;
7002         }
7003 #endif
7004         /* Clear ILT */
7005         bnx2x_clear_func_ilt(bp, func);
7006
7007         /* Timers bug workaround for E2: if this is vnic-3,
7008          * we need to set the entire ILT range for the timers (TM) client.
7009          */
7010         if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
7011                 struct ilt_client_info ilt_cli;
7012                 /* use dummy TM client */
7013                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7014                 ilt_cli.start = 0;
7015                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7016                 ilt_cli.client_num = ILT_CLIENT_TM;
7017
7018                 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
7019         }
7020
7021         /* this assumes that reset_port() was called before reset_func() */
7022         if (CHIP_IS_E2(bp))
7023                 bnx2x_pf_disable(bp);
7024
7025         bp->dmae_ready = 0;
7026 }
7027
7028 static void bnx2x_reset_port(struct bnx2x *bp)
7029 {
7030         int port = BP_PORT(bp);
7031         u32 val;
7032
7033         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7034
7035         /* Do not rcv packets to BRB */
7036         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7037         /* Do not direct rcv packets that are not for MCP to the BRB */
7038         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7039                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7040
7041         /* Configure AEU */
7042         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7043
7044         msleep(100);
7045         /* Check for BRB port occupancy */
7046         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7047         if (val)
7048                 DP(NETIF_MSG_IFDOWN,
7049                    "BRB1 is not empty, %d blocks are occupied\n", val);
7050
7051         /* TODO: Close Doorbell port? */
7052 }
7053
7054 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7055 {
7056         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7057            BP_ABS_FUNC(bp), reset_code);
7058
7059         switch (reset_code) {
7060         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7061                 bnx2x_reset_port(bp);
7062                 bnx2x_reset_func(bp);
7063                 bnx2x_reset_common(bp);
7064                 break;
7065
7066         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7067                 bnx2x_reset_port(bp);
7068                 bnx2x_reset_func(bp);
7069                 break;
7070
7071         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7072                 bnx2x_reset_func(bp);
7073                 break;
7074
7075         default:
7076                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7077                 break;
7078         }
7079 }
7080
7081 #ifdef BCM_CNIC
7082 static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
7083 {
7084         if (bp->flags & FCOE_MACS_SET) {
7085                 if (!IS_MF_SD(bp))
7086                         bnx2x_set_fip_eth_mac_addr(bp, 0);
7087
7088                 bnx2x_set_all_enode_macs(bp, 0);
7089
7090                 bp->flags &= ~FCOE_MACS_SET;
7091         }
7092 }
7093 #endif
7094
7095 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
7096 {
7097         int port = BP_PORT(bp);
7098         u32 reset_code = 0;
7099         int i, cnt, rc;
7100
7101         /* Wait until tx fastpath tasks complete */
7102         for_each_tx_queue(bp, i) {
7103                 struct bnx2x_fastpath *fp = &bp->fp[i];
7104
7105                 cnt = 1000;
7106                 while (bnx2x_has_tx_work_unload(fp)) {
7107
7108                         if (!cnt) {
7109                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7110                                           i);
7111 #ifdef BNX2X_STOP_ON_ERROR
7112                                 bnx2x_panic();
7113                                 return; /* bnx2x_chip_cleanup() is void */
7114 #else
7115                                 break;
7116 #endif
7117                         }
7118                         cnt--;
7119                         msleep(1);
7120                 }
7121         }
7122         /* Give HW time to discard old tx messages */
7123         msleep(1);
7124
7125         if (CHIP_IS_E1(bp)) {
7126                 /* invalidate mc list,
7127                  * wait and poll (interrupts are off)
7128                  */
7129                 bnx2x_invlidate_e1_mc_list(bp);
7130                 bnx2x_set_eth_mac(bp, 0);
7131
7132         } else {
7133                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7134
7135                 bnx2x_set_eth_mac(bp, 0);
7136
7137                 for (i = 0; i < MC_HASH_SIZE; i++)
7138                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7139         }
7140
7141 #ifdef BCM_CNIC
7142         bnx2x_del_fcoe_eth_macs(bp);
7143 #endif
7144
7145         if (unload_mode == UNLOAD_NORMAL)
7146                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7147
7148         else if (bp->flags & NO_WOL_FLAG)
7149                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7150
7151         else if (bp->wol) {
7152                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7153                 u8 *mac_addr = bp->dev->dev_addr;
7154                 u32 val;
7155                 /* The mac address is written to entries 1-4 to
7156                    preserve entry 0 which is used by the PMF */
7157                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7158
7159                 val = (mac_addr[0] << 8) | mac_addr[1];
7160                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7161
7162                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7163                       (mac_addr[4] << 8) | mac_addr[5];
7164                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7165
7166                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7167
7168         } else
7169                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7170
7171         /* Close multi and leading connections;
7172          * completions for the ramrods are collected in a synchronous way */
7173
7174         for_each_queue(bp, i)
7175                 if (bnx2x_stop_client(bp, i))
7176 #ifdef BNX2X_STOP_ON_ERROR
7177                         return;
7178 #else
7179                         goto unload_error;
7180 #endif
7181
7182         rc = bnx2x_func_stop(bp);
7183         if (rc) {
7184                 BNX2X_ERR("Function stop failed!\n");
7185 #ifdef BNX2X_STOP_ON_ERROR
7186                 return;
7187 #else
7188                 goto unload_error;
7189 #endif
7190         }
7191 #ifndef BNX2X_STOP_ON_ERROR
7192 unload_error:
7193 #endif
7194         if (!BP_NOMCP(bp))
7195                 reset_code = bnx2x_fw_command(bp, reset_code, 0);
7196         else {
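		/* Without an MCP the driver does the bookkeeping itself:
		 * load_count[path][0] counts all loaded functions on this
		 * path and load_count[path][1 + port] those per port.  The
		 * last function overall requests UNLOAD_COMMON, the last one
		 * on its port UNLOAD_PORT, anything else UNLOAD_FUNCTION.
		 */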
7197                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      "
7198                                      "%d, %d, %d\n", BP_PATH(bp),
7199                    load_count[BP_PATH(bp)][0],
7200                    load_count[BP_PATH(bp)][1],
7201                    load_count[BP_PATH(bp)][2]);
7202                 load_count[BP_PATH(bp)][0]--;
7203                 load_count[BP_PATH(bp)][1 + port]--;
7204                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  "
7205                                      "%d, %d, %d\n", BP_PATH(bp),
7206                    load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
7207                    load_count[BP_PATH(bp)][2]);
7208                 if (load_count[BP_PATH(bp)][0] == 0)
7209                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7210                 else if (load_count[BP_PATH(bp)][1 + port] == 0)
7211                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7212                 else
7213                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7214         }
7215
7216         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7217             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7218                 bnx2x__link_reset(bp);
7219
7220         /* Disable HW interrupts, NAPI */
7221         bnx2x_netif_stop(bp, 1);
7222
7223         /* Release IRQs */
7224         bnx2x_free_irq(bp);
7225
7226         /* Reset the chip */
7227         bnx2x_reset_chip(bp, reset_code);
7228
7229         /* Report UNLOAD_DONE to MCP */
7230         if (!BP_NOMCP(bp))
7231                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7232
7233 }
7234
7235 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
7236 {
7237         u32 val;
7238
7239         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
7240
7241         if (CHIP_IS_E1(bp)) {
7242                 int port = BP_PORT(bp);
7243                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7244                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
7245
7246                 val = REG_RD(bp, addr);
7247                 val &= ~(0x300);
7248                 REG_WR(bp, addr, val);
7249         } else if (CHIP_IS_E1H(bp)) {
7250                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
7251                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
7252                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
7253                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
7254         }
7255 }
7256
7257 /* Close gates #2, #3 and #4: */
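/*
 * The "gates" are host access paths into the chip that must be blocked
 * while a "process kill" reset is in progress:
 *	#2  - internal host writes (PXP_REG_HST_DISCARD_INTERNAL_WRITES)
 *	#3  - the HC interrupt path (HC_REG_CONFIG_0/1)
 *	#4a - doorbells (PXP_REG_HST_DISCARD_DOORBELLS)
 * Gates #2 and #4a exist on E1H and newer chips only.
 */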
7258 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
7259 {
7260         u32 val, addr;
7261
7262         /* Gates #2 and #4a are closed/opened for "not E1" only */
7263         if (!CHIP_IS_E1(bp)) {
7264                 /* #4 */
7265                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
7266                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
7267                        close ? (val | 0x1) : (val & (~(u32)1)));
7268                 /* #2 */
7269                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
7270                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
7271                        close ? (val | 0x1) : (val & (~(u32)1)));
7272         }
7273
7274         /* #3 */
7275         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
7276         val = REG_RD(bp, addr);
7277         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
7278
7279         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7280                 close ? "closing" : "opening");
7281         mmiowb();
7282 }
7283
7284 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
7285
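/*
 * The `magic' bit in the CLP mailbox preserves the MF configuration
 * across an MCP reset.  Typical usage, as done by bnx2x_reset_mcp_prep()
 * and bnx2x_reset_mcp_comp() below:
 *
 *	u32 magic;
 *	bnx2x_clp_reset_prep(bp, &magic);	(save the old bit, set it)
 *	... reset the MCP ...
 *	bnx2x_clp_reset_done(bp, magic);	(restore the old value)
 */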
7286 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7287 {
7288         /* Do some magic... */
7289         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7290         *magic_val = val & SHARED_MF_CLP_MAGIC;
7291         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7292 }
7293
7294 /* Restore the value of the `magic' bit.
7295  *
7296  * @param bp Driver handle.
7297  * @param magic_val Old value of the `magic' bit.
7298  */
7299 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7300 {
7301         /* Restore the `magic' bit value... */
7302         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7303         MF_CFG_WR(bp, shared_mf_config.clp_mb,
7304                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7305 }
7306
7307 /**
7308  * Prepares for MCP reset: takes care of CLP configurations.
7309  *
7310  * @param bp Driver handle.
7311  * @param magic_val Old value of 'magic' bit.
7312  */
7313 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7314 {
7315         u32 shmem;
7316         u32 validity_offset;
7317
7318         DP(NETIF_MSG_HW, "Starting\n");
7319
7320         /* Set `magic' bit in order to save MF config */
7321         if (!CHIP_IS_E1(bp))
7322                 bnx2x_clp_reset_prep(bp, magic_val);
7323
7324         /* Get shmem offset */
7325         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7326         validity_offset = offsetof(struct shmem_region, validity_map[0]);
7327
7328         /* Clear validity map flags */
7329         if (shmem > 0)
7330                 REG_WR(bp, shmem + validity_offset, 0);
7331 }
7332
7333 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
7334 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
7335
7336 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7337  * depending on the HW type.
7338  *
7339  * @param bp
7340  */
7341 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7342 {
7343         /* special handling for emulation and FPGA,
7344            wait 10 times longer */
7345         if (CHIP_REV_IS_SLOW(bp))
7346                 msleep(MCP_ONE_TIMEOUT*10);
7347         else
7348                 msleep(MCP_ONE_TIMEOUT);
7349 }
7350
7351 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7352 {
7353         u32 shmem, cnt, validity_offset, val;
7354         int rc = 0;
7355
7356         msleep(100);
7357
7358         /* Get shmem offset */
7359         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7360         if (shmem == 0) {
7361                 BNX2X_ERR("Shmem address is 0 - MCP is not up\n");
7362                 rc = -ENOTTY;
7363                 goto exit_lbl;
7364         }
7365
7366         validity_offset = offsetof(struct shmem_region, validity_map[0]);
7367
7368         /* Wait for MCP to come up */
7369         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7370                 /* TBD: it's best to check the validity map of the last
7371                  * port; currently this checks port 0.
7372                  */
7373                 val = REG_RD(bp, shmem + validity_offset);
7374                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7375                    shmem + validity_offset, val);
7376
7377                 /* check that shared memory is valid. */
7378                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7379                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7380                         break;
7381
7382                 bnx2x_mcp_wait_one(bp);
7383         }
7384
7385         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7386
7387         /* Check that shared memory is valid. This indicates that MCP is up. */
7388         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7389             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7390                 BNX2X_ERR("Shmem signature not present. MCP is not up!\n");
7391                 rc = -ENOTTY;
7392                 goto exit_lbl;
7393         }
7394
7395 exit_lbl:
7396         /* Restore the `magic' bit value */
7397         if (!CHIP_IS_E1(bp))
7398                 bnx2x_clp_reset_done(bp, magic_val);
7399
7400         return rc;
7401 }
7402
7403 static void bnx2x_pxp_prep(struct bnx2x *bp)
7404 {
7405         if (!CHIP_IS_E1(bp)) {
7406                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7407                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7408                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7409                 mmiowb();
7410         }
7411 }
7412
7413 /*
7414  * Reset the whole chip except for:
7415  *      - PCIE core
7416  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7417  *              one reset bit)
7418  *      - IGU
7419  *      - MISC (including AEU)
7420  *      - GRC
7421  *      - RBCN, RBCP
7422  */
7423 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7424 {
7425         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7426
7427         not_reset_mask1 =
7428                 MISC_REGISTERS_RESET_REG_1_RST_HC |
7429                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7430                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7431
7432         not_reset_mask2 =
7433                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7434                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7435                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7436                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7437                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7438                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
7439                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7440                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7441
7442         reset_mask1 = 0xffffffff;
7443
7444         if (CHIP_IS_E1(bp))
7445                 reset_mask2 = 0xffff;
7446         else
7447                 reset_mask2 = 0x1ffff;
7448
7449         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7450                reset_mask1 & (~not_reset_mask1));
7451         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7452                reset_mask2 & (~not_reset_mask2));
7453
7454         barrier();
7455         mmiowb();
7456
7457         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7458         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7459         mmiowb();
7460 }
7461
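/*
 * "Process kill" - a last resort global reset:
 *	1. wait (up to ~1s) for the PXP "Tetris" buffer to drain
 *	2. close gates #2, #3 and #4
 *	3. clear MISC_REG_UNPREPARED and let in-flight traffic settle
 *	4. prepare the MCP (save the CLP magic bit, clear the shmem
 *	   validity map) and the PXP for the reset
 *	5. reset the chip and wait for the MCP to come back up
 *	6. re-open the gates
 */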
7462 static int bnx2x_process_kill(struct bnx2x *bp)
7463 {
7464         int cnt = 1000;
7465         u32 val = 0;
7466         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7467
7469         /* Empty the Tetris buffer, wait for 1s */
7470         do {
7471                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7472                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7473                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7474                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7475                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7476                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7477                     ((port_is_idle_0 & 0x1) == 0x1) &&
7478                     ((port_is_idle_1 & 0x1) == 0x1) &&
7479                     (pgl_exp_rom2 == 0xffffffff))
7480                         break;
7481                 msleep(1);
7482         } while (cnt-- > 0);
7483
7484         if (cnt <= 0) {
7485                 DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there are"
7486                           " still outstanding read requests after 1s!\n");
7488                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7489                           " port_is_idle_0=0x%08x,"
7490                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7491                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7492                           pgl_exp_rom2);
7493                 return -EAGAIN;
7494         }
7495
7496         barrier();
7497
7498         /* Close gates #2, #3 and #4 */
7499         bnx2x_set_234_gates(bp, true);
7500
7501         /* TBD: Indicate that "process kill" is in progress to MCP */
7502
7503         /* Clear "unprepared" bit */
7504         REG_WR(bp, MISC_REG_UNPREPARED, 0);
7505         barrier();
7506
7507         /* Make sure all is written to the chip before the reset */
7508         mmiowb();
7509
7510         /* Wait for 1ms to empty GLUE and PCI-E core queues,
7511          * PSWHST, GRC and PSWRD Tetris buffer.
7512          */
7513         msleep(1);
7514
7515         /* Prepare to chip reset: */
7516         /* MCP */
7517         bnx2x_reset_mcp_prep(bp, &val);
7518
7519         /* PXP */
7520         bnx2x_pxp_prep(bp);
7521         barrier();
7522
7523         /* reset the chip */
7524         bnx2x_process_kill_chip_reset(bp);
7525         barrier();
7526
7527         /* Recover after reset: */
7528         /* MCP */
7529         if (bnx2x_reset_mcp_comp(bp, val))
7530                 return -EAGAIN;
7531
7532         /* PXP */
7533         bnx2x_pxp_prep(bp);
7534
7535         /* Open the gates #2, #3 and #4 */
7536         bnx2x_set_234_gates(bp, false);
7537
7538         /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7539          * reset state, re-enable attentions. */
7540
7541         return 0;
7542 }
7543
7544 static int bnx2x_leader_reset(struct bnx2x *bp)
7545 {
7546         int rc = 0;
7547         /* Try to recover after the failure */
7548         if (bnx2x_process_kill(bp)) {
7549                 printk(KERN_ERR "%s: Something bad happened! Aii!\n",
7550                        bp->dev->name);
7551                 rc = -EAGAIN;
7552                 goto exit_leader_reset;
7553         }
7554
7555         /* Clear "reset is in progress" bit and update the driver state */
7556         bnx2x_set_reset_done(bp);
7557         bp->recovery_state = BNX2X_RECOVERY_DONE;
7558
7559 exit_leader_reset:
7560         bp->is_leader = 0;
7561         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7562         smp_wmb();
7563         return rc;
7564 }
7565
7566 /* Assumption: runs under rtnl lock. This together with the fact
7567  * that it's called only from bnx2x_reset_task() ensures that it
7568  * will never be called when netif_running(bp->dev) is false.
7569  */
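/*
 * Recovery state machine (per function):
 *
 *	BNX2X_RECOVERY_INIT: try to become the leader (grab the
 *		LEADER_LOCK HW lock), unload the NIC and move to
 *		BNX2X_RECOVERY_WAIT.
 *	BNX2X_RECOVERY_WAIT: the leader waits for all other functions
 *		to unload, then performs the "process kill" and reloads;
 *		a non-leader either inherits leadership or waits for the
 *		leader to finish and then reloads itself.
 *	BNX2X_RECOVERY_DONE: normal operation.
 */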
7570 static void bnx2x_parity_recover(struct bnx2x *bp)
7571 {
7572         DP(NETIF_MSG_HW, "Handling parity\n");
7573         while (1) {
7574                 switch (bp->recovery_state) {
7575                 case BNX2X_RECOVERY_INIT:
7576                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7577                         /* Try to get a LEADER_LOCK HW lock */
7578                         if (bnx2x_trylock_hw_lock(bp,
7579                                 HW_LOCK_RESOURCE_RESERVED_08))
7580                                 bp->is_leader = 1;
7581
7582                         /* Stop the driver */
7583                         /* If interface has been removed - break */
7584                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7585                                 return;
7586
7587                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
7588                         /* Ensure "is_leader" and "recovery_state"
7589                          *  update values are seen on other CPUs
7590                          */
7591                         smp_wmb();
7592                         break;
7593
7594                 case BNX2X_RECOVERY_WAIT:
7595                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7596                         if (bp->is_leader) {
7597                                 u32 load_counter = bnx2x_get_load_cnt(bp);
7598                                 if (load_counter) {
7599                                         /* Wait until all other functions get
7600                                          * down.
7601                                          */
7602                                         schedule_delayed_work(&bp->reset_task,
7603                                                                 HZ/10);
7604                                         return;
7605                                 } else {
7606                                         /* If all other functions got down -
7607                                          * try to bring the chip back to
7608                                          * normal. In any case it's an exit
7609                                          * point for a leader.
7610                                          */
7611                                         if (bnx2x_leader_reset(bp) ||
7612                                         bnx2x_nic_load(bp, LOAD_NORMAL)) {
7613                                                 printk(KERN_ERR "%s: Recovery "
7614                                                 "has failed. Power cycle is "
7615                                                 "needed.\n", bp->dev->name);
7616                                                 /* Disconnect this device */
7617                                                 netif_device_detach(bp->dev);
7618                                                 /* Block ifup for all function
7619                                                  * of this ASIC until
7620                                                  * "process kill" or power
7621                                                  * cycle.
7622                                                  */
7623                                                 bnx2x_set_reset_in_progress(bp);
7624                                                 /* Shut down the power */
7625                                                 bnx2x_set_power_state(bp,
7626                                                                 PCI_D3hot);
7627                                                 return;
7628                                         }
7629
7630                                         return;
7631                                 }
7632                         } else { /* non-leader */
7633                                 if (!bnx2x_reset_is_done(bp)) {
7634                                         /* Try to get a LEADER_LOCK HW lock,
7635                                          * since a former leader may have
7636                                          * been unloaded by the user or
7637                                          * released leadership for some
7638                                          * other reason.
7639                                          */
7640                                         if (bnx2x_trylock_hw_lock(bp,
7641                                             HW_LOCK_RESOURCE_RESERVED_08)) {
7642                                                 /* I'm a leader now! Restart a
7643                                                  * switch case.
7644                                                  */
7645                                                 bp->is_leader = 1;
7646                                                 break;
7647                                         }
7648
7649                                         schedule_delayed_work(&bp->reset_task,
7650                                                                 HZ/10);
7651                                         return;
7652
7653                                 } else { /* A leader has completed
7654                                           * the "process kill". It's an exit
7655                                           * point for a non-leader.
7656                                           */
7657                                         bnx2x_nic_load(bp, LOAD_NORMAL);
7658                                         bp->recovery_state =
7659                                                 BNX2X_RECOVERY_DONE;
7660                                         smp_wmb();
7661                                         return;
7662                                 }
7663                         }
7664                 default:
7665                         return;
7666                 }
7667         }
7668 }
7669
7670 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
7671  * scheduled on a general queue in order to prevent a deadlock.
7672  */
7673 static void bnx2x_reset_task(struct work_struct *work)
7674 {
7675         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
7676
7677 #ifdef BNX2X_STOP_ON_ERROR
7678         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7679                   " so reset not done to allow debug dump,\n"
7680          KERN_ERR " you will need to reboot when done\n");
7681         return;
7682 #endif
7683
7684         rtnl_lock();
7685
7686         if (!netif_running(bp->dev))
7687                 goto reset_task_exit;
7688
7689         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7690                 bnx2x_parity_recover(bp);
7691         else {
7692                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7693                 bnx2x_nic_load(bp, LOAD_NORMAL);
7694         }
7695
7696 reset_task_exit:
7697         rtnl_unlock();
7698 }
7699
7700 /* end of nic load/unload */
7701
7702 /*
7703  * Init service functions
7704  */
7705
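/*
 * The PXP "pretend" registers make GRC accesses issued by this function
 * execute on behalf of another one.  There is one pretend register per
 * absolute function; writing a function number to our register switches
 * the identity and writing back BP_ABS_FUNC() restores it.  This is used
 * below to disable interrupts as function 0 before an UNDI unload.
 */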
7706 static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
7707 {
7708         u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7709         u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7710         return base + (BP_ABS_FUNC(bp)) * stride;
7711 }
7712
7713 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
7714 {
7715         u32 reg = bnx2x_get_pretend_reg(bp);
7716
7717         /* Flush all outstanding writes */
7718         mmiowb();
7719
7720         /* Pretend to be function 0 */
7721         REG_WR(bp, reg, 0);
7722         REG_RD(bp, reg);        /* Flush the GRC transaction (in the chip) */
7723
7724         /* From now we are in the "like-E1" mode */
7725         bnx2x_int_disable(bp);
7726
7727         /* Flush all outstanding writes */
7728         mmiowb();
7729
7730         /* Restore the original function */
7731         REG_WR(bp, reg, BP_ABS_FUNC(bp));
7732         REG_RD(bp, reg);
7733 }
7734
7735 static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
7736 {
7737         if (CHIP_IS_E1(bp))
7738                 bnx2x_int_disable(bp);
7739         else
7740                 bnx2x_undi_int_disable_e1h(bp);
7741 }
7742
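/*
 * A pre-boot UNDI driver leaves the DORQ CID offset for the normal
 * doorbell set to 0x7.  If that signature is found, request an unload
 * through the MCP (for both ports if needed), quiesce the RX path,
 * reset the device while preserving the NIG port-swap straps and
 * finally restore our own pf_num and fw_seq.
 */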
7743 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7744 {
7745         u32 val;
7746
7747         /* Check if there is any driver already loaded */
7748         val = REG_RD(bp, MISC_REG_UNPREPARED);
7749         if (val == 0x1) {
7750                 /* Check if it is the UNDI driver:
7751                  * UNDI initializes the CID offset for the normal doorbell
7752                  * to 0x7
7753                  */
7753                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7754                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7755                 if (val == 0x7) {
7756                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7757                         /* save our pf_num */
7758                         int orig_pf_num = bp->pf_num;
7759                         u32 swap_en;
7760                         u32 swap_val;
7761
7762                         /* clear the UNDI indication */
7763                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7764
7765                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7766
7767                         /* try unload UNDI on port 0 */
7768                         bp->pf_num = 0;
7769                         bp->fw_seq =
7770                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7771                                 DRV_MSG_SEQ_NUMBER_MASK);
7772                         reset_code = bnx2x_fw_command(bp, reset_code, 0);
7773
7774                         /* if UNDI is loaded on the other port */
7775                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7776
7777                                 /* send "DONE" for previous unload */
7778                                 bnx2x_fw_command(bp,
7779                                                  DRV_MSG_CODE_UNLOAD_DONE, 0);
7780
7781                                 /* unload UNDI on port 1 */
7782                                 bp->pf_num = 1;
7783                                 bp->fw_seq =
7784                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7785                                         DRV_MSG_SEQ_NUMBER_MASK);
7786                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7787
7788                                 bnx2x_fw_command(bp, reset_code, 0);
7789                         }
7790
7791                         /* now it's safe to release the lock */
7792                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7793
7794                         bnx2x_undi_int_disable(bp);
7795
7796                         /* close input traffic and wait for it */
7797                         /* Do not rcv packets to BRB */
7798                         REG_WR(bp,
7799                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7800                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7801                         /* Do not direct rcv packets that are not for MCP to
7802                          * the BRB */
7803                         REG_WR(bp,
7804                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7805                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7806                         /* clear AEU */
7807                         REG_WR(bp,
7808                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7809                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7810                         msleep(10);
7811
7812                         /* save NIG port swap info */
7813                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7814                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7815                         /* reset device */
7816                         REG_WR(bp,
7817                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7818                                0xd3ffffff);
7819                         REG_WR(bp,
7820                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7821                                0x1403);
7822                         /* take the NIG out of reset and restore swap values */
7823                         REG_WR(bp,
7824                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7825                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7826                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7827                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7828
7829                         /* send unload done to the MCP */
7830                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7831
7832                         /* restore our func and fw_seq */
7833                         bp->pf_num = orig_pf_num;
7834                         bp->fw_seq =
7835                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7836                                 DRV_MSG_SEQ_NUMBER_MASK);
7837                 } else
7838                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7839         }
7840 }
7841
7842 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7843 {
7844         u32 val, val2, val3, val4, id;
7845         u16 pmc;
7846
7847         /* Get the chip revision id and number. */
7848         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7849         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7850         id = ((val & 0xffff) << 16);
7851         val = REG_RD(bp, MISC_REG_CHIP_REV);
7852         id |= ((val & 0xf) << 12);
7853         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7854         id |= ((val & 0xff) << 4);
7855         val = REG_RD(bp, MISC_REG_BOND_ID);
7856         id |= (val & 0xf);
7857         bp->common.chip_id = id;
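	/*
	 * Illustrative example of the assembled value (not a real device):
	 * chip num 0xABCD, rev 0x1, metal 0x02 and bond_id 0x3 would give
	 * chip_id = 0xABCD1023.
	 */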
7858
7859         /* Set doorbell size */
7860         bp->db_size = (1 << BNX2X_DB_SHIFT);
7861
7862         if (CHIP_IS_E2(bp)) {
7863                 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7864                 if ((val & 1) == 0)
7865                         val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7866                 else
7867                         val = (val >> 1) & 1;
7868                 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7869                                                        "2_PORT_MODE");
7870                 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7871                                                  CHIP_2_PORT_MODE;
7872
7873                 if (CHIP_MODE_IS_4_PORT(bp))
7874                         bp->pfid = (bp->pf_num >> 1);   /* 0..3 */
7875                 else
7876                         bp->pfid = (bp->pf_num & 0x6);  /* 0, 2, 4, 6 */
7877         } else {
7878                 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7879                 bp->pfid = bp->pf_num;                  /* 0..7 */
7880         }
7881
7882         /*
7883          * set the base FW non-default (fast path) status block id; this value is
7884          * used to initialize the fw_sb_id saved on the fp/queue structure to
7885          * determine the id used by the FW.
7886          */
7887         if (CHIP_IS_E1x(bp))
7888                 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7889         else /* E2 */
7890                 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7891
7892         bp->link_params.chip_id = bp->common.chip_id;
7893         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7894
7895         val = (REG_RD(bp, 0x2874) & 0x55);
7896         if ((bp->common.chip_id & 0x1) ||
7897             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7898                 bp->flags |= ONE_PORT_FLAG;
7899                 BNX2X_DEV_INFO("single port device\n");
7900         }
7901
7902         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7903         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7904                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7905         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7906                        bp->common.flash_size, bp->common.flash_size);
7907
7908         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7909         bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7910                                         MISC_REG_GENERIC_CR_1 :
7911                                         MISC_REG_GENERIC_CR_0));
7912         bp->link_params.shmem_base = bp->common.shmem_base;
7913         bp->link_params.shmem2_base = bp->common.shmem2_base;
7914         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
7915                        bp->common.shmem_base, bp->common.shmem2_base);
7916
7917         if (!bp->common.shmem_base) {
7918                 BNX2X_DEV_INFO("MCP not active\n");
7919                 bp->flags |= NO_MCP_FLAG;
7920                 return;
7921         }
7922
7923         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7924         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7925                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7926                 BNX2X_ERR("BAD MCP validity signature\n");
7927
7928         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7929         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7930
7931         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7932                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7933                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7934
7935         bp->link_params.feature_config_flags = 0;
7936         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7937         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7938                 bp->link_params.feature_config_flags |=
7939                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7940         else
7941                 bp->link_params.feature_config_flags &=
7942                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7943
7944         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7945         bp->common.bc_ver = val;
7946         BNX2X_DEV_INFO("bc_ver %X\n", val);
7947         if (val < BNX2X_BC_VER) {
7948                 /* for now only warn;
7949                  * later we might need to enforce this */
7950                 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7951                           "please upgrade BC\n", BNX2X_BC_VER, val);
7952         }
7953         bp->link_params.feature_config_flags |=
7954                                 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
7955                                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7956
7957         bp->link_params.feature_config_flags |=
7958                 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7959                 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
7960
7961         if (BP_E1HVN(bp) == 0) {
7962                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7963                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7964         } else {
7965                 /* no WOL capability for E1HVN != 0 */
7966                 bp->flags |= NO_WOL_FLAG;
7967         }
7968         BNX2X_DEV_INFO("%sWoL capable\n",
7969                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
7970
7971         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7972         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7973         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7974         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7975
7976         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7977                  val, val2, val3, val4);
7978 }
7979
7980 #define IGU_FID(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7981 #define IGU_VEC(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7982
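/*
 * In IGU "normal" mode every status block is described by a CAM entry:
 * IGU_FID() extracts the owning function (with a PF/VF flag encoded in
 * the FID field) and IGU_VEC() the vector number; vector 0 of a PF is
 * its default status block.  In backward-compatible mode the mapping is
 * fixed and is computed arithmetically instead of being read from CAM.
 */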
7983 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7984 {
7985         int pfid = BP_FUNC(bp);
7986         int vn = BP_E1HVN(bp);
7987         int igu_sb_id;
7988         u32 val;
7989         u8 fid;
7990
7991         bp->igu_base_sb = 0xff;
7992         bp->igu_sb_cnt = 0;
7993         if (CHIP_INT_MODE_IS_BC(bp)) {
7994                 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7995                                        NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
7996
7997                 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7998                         FP_SB_MAX_E1x;
7999
8000                 bp->igu_dsb_id =  E1HVN_MAX * FP_SB_MAX_E1x +
8001                         (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
8002
8003                 return;
8004         }
8005
8006         /* IGU in normal mode - read CAM */
8007         for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
8008              igu_sb_id++) {
8009                 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
8010                 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
8011                         continue;
8012                 fid = IGU_FID(val);
8013                 if ((fid & IGU_FID_ENCODE_IS_PF)) {
8014                         if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
8015                                 continue;
8016                         if (IGU_VEC(val) == 0)
8017                                 /* default status block */
8018                                 bp->igu_dsb_id = igu_sb_id;
8019                         else {
8020                                 if (bp->igu_base_sb == 0xff)
8021                                         bp->igu_base_sb = igu_sb_id;
8022                                 bp->igu_sb_cnt++;
8023                         }
8024                 }
8025         }
8026         bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8027                                    NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
8028         if (bp->igu_sb_cnt == 0)
8029                 BNX2X_ERR("CAM configuration error\n");
8030 }
8031
8032 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8033                                                     u32 switch_cfg)
8034 {
8035         int cfg_size = 0, idx, port = BP_PORT(bp);
8036
8037         /* Aggregation of supported attributes of all external phys */
8038         bp->port.supported[0] = 0;
8039         bp->port.supported[1] = 0;
8040         switch (bp->link_params.num_phys) {
8041         case 1:
8042                 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
8043                 cfg_size = 1;
8044                 break;
8045         case 2:
8046                 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
8047                 cfg_size = 1;
8048                 break;
8049         case 3:
8050                 if (bp->link_params.multi_phy_config &
8051                     PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
8052                         bp->port.supported[1] =
8053                                 bp->link_params.phy[EXT_PHY1].supported;
8054                         bp->port.supported[0] =
8055                                 bp->link_params.phy[EXT_PHY2].supported;
8056                 } else {
8057                         bp->port.supported[0] =
8058                                 bp->link_params.phy[EXT_PHY1].supported;
8059                         bp->port.supported[1] =
8060                                 bp->link_params.phy[EXT_PHY2].supported;
8061                 }
8062                 cfg_size = 2;
8063                 break;
8064         }
8065
8066         if (!(bp->port.supported[0] || bp->port.supported[1])) {
8067                 BNX2X_ERR("NVRAM config error. BAD phy config. "
8068                           "PHY1 config 0x%x, PHY2 config 0x%x\n",
8069                            SHMEM_RD(bp,
8070                            dev_info.port_hw_config[port].external_phy_config),
8071                            SHMEM_RD(bp,
8072                            dev_info.port_hw_config[port].external_phy_config2));
8073                 return;
8074         }
8075
8076         switch (switch_cfg) {
8077         case SWITCH_CFG_1G:
8078                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8079                                            port*0x10);
8080                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8081                 break;
8082
8083         case SWITCH_CFG_10G:
8084                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8085                                            port*0x18);
8086                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8087                 break;
8088
8089         default:
8090                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8091                           bp->port.link_config[0]);
8092                 return;
8093         }
8094         /* mask what we support according to speed_cap_mask per configuration */
8095         for (idx = 0; idx < cfg_size; idx++) {
8096                 if (!(bp->link_params.speed_cap_mask[idx] &
8097                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8098                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
8099
8100                 if (!(bp->link_params.speed_cap_mask[idx] &
8101                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8102                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
8103
8104                 if (!(bp->link_params.speed_cap_mask[idx] &
8105                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8106                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
8107
8108                 if (!(bp->link_params.speed_cap_mask[idx] &
8109                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8110                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
8111
8112                 if (!(bp->link_params.speed_cap_mask[idx] &
8113                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8114                         bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
8115                                                      SUPPORTED_1000baseT_Full);
8116
8117                 if (!(bp->link_params.speed_cap_mask[idx] &
8118                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8119                         bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
8120
8121                 if (!(bp->link_params.speed_cap_mask[idx] &
8122                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8123                         bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
8124
8125         }
8126
8127         BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
8128                        bp->port.supported[1]);
8129 }
8130
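/*
 * Translate the NVRAM-requested link configuration of each phy into
 * req_line_speed/req_duplex for the link code and into ethtool
 * advertising bits.  A speed that is requested in NVRAM but missing
 * from the supported mask (built in bnx2x_link_settings_supported()
 * above) is reported as an NVRAM config error and aborts the parsing.
 */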
8131 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8132 {
8133         u32 link_config, idx, cfg_size = 0;
8134         bp->port.advertising[0] = 0;
8135         bp->port.advertising[1] = 0;
8136         switch (bp->link_params.num_phys) {
8137         case 1:
8138         case 2:
8139                 cfg_size = 1;
8140                 break;
8141         case 3:
8142                 cfg_size = 2;
8143                 break;
8144         }
8145         for (idx = 0; idx < cfg_size; idx++) {
8146                 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
8147                 link_config = bp->port.link_config[idx];
8148                 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8149                 case PORT_FEATURE_LINK_SPEED_AUTO:
8150                         if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
8151                                 bp->link_params.req_line_speed[idx] =
8152                                         SPEED_AUTO_NEG;
8153                                 bp->port.advertising[idx] |=
8154                                         bp->port.supported[idx];
8155                         } else {
8156                                 /* force 10G, no AN */
8157                                 bp->link_params.req_line_speed[idx] =
8158                                         SPEED_10000;
8159                                 bp->port.advertising[idx] |=
8160                                         (ADVERTISED_10000baseT_Full |
8161                                          ADVERTISED_FIBRE);
8162                                 continue;
8163                         }
8164                         break;
8165
8166                 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8167                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
8168                                 bp->link_params.req_line_speed[idx] =
8169                                         SPEED_10;
8170                                 bp->port.advertising[idx] |=
8171                                         (ADVERTISED_10baseT_Full |
8172                                          ADVERTISED_TP);
8173                         } else {
8174                                 BNX2X_ERROR("NVRAM config error. "
8175                                             "Invalid link_config 0x%x"
8176                                             "  speed_cap_mask 0x%x\n",
8177                                             link_config,
8178                                           bp->link_params.speed_cap_mask[idx]);
8179                                 return;
8180                         }
8181                         break;
8182
8183                 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8184                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
8185                                 bp->link_params.req_line_speed[idx] =
8186                                         SPEED_10;
8187                                 bp->link_params.req_duplex[idx] =
8188                                         DUPLEX_HALF;
8189                                 bp->port.advertising[idx] |=
8190                                         (ADVERTISED_10baseT_Half |
8191                                          ADVERTISED_TP);
8192                         } else {
8193                                 BNX2X_ERROR("NVRAM config error. "
8194                                             "Invalid link_config 0x%x"
8195                                             "  speed_cap_mask 0x%x\n",
8196                                             link_config,
8197                                           bp->link_params.speed_cap_mask[idx]);
8198                                 return;
8199                         }
8200                         break;
8201
8202                 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8203                         if (bp->port.supported[idx] &
8204                             SUPPORTED_100baseT_Full) {
8205                                 bp->link_params.req_line_speed[idx] =
8206                                         SPEED_100;
8207                                 bp->port.advertising[idx] |=
8208                                         (ADVERTISED_100baseT_Full |
8209                                          ADVERTISED_TP);
8210                         } else {
8211                                 BNX2X_ERROR("NVRAM config error. "
8212                                             "Invalid link_config 0x%x"
8213                                             "  speed_cap_mask 0x%x\n",
8214                                             link_config,
8215                                           bp->link_params.speed_cap_mask[idx]);
8216                                 return;
8217                         }
8218                         break;
8219
8220                 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8221                         if (bp->port.supported[idx] &
8222                             SUPPORTED_100baseT_Half) {
8223                                 bp->link_params.req_line_speed[idx] =
8224                                                                 SPEED_100;
8225                                 bp->link_params.req_duplex[idx] =
8226                                                                 DUPLEX_HALF;
8227                                 bp->port.advertising[idx] |=
8228                                         (ADVERTISED_100baseT_Half |
8229                                          ADVERTISED_TP);
8230                         } else {
8231                                 BNX2X_ERROR("NVRAM config error. "
8232                                     "Invalid link_config 0x%x"
8233                                     "  speed_cap_mask 0x%x\n",
8234                                     link_config,
8235                                     bp->link_params.speed_cap_mask[idx]);
8236                                 return;
8237                         }
8238                         break;
8239
8240                 case PORT_FEATURE_LINK_SPEED_1G:
8241                         if (bp->port.supported[idx] &
8242                             SUPPORTED_1000baseT_Full) {
8243                                 bp->link_params.req_line_speed[idx] =
8244                                         SPEED_1000;
8245                                 bp->port.advertising[idx] |=
8246                                         (ADVERTISED_1000baseT_Full |
8247                                          ADVERTISED_TP);
8248                         } else {
8249                                 BNX2X_ERROR("NVRAM config error. "
8250                                     "Invalid link_config 0x%x"
8251                                     "  speed_cap_mask 0x%x\n",
8252                                     link_config,
8253                                     bp->link_params.speed_cap_mask[idx]);
8254                                 return;
8255                         }
8256                         break;
8257
8258                 case PORT_FEATURE_LINK_SPEED_2_5G:
8259                         if (bp->port.supported[idx] &
8260                             SUPPORTED_2500baseX_Full) {
8261                                 bp->link_params.req_line_speed[idx] =
8262                                         SPEED_2500;
8263                                 bp->port.advertising[idx] |=
8264                                         (ADVERTISED_2500baseX_Full |
8265                                                 ADVERTISED_TP);
8266                         } else {
8267                                 BNX2X_ERROR("NVRAM config error. "
8268                                     "Invalid link_config 0x%x"
8269                                     "  speed_cap_mask 0x%x\n",
8270                                     link_config,
8271                                     bp->link_params.speed_cap_mask[idx]);
8272                                 return;
8273                         }
8274                         break;
8275
8276                 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8277                 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8278                 case PORT_FEATURE_LINK_SPEED_10G_KR:
8279                         if (bp->port.supported[idx] &
8280                             SUPPORTED_10000baseT_Full) {
8281                                 bp->link_params.req_line_speed[idx] =
8282                                         SPEED_10000;
8283                                 bp->port.advertising[idx] |=
8284                                         (ADVERTISED_10000baseT_Full |
8285                                                 ADVERTISED_FIBRE);
8286                         } else {
8287                                 BNX2X_ERROR("NVRAM config error. "
8288                                     "Invalid link_config 0x%x"
8289                                     "  speed_cap_mask 0x%x\n",
8290                                     link_config,
8291                                     bp->link_params.speed_cap_mask[idx]);
8292                                 return;
8293                         }
8294                         break;
8295
8296                 default:
8297                         BNX2X_ERROR("NVRAM config error. "
8298                                     "BAD link speed link_config 0x%x\n",
8299                                     link_config);
8300                         bp->link_params.req_line_speed[idx] =
8301                                 SPEED_AUTO_NEG;
8302                         bp->port.advertising[idx] =
8303                                 bp->port.supported[idx];
8304                         break;
8305                 }
8306
8307                 bp->link_params.req_flow_ctrl[idx] = (link_config &
8308                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8309                 if ((bp->link_params.req_flow_ctrl[idx] ==
8310                      BNX2X_FLOW_CTRL_AUTO) &&
8311                     !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8312                         bp->link_params.req_flow_ctrl[idx] =
8313                                 BNX2X_FLOW_CTRL_NONE;
8314                 }
8315
8316                 BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl"
8317                                " 0x%x advertising 0x%x\n",
8318                                bp->link_params.req_line_speed[idx],
8319                                bp->link_params.req_duplex[idx],
8320                                bp->link_params.req_flow_ctrl[idx],
8321                                bp->port.advertising[idx]);
8322         }
8323 }
8324
8325 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8326 {
8327         mac_hi = cpu_to_be16(mac_hi);
8328         mac_lo = cpu_to_be32(mac_lo);
8329         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8330         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8331 }
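/* Worked example (illustrative): NVRAM keeps a MAC address as two words,
 * e.g. mac_hi = 0x0010 and mac_lo = 0x18421234. After the cpu_to_be16()/
 * cpu_to_be32() swaps above, mac_buf holds the octets in network order:
 * 00:10:18:42:12:34 (two high-order bytes, then the four low-order ones).
 */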
8332
8333 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8334 {
8335         int port = BP_PORT(bp);
8336         u32 config;
8337         u32 ext_phy_type, ext_phy_config;
8338
8339         bp->link_params.bp = bp;
8340         bp->link_params.port = port;
8341
8342         bp->link_params.lane_config =
8343                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8344
8345         bp->link_params.speed_cap_mask[0] =
8346                 SHMEM_RD(bp,
8347                          dev_info.port_hw_config[port].speed_capability_mask);
8348         bp->link_params.speed_cap_mask[1] =
8349                 SHMEM_RD(bp,
8350                          dev_info.port_hw_config[port].speed_capability_mask2);
8351         bp->port.link_config[0] =
8352                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8353
8354         bp->port.link_config[1] =
8355                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
8356
8357         bp->link_params.multi_phy_config =
8358                 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
8359         /* If the device is capable of WoL, set the default state according
8360          * to the HW
8361          */
8362         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8363         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8364                    (config & PORT_FEATURE_WOL_ENABLED));
8365
8366         BNX2X_DEV_INFO("lane_config 0x%08x  "
8367                        "speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
8368                        bp->link_params.lane_config,
8369                        bp->link_params.speed_cap_mask[0],
8370                        bp->port.link_config[0]);
8371
8372         bp->link_params.switch_cfg = (bp->port.link_config[0] &
8373                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
8374         bnx2x_phy_probe(&bp->link_params);
8375         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8376
8377         bnx2x_link_settings_requested(bp);
8378
8379         /*
8380          * If connected directly, work with the internal PHY; otherwise,
8381          * work with the external PHY
8382          */
8383         ext_phy_config =
8384                 SHMEM_RD(bp,
8385                          dev_info.port_hw_config[port].external_phy_config);
8386         ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
8387         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8388                 bp->mdio.prtad = bp->port.phy_addr;
8389
8390         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8391                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8392                 bp->mdio.prtad =
8393                         XGXS_EXT_PHY_ADDR(ext_phy_config);
8394
8395         /*
8396          * Check whether a HW lock is required to access the MDC/MDIO bus
8397          * to the PHY(s). In MF mode it is always set, to cover self-test.
8398          */
8399         if (IS_MF(bp))
8400                 bp->port.need_hw_lock = 1;
8401         else
8402                 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
8403                                                         bp->common.shmem_base,
8404                                                         bp->common.shmem2_base);
8405 }
8406
8407 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8408 {
8409         u32 val, val2;
8410         int func = BP_ABS_FUNC(bp);
8411         int port = BP_PORT(bp);
8412
8413         if (BP_NOMCP(bp)) {
8414                 BNX2X_ERROR("warning: random MAC workaround active\n");
8415                 random_ether_addr(bp->dev->dev_addr);
8416         } else if (IS_MF(bp)) {
8417                 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8418                 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8419                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8420                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
8421                         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8422
8423 #ifdef BCM_CNIC
8424                 /* iSCSI NPAR MAC */
8425                 if (IS_MF_SI(bp)) {
8426                         u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8427                         if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
8428                                 val2 = MF_CFG_RD(bp, func_ext_config[func].
8429                                                      iscsi_mac_addr_upper);
8430                                 val = MF_CFG_RD(bp, func_ext_config[func].
8431                                                     iscsi_mac_addr_lower);
8432                                 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8433                         }
8434                 }
8435 #endif
8436         } else {
8437                 /* in SF mode, read MACs from the port configuration */
8438                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8439                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8440                 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8441
8442 #ifdef BCM_CNIC
8443                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
8444                                     iscsi_mac_upper);
8445                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8446                                    iscsi_mac_lower);
8447                 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8448 #endif
8449         }
8450
8451         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8452         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8453
8454 #ifdef BCM_CNIC
8455         /* Inform the upper layers about FCoE MAC */
8456         if (!CHIP_IS_E1x(bp)) {
8457                 if (IS_MF_SD(bp))
8458                         memcpy(bp->fip_mac, bp->dev->dev_addr,
8459                                sizeof(bp->fip_mac));
8460                 else
8461                         memcpy(bp->fip_mac, bp->iscsi_mac,
8462                                sizeof(bp->fip_mac));
8463         }
8464 #endif
8465 }
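/* Descriptive note: in switch-dependent MF mode the FCoE (FIP) MAC above
 * aliases the primary L2 MAC, otherwise it shadows the iSCSI MAC; either
 * way bnx2x_init_one() later registers it as a SAN address via
 * dev_addr_add().
 */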
8466
8467 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8468 {
8469         int func = BP_ABS_FUNC(bp); /* absolute function number */
8470         int vn, port;
8471         u32 val = 0;
8472         int rc = 0;
8473
8474         bnx2x_get_common_hwinfo(bp);
8475
8476         if (CHIP_IS_E1x(bp)) {
8477                 bp->common.int_block = INT_BLOCK_HC;
8478
8479                 bp->igu_dsb_id = DEF_SB_IGU_ID;
8480                 bp->igu_base_sb = 0;
8481                 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
8482                                        NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
8483         } else {
8484                 bp->common.int_block = INT_BLOCK_IGU;
8485                 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8486                 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8487                         DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8488                         bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8489                 } else
8490                         DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
8491
8492                 bnx2x_get_igu_cam_info(bp);
8493
8494         }
8495         DP(NETIF_MSG_PROBE, "igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n",
8496                              bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8497
8498         /*
8499          * Initialize MF configuration
8500          */
8501
8502         bp->mf_ov = 0;
8503         bp->mf_mode = 0;
8504         vn = BP_E1HVN(bp);
8505         port = BP_PORT(bp);
8506
8507         if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8508                 DP(NETIF_MSG_PROBE,
8509                             "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8510                             bp->common.shmem2_base, SHMEM2_RD(bp, size),
8511                             (u32)offsetof(struct shmem2_region, mf_cfg_addr));
8512                 if (SHMEM2_HAS(bp, mf_cfg_addr))
8513                         bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8514                 else
8515                         bp->common.mf_cfg_base = bp->common.shmem_base +
8516                                 offsetof(struct shmem_region, func_mb) +
8517                                 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
8518                 /*
8519                  * get mf configuration:
8520                  * 1. existence of an MF configuration
8521                  * 2. MAC address must be legal (check only upper bytes)
8522                  *    for Switch-Independent mode;
8523                  *    OVLAN must be legal for Switch-Dependent mode
8524                  * 3. SF_MODE configures specific MF mode
8525                  */
8526                 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
8527                         /* get mf configuration */
8528                         val = SHMEM_RD(bp,
8529                                        dev_info.shared_feature_config.config);
8530                         val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
8531
8532                         switch (val) {
8533                         case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
8534                                 val = MF_CFG_RD(bp, func_mf_config[func].
8535                                                 mac_upper);
8536                                 /* check for legal mac (upper bytes)*/
8537                                 if (val != 0xffff) {
8538                                         bp->mf_mode = MULTI_FUNCTION_SI;
8539                                         bp->mf_config[vn] = MF_CFG_RD(bp,
8540                                                    func_mf_config[func].config);
8541                                 } else
8542                                         DP(NETIF_MSG_PROBE, "illegal MAC "
8543                                                             "address for SI\n");
8544                                 break;
8545                         case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
8546                                 /* get OV configuration */
8547                                 val = MF_CFG_RD(bp,
8548                                         func_mf_config[FUNC_0].e1hov_tag);
8549                                 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
8550
8551                                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8552                                         bp->mf_mode = MULTI_FUNCTION_SD;
8553                                         bp->mf_config[vn] = MF_CFG_RD(bp,
8554                                                 func_mf_config[func].config);
8555                                 } else
8556                                         DP(NETIF_MSG_PROBE, "illegal OV for "
8557                                                             "SD\n");
8558                                 break;
8559                         default:
8560                                 /* Unknown configuration: reset mf_config */
8561                                 bp->mf_config[vn] = 0;
8562                                 DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
8563                                    val);
8564                         }
8565                 }
8566
8567                 BNX2X_DEV_INFO("%s function mode\n",
8568                                IS_MF(bp) ? "multi" : "single");
8569
8570                 switch (bp->mf_mode) {
8571                 case MULTI_FUNCTION_SD:
8572                         val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
8573                               FUNC_MF_CFG_E1HOV_TAG_MASK;
8574                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8575                                 bp->mf_ov = val;
8576                                 BNX2X_DEV_INFO("MF OV for func %d is %d"
8577                                                " (0x%04x)\n", func,
8578                                                bp->mf_ov, bp->mf_ov);
8579                         } else {
8580                                 BNX2X_ERR("No valid MF OV for func %d,"
8581                                           "  aborting\n", func);
8582                                 rc = -EPERM;
8583                         }
8584                         break;
8585                 case MULTI_FUNCTION_SI:
8586                         BNX2X_DEV_INFO("func %d is in MF "
8587                                        "switch-independent mode\n", func);
8588                         break;
8589                 default:
8590                         if (vn) {
8591                                 BNX2X_ERR("VN %d in single function mode,"
8592                                           "  aborting\n", vn);
8593                                 rc = -EPERM;
8594                         }
8595                         break;
8596                 }
8597
8598         }
8599
8600         /* adjust igu_sb_cnt to MF for E1x */
8601         if (CHIP_IS_E1x(bp) && IS_MF(bp))
8602                 bp->igu_sb_cnt /= E1HVN_MAX;
8603
8604         /*
8605          * adjust E2 sb count: to be removed once the FW supports
8606          * more than 16 L2 clients
8607          */
8608 #define MAX_L2_CLIENTS                          16
8609         if (CHIP_IS_E2(bp))
8610                 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8611                                        MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8612
8613         if (!BP_NOMCP(bp)) {
8614                 bnx2x_get_port_hwinfo(bp);
8615
8616                 bp->fw_seq =
8617                         (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8618                          DRV_MSG_SEQ_NUMBER_MASK);
8619                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8620         }
8621
8622         /* Get MAC addresses */
8623         bnx2x_get_mac_hwinfo(bp);
8624
8625         return rc;
8626 }
8627
8628 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8629 {
8630         int cnt, i, block_end, rodi;
8631         char vpd_data[BNX2X_VPD_LEN+1];
8632         char str_id_reg[VENDOR_ID_LEN+1];
8633         char str_id_cap[VENDOR_ID_LEN+1];
8634         u8 len;
8635
8636         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8637         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8638
8639         if (cnt < BNX2X_VPD_LEN)
8640                 goto out_not_found;
8641
8642         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8643                              PCI_VPD_LRDT_RO_DATA);
8644         if (i < 0)
8645                 goto out_not_found;
8646
8647
8648         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8649                     pci_vpd_lrdt_size(&vpd_data[i]);
8650
8651         i += PCI_VPD_LRDT_TAG_SIZE;
8652
8653         if (block_end > BNX2X_VPD_LEN)
8654                 goto out_not_found;
8655
8656         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8657                                    PCI_VPD_RO_KEYWORD_MFR_ID);
8658         if (rodi < 0)
8659                 goto out_not_found;
8660
8661         len = pci_vpd_info_field_size(&vpd_data[rodi]);
8662
8663         if (len != VENDOR_ID_LEN)
8664                 goto out_not_found;
8665
8666         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8667
8668         /* vendor specific info */
8669         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8670         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8671         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8672             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8673
8674                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8675                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
8676                 if (rodi >= 0) {
8677                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
8678
8679                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8680
8681                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8682                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8683                                 bp->fw_ver[len] = ' ';
8684                         }
8685                 }
8686                 return;
8687         }
8688 out_not_found:
8689         return;
8690 }
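/* Descriptive summary of the VPD walk above: locate the large-resource
 * read-only data tag, read the manufacturer-ID keyword and compare it
 * against the Dell vendor ID; only on a match is the VENDOR0 keyword
 * copied into bp->fw_ver. Any malformed or missing field bails out via
 * out_not_found, leaving bp->fw_ver zeroed.
 */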
8691
8692 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8693 {
8694         int func;
8695         int timer_interval;
8696         int rc;
8697
8698         /* Disable interrupt handling until HW is initialized */
8699         atomic_set(&bp->intr_sem, 1);
8700         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8701
8702         mutex_init(&bp->port.phy_mutex);
8703         mutex_init(&bp->fw_mb_mutex);
8704         spin_lock_init(&bp->stats_lock);
8705 #ifdef BCM_CNIC
8706         mutex_init(&bp->cnic_mutex);
8707 #endif
8708
8709         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8710         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8711
8712         rc = bnx2x_get_hwinfo(bp);
8713
8714         if (!rc)
8715                 rc = bnx2x_alloc_mem_bp(bp);
8716
8717         bnx2x_read_fwinfo(bp);
8718
8719         func = BP_FUNC(bp);
8720
8721         /* need to reset chip if undi was active */
8722         if (!BP_NOMCP(bp))
8723                 bnx2x_undi_unload(bp);
8724
8725         if (CHIP_REV_IS_FPGA(bp))
8726                 dev_err(&bp->pdev->dev, "FPGA detected\n");
8727
8728         if (BP_NOMCP(bp) && (func == 0))
8729                 dev_err(&bp->pdev->dev, "MCP disabled, "
8730                                         "must load devices in order!\n");
8731
8732         bp->multi_mode = multi_mode;
8733         bp->int_mode = int_mode;
8734
8735         bp->dev->features |= NETIF_F_GRO;
8736
8737         /* Set TPA flags */
8738         if (disable_tpa) {
8739                 bp->flags &= ~TPA_ENABLE_FLAG;
8740                 bp->dev->features &= ~NETIF_F_LRO;
8741         } else {
8742                 bp->flags |= TPA_ENABLE_FLAG;
8743                 bp->dev->features |= NETIF_F_LRO;
8744         }
8745         bp->disable_tpa = disable_tpa;
8746
8747         if (CHIP_IS_E1(bp))
8748                 bp->dropless_fc = 0;
8749         else
8750                 bp->dropless_fc = dropless_fc;
8751
8752         bp->mrrs = mrrs;
8753
8754         bp->tx_ring_size = MAX_TX_AVAIL;
8755
8756         bp->rx_csum = 1;
8757
8758         /* make sure that the numbers are in the right granularity */
8759         bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8760         bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
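        /* e.g. if BNX2X_BTR were 4, the 50 us default rounds down to 48
         * and the 25 us default to 24 (illustrative values only) */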
8761
8762         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8763         bp->current_interval = (poll ? poll : timer_interval);
8764
8765         init_timer(&bp->timer);
8766         bp->timer.expires = jiffies + bp->current_interval;
8767         bp->timer.data = (unsigned long) bp;
8768         bp->timer.function = bnx2x_timer;
8769
8770         bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
8771         bnx2x_dcbx_init_params(bp);
8772
8773         return rc;
8774 }
8775
8776
8777 /****************************************************************************
8778 * General service functions
8779 ****************************************************************************/
8780
8781 /* called with rtnl_lock */
8782 static int bnx2x_open(struct net_device *dev)
8783 {
8784         struct bnx2x *bp = netdev_priv(dev);
8785
8786         netif_carrier_off(dev);
8787
8788         bnx2x_set_power_state(bp, PCI_D0);
8789
8790         if (!bnx2x_reset_is_done(bp)) {
8791                 do {
8792                         /* Reset the MCP mailbox sequence if there is an
8793                          * ongoing recovery
8794                          */
8795                         bp->fw_seq = 0;
8796
8797                         /* If this is the first function to load and "reset
8798                          * done" is still not cleared, a previous recovery may
8799                          * not have completed. We don't check the attention
8800                          * state: it may already have been cleared by a
8801                          * "common" reset, but we shall do "process kill".
8802                          */
8803                         if ((bnx2x_get_load_cnt(bp) == 0) &&
8804                                 bnx2x_trylock_hw_lock(bp,
8805                                 HW_LOCK_RESOURCE_RESERVED_08) &&
8806                                 (!bnx2x_leader_reset(bp))) {
8807                                 DP(NETIF_MSG_HW, "Recovered in open\n");
8808                                 break;
8809                         }
8810
8811                         bnx2x_set_power_state(bp, PCI_D3hot);
8812
8813                         printk(KERN_ERR "%s: Recovery flow hasn't completed"
8814                         " yet. Try again later. If you still see this"
8815                         " message after a few retries then a power cycle"
8816                         " is required.\n", bp->dev->name);
8817
8818                         return -EAGAIN;
8819                 } while (0);
8820         }
8821
8822         bp->recovery_state = BNX2X_RECOVERY_DONE;
8823
8824         return bnx2x_nic_load(bp, LOAD_OPEN);
8825 }
8826
8827 /* called with rtnl_lock */
8828 static int bnx2x_close(struct net_device *dev)
8829 {
8830         struct bnx2x *bp = netdev_priv(dev);
8831
8832         /* Unload the driver, release IRQs */
8833         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
8834         bnx2x_set_power_state(bp, PCI_D3hot);
8835
8836         return 0;
8837 }
8838
8839 /* called with netif_tx_lock from dev_mcast.c */
8840 void bnx2x_set_rx_mode(struct net_device *dev)
8841 {
8842         struct bnx2x *bp = netdev_priv(dev);
8843         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8844         int port = BP_PORT(bp);
8845
8846         if (bp->state != BNX2X_STATE_OPEN) {
8847                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8848                 return;
8849         }
8850
8851         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8852
8853         if (dev->flags & IFF_PROMISC)
8854                 rx_mode = BNX2X_RX_MODE_PROMISC;
8855         else if ((dev->flags & IFF_ALLMULTI) ||
8856                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8857                   CHIP_IS_E1(bp)))
8858                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8859         else { /* some multicasts */
8860                 if (CHIP_IS_E1(bp)) {
8861                         /*
8862                          * set mc list, do not wait as wait implies sleep
8863                          * and set_rx_mode can be invoked from non-sleepable
8864                          * context
8865                          */
8866                         u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8867                                      BNX2X_MAX_EMUL_MULTI*(1 + port) :
8868                                      BNX2X_MAX_MULTICAST*(1 + port));
8869
8870                         bnx2x_set_e1_mc_list(bp, offset);
8871                 } else { /* E1H */
8872                         /* Accept one or more multicasts */
8873                         struct netdev_hw_addr *ha;
8874                         u32 mc_filter[MC_HASH_SIZE];
8875                         u32 crc, bit, regidx;
8876                         int i;
8877
8878                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8879
8880                         netdev_for_each_mc_addr(ha, dev) {
8881                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
8882                                    bnx2x_mc_addr(ha));
8883
8884                                 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8885                                                 ETH_ALEN);
8886                                 bit = (crc >> 24) & 0xff;
8887                                 regidx = bit >> 5;
8888                                 bit &= 0x1f;
8889                                 mc_filter[regidx] |= (1 << bit);
8890                         }
8891
8892                         for (i = 0; i < MC_HASH_SIZE; i++)
8893                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8894                                        mc_filter[i]);
8895                 }
8896         }
8897
8898         bp->rx_mode = rx_mode;
8899         bnx2x_set_storm_rx_mode(bp);
8900 }
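/* A minimal sketch (illustrative, not part of the driver) of the E1H
 * multicast hash used above, assuming the same crc32c_le() helper: the top
 * byte of the little-endian CRC32c of the MAC picks one bit out of the
 * MC_HASH_SIZE * 32 = 256-bit filter.
 */
static inline void bnx2x_mc_hash_example(const u8 *mac, u32 *mc_filter)
{
        u32 crc = crc32c_le(0, mac, ETH_ALEN);      /* same call as above */
        u32 bit = (crc >> 24) & 0xff;               /* hash value: 0..255 */

        mc_filter[bit >> 5] |= 1u << (bit & 0x1f);  /* word index, bit index */
}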
8901
8902 /* called with rtnl_lock */
8903 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8904                            int devad, u16 addr)
8905 {
8906         struct bnx2x *bp = netdev_priv(netdev);
8907         u16 value;
8908         int rc;
8909
8910         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8911            prtad, devad, addr);
8912
8913         /* The HW expects different devad if CL22 is used */
8914         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8915
8916         bnx2x_acquire_phy_lock(bp);
8917         rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
8918         bnx2x_release_phy_lock(bp);
8919         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
8920
8921         if (!rc)
8922                 rc = value;
8923         return rc;
8924 }
8925
8926 /* called with rtnl_lock */
8927 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8928                             u16 addr, u16 value)
8929 {
8930         struct bnx2x *bp = netdev_priv(netdev);
8931         int rc;
8932
8933         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8934                            " value 0x%x\n", prtad, devad, addr, value);
8935
8936         /* The HW expects different devad if CL22 is used */
8937         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8938
8939         bnx2x_acquire_phy_lock(bp);
8940         rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
8941         bnx2x_release_phy_lock(bp);
8942         return rc;
8943 }
8944
8945 /* called with rtnl_lock */
8946 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8947 {
8948         struct bnx2x *bp = netdev_priv(dev);
8949         struct mii_ioctl_data *mdio = if_mii(ifr);
8950
8951         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8952            mdio->phy_id, mdio->reg_num, mdio->val_in);
8953
8954         if (!netif_running(dev))
8955                 return -EAGAIN;
8956
8957         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
8958 }
8959
8960 #ifdef CONFIG_NET_POLL_CONTROLLER
8961 static void poll_bnx2x(struct net_device *dev)
8962 {
8963         struct bnx2x *bp = netdev_priv(dev);
8964
8965         disable_irq(bp->pdev->irq);
8966         bnx2x_interrupt(bp->pdev->irq, dev);
8967         enable_irq(bp->pdev->irq);
8968 }
8969 #endif
8970
8971 static const struct net_device_ops bnx2x_netdev_ops = {
8972         .ndo_open               = bnx2x_open,
8973         .ndo_stop               = bnx2x_close,
8974         .ndo_start_xmit         = bnx2x_start_xmit,
8975         .ndo_select_queue       = bnx2x_select_queue,
8976         .ndo_set_multicast_list = bnx2x_set_rx_mode,
8977         .ndo_set_mac_address    = bnx2x_change_mac_addr,
8978         .ndo_validate_addr      = eth_validate_addr,
8979         .ndo_do_ioctl           = bnx2x_ioctl,
8980         .ndo_change_mtu         = bnx2x_change_mtu,
8981         .ndo_tx_timeout         = bnx2x_tx_timeout,
8982 #ifdef CONFIG_NET_POLL_CONTROLLER
8983         .ndo_poll_controller    = poll_bnx2x,
8984 #endif
8985 };
8986
8987 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8988                                     struct net_device *dev)
8989 {
8990         struct bnx2x *bp;
8991         int rc;
8992
8993         SET_NETDEV_DEV(dev, &pdev->dev);
8994         bp = netdev_priv(dev);
8995
8996         bp->dev = dev;
8997         bp->pdev = pdev;
8998         bp->flags = 0;
8999         bp->pf_num = PCI_FUNC(pdev->devfn);
9000
9001         rc = pci_enable_device(pdev);
9002         if (rc) {
9003                 dev_err(&bp->pdev->dev,
9004                         "Cannot enable PCI device, aborting\n");
9005                 goto err_out;
9006         }
9007
9008         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9009                 dev_err(&bp->pdev->dev,
9010                         "Cannot find PCI device base address, aborting\n");
9011                 rc = -ENODEV;
9012                 goto err_out_disable;
9013         }
9014
9015         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9016                 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
9017                        " base address, aborting\n");
9018                 rc = -ENODEV;
9019                 goto err_out_disable;
9020         }
9021
9022         if (atomic_read(&pdev->enable_cnt) == 1) {
9023                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9024                 if (rc) {
9025                         dev_err(&bp->pdev->dev,
9026                                 "Cannot obtain PCI resources, aborting\n");
9027                         goto err_out_disable;
9028                 }
9029
9030                 pci_set_master(pdev);
9031                 pci_save_state(pdev);
9032         }
9033
9034         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9035         if (bp->pm_cap == 0) {
9036                 dev_err(&bp->pdev->dev,
9037                         "Cannot find power management capability, aborting\n");
9038                 rc = -EIO;
9039                 goto err_out_release;
9040         }
9041
9042         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9043         if (bp->pcie_cap == 0) {
9044                 dev_err(&bp->pdev->dev,
9045                         "Cannot find PCI Express capability, aborting\n");
9046                 rc = -EIO;
9047                 goto err_out_release;
9048         }
9049
9050         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
9051                 bp->flags |= USING_DAC_FLAG;
9052                 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
9053                         dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
9054                                " failed, aborting\n");
9055                         rc = -EIO;
9056                         goto err_out_release;
9057                 }
9058
9059         } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
9060                 dev_err(&bp->pdev->dev,
9061                         "System does not support DMA, aborting\n");
9062                 rc = -EIO;
9063                 goto err_out_release;
9064         }
9065
9066         dev->mem_start = pci_resource_start(pdev, 0);
9067         dev->base_addr = dev->mem_start;
9068         dev->mem_end = pci_resource_end(pdev, 0);
9069
9070         dev->irq = pdev->irq;
9071
9072         bp->regview = pci_ioremap_bar(pdev, 0);
9073         if (!bp->regview) {
9074                 dev_err(&bp->pdev->dev,
9075                         "Cannot map register space, aborting\n");
9076                 rc = -ENOMEM;
9077                 goto err_out_release;
9078         }
9079
9080         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
9081                                         min_t(u64, BNX2X_DB_SIZE(bp),
9082                                               pci_resource_len(pdev, 2)));
9083         if (!bp->doorbells) {
9084                 dev_err(&bp->pdev->dev,
9085                         "Cannot map doorbell space, aborting\n");
9086                 rc = -ENOMEM;
9087                 goto err_out_unmap;
9088         }
9089
9090         bnx2x_set_power_state(bp, PCI_D0);
9091
9092         /* clean indirect addresses */
9093         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
9094                                PCICFG_VENDOR_ID_OFFSET);
9095         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
9096         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
9097         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
9098         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
9099
9100         /* Reset the load counter */
9101         bnx2x_clear_load_cnt(bp);
9102
9103         dev->watchdog_timeo = TX_TIMEOUT;
9104
9105         dev->netdev_ops = &bnx2x_netdev_ops;
9106         bnx2x_set_ethtool_ops(dev);
9107         dev->features |= NETIF_F_SG;
9108         dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
9109         if (bp->flags & USING_DAC_FLAG)
9110                 dev->features |= NETIF_F_HIGHDMA;
9111         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9112         dev->features |= NETIF_F_TSO6;
9113         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
9114
9115         dev->vlan_features |= NETIF_F_SG;
9116         dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
9117         if (bp->flags & USING_DAC_FLAG)
9118                 dev->vlan_features |= NETIF_F_HIGHDMA;
9119         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9120         dev->vlan_features |= NETIF_F_TSO6;
9121
9122 #ifdef BCM_DCB
9123         dev->dcbnl_ops = &bnx2x_dcbnl_ops;
9124 #endif
9125
9126         /* get_port_hwinfo() will set prtad and mmds properly */
9127         bp->mdio.prtad = MDIO_PRTAD_NONE;
9128         bp->mdio.mmds = 0;
9129         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
9130         bp->mdio.dev = dev;
9131         bp->mdio.mdio_read = bnx2x_mdio_read;
9132         bp->mdio.mdio_write = bnx2x_mdio_write;
9133
9134         return 0;
9135
9136 err_out_unmap:
9137         if (bp->regview) {
9138                 iounmap(bp->regview);
9139                 bp->regview = NULL;
9140         }
9141         if (bp->doorbells) {
9142                 iounmap(bp->doorbells);
9143                 bp->doorbells = NULL;
9144         }
9145
9146 err_out_release:
9147         if (atomic_read(&pdev->enable_cnt) == 1)
9148                 pci_release_regions(pdev);
9149
9150 err_out_disable:
9151         pci_disable_device(pdev);
9152         pci_set_drvdata(pdev, NULL);
9153
9154 err_out:
9155         return rc;
9156 }
9157
9158 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
9159                                                  int *width, int *speed)
9160 {
9161         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9162
9163         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
9164
9165         /* return value of 1=2.5GHz 2=5GHz */
9166         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
9167 }
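/* Illustrative decode: with the encoding noted above, a width field of 8
 * and a speed field of 2 describe a x8, 5GHz (Gen2) link on E1x parts;
 * note bnx2x_init_one() treats speed == 1 as Gen2 on E2 devices instead.
 */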
9168
9169 static int bnx2x_check_firmware(struct bnx2x *bp)
9170 {
9171         const struct firmware *firmware = bp->firmware;
9172         struct bnx2x_fw_file_hdr *fw_hdr;
9173         struct bnx2x_fw_file_section *sections;
9174         u32 offset, len, num_ops;
9175         u16 *ops_offsets;
9176         int i;
9177         const u8 *fw_ver;
9178
9179         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
9180                 return -EINVAL;
9181
9182         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
9183         sections = (struct bnx2x_fw_file_section *)fw_hdr;
9184
9185         /* Make sure none of the offsets and sizes make us read beyond
9186          * the end of the firmware data */
9187         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
9188                 offset = be32_to_cpu(sections[i].offset);
9189                 len = be32_to_cpu(sections[i].len);
9190                 if (offset + len > firmware->size) {
9191                         dev_err(&bp->pdev->dev,
9192                                 "Section %d length is out of bounds\n", i);
9193                         return -EINVAL;
9194                 }
9195         }
9196
9197         /* Likewise for the init_ops offsets */
9198         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
9199         ops_offsets = (u16 *)(firmware->data + offset);
9200         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
9201
9202         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
9203                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
9204                         dev_err(&bp->pdev->dev,
9205                                 "Section offset %d is out of bounds\n", i);
9206                         return -EINVAL;
9207                 }
9208         }
9209
9210         /* Check FW version */
9211         offset = be32_to_cpu(fw_hdr->fw_version.offset);
9212         fw_ver = firmware->data + offset;
9213         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
9214             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
9215             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
9216             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
9217                 dev_err(&bp->pdev->dev,
9218                         "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
9219                        fw_ver[0], fw_ver[1], fw_ver[2],
9220                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
9221                        BCM_5710_FW_MINOR_VERSION,
9222                        BCM_5710_FW_REVISION_VERSION,
9223                        BCM_5710_FW_ENGINEERING_VERSION);
9224                 return -EINVAL;
9225         }
9226
9227         return 0;
9228 }
9229
9230 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
9231 {
9232         const __be32 *source = (const __be32 *)_source;
9233         u32 *target = (u32 *)_target;
9234         u32 i;
9235
9236         for (i = 0; i < n/4; i++)
9237                 target[i] = be32_to_cpu(source[i]);
9238 }
9239
9240 /*
9241    Ops array is stored in the following format:
9242    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
9243  */
9244 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
9245 {
9246         const __be32 *source = (const __be32 *)_source;
9247         struct raw_op *target = (struct raw_op *)_target;
9248         u32 i, j, tmp;
9249
9250         for (i = 0, j = 0; i < n/8; i++, j += 2) {
9251                 tmp = be32_to_cpu(source[j]);
9252                 target[i].op = (tmp >> 24) & 0xff;
9253                 target[i].offset = tmp & 0xffffff;
9254                 target[i].raw_data = be32_to_cpu(source[j + 1]);
9255         }
9256 }
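/* Worked example (illustrative): the big-endian pair 0x07001020 0x0000abcd
 * unpacks to op = 0x07, offset = 0x001020 and raw_data = 0x0000abcd --
 * i.e. each 8-byte record in the firmware file becomes one struct raw_op.
 */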
9257
9258 /*
9259  * IRO array is stored in the following format:
9260  * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
9261  */
9262 static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
9263 {
9264         const __be32 *source = (const __be32 *)_source;
9265         struct iro *target = (struct iro *)_target;
9266         u32 i, j, tmp;
9267
9268         for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
9269                 target[i].base = be32_to_cpu(source[j]);
9270                 j++;
9271                 tmp = be32_to_cpu(source[j]);
9272                 target[i].m1 = (tmp >> 16) & 0xffff;
9273                 target[i].m2 = tmp & 0xffff;
9274                 j++;
9275                 tmp = be32_to_cpu(source[j]);
9276                 target[i].m3 = (tmp >> 16) & 0xffff;
9277                 target[i].size = tmp & 0xffff;
9278                 j++;
9279         }
9280 }
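/* Worked example (illustrative): one IRO record is three big-endian words,
 * e.g. { 0x00001000, 0x00800010, 0x00400008 }, which unpack to
 * base = 0x1000, m1 = 0x0080, m2 = 0x0010, m3 = 0x0040, size = 0x0008.
 */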
9281
9282 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
9283 {
9284         const __be16 *source = (const __be16 *)_source;
9285         u16 *target = (u16 *)_target;
9286         u32 i;
9287
9288         for (i = 0; i < n/2; i++)
9289                 target[i] = be16_to_cpu(source[i]);
9290 }
9291
9292 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
9293 do {                                                                    \
9294         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
9295         bp->arr = kmalloc(len, GFP_KERNEL);                             \
9296         if (!bp->arr) {                                                 \
9297                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
9298                 goto lbl;                                               \
9299         }                                                               \
9300         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
9301              (u8 *)bp->arr, len);                                       \
9302 } while (0)
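/* Illustrative expansion (not generated code): BNX2X_ALLOC_AND_SET(init_data,
 * request_firmware_exit, be32_to_cpu_n) kmalloc()s fw_hdr->init_data.len
 * bytes into bp->init_data, byte-swaps that section of the firmware blob
 * into it via be32_to_cpu_n(), and jumps to request_firmware_exit if the
 * allocation fails.
 */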
9303
9304 int bnx2x_init_firmware(struct bnx2x *bp)
9305 {
9306         const char *fw_file_name;
9307         struct bnx2x_fw_file_hdr *fw_hdr;
9308         int rc;
9309
9310         if (CHIP_IS_E1(bp))
9311                 fw_file_name = FW_FILE_NAME_E1;
9312         else if (CHIP_IS_E1H(bp))
9313                 fw_file_name = FW_FILE_NAME_E1H;
9314         else if (CHIP_IS_E2(bp))
9315                 fw_file_name = FW_FILE_NAME_E2;
9316         else {
9317                 BNX2X_ERR("Unsupported chip revision\n");
9318                 return -EINVAL;
9319         }
9320
9321         BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
9322
9323         rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
9324         if (rc) {
9325                 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
9326                 goto request_firmware_exit;
9327         }
9328
9329         rc = bnx2x_check_firmware(bp);
9330         if (rc) {
9331                 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
9332                 goto request_firmware_exit;
9333         }
9334
9335         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
9336
9337         /* Initialize the pointers to the init arrays */
9338         /* Blob */
9339         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
9340
9341         /* Opcodes */
9342         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
9343
9344         /* Offsets */
9345         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
9346                             be16_to_cpu_n);
9347
9348         /* STORMs firmware */
9349         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9350                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
9351         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
9352                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
9353         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9354                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
9355         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
9356                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
9357         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9358                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
9359         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
9360                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
9361         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9362                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
9363         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
9364                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
9365         /* IRO */
9366         BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
9367
9368         return 0;
9369
9370 iro_alloc_err:
9371         kfree(bp->init_ops_offsets);
9372 init_offsets_alloc_err:
9373         kfree(bp->init_ops);
9374 init_ops_alloc_err:
9375         kfree(bp->init_data);
9376 request_firmware_exit:
9377         release_firmware(bp->firmware);
9378
9379         return rc;
9380 }
9381
9382 static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9383 {
9384         int cid_count = L2_FP_COUNT(l2_cid_count);
9385
9386 #ifdef BCM_CNIC
9387         cid_count += CNIC_CID_MAX;
9388 #endif
9389         return roundup(cid_count, QM_CID_ROUND);
9390 }
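/* Illustrative: the QM hands out contexts in fixed-size chunks, so the
 * count is padded to a multiple of QM_CID_ROUND; e.g. if QM_CID_ROUND
 * were 1024, a cid_count of 1100 would be rounded up to 2048.
 */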
9391
9392 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9393                                     const struct pci_device_id *ent)
9394 {
9395         struct net_device *dev = NULL;
9396         struct bnx2x *bp;
9397         int pcie_width, pcie_speed;
9398         int rc, cid_count;
9399
9400         switch (ent->driver_data) {
9401         case BCM57710:
9402         case BCM57711:
9403         case BCM57711E:
9404                 cid_count = FP_SB_MAX_E1x;
9405                 break;
9406
9407         case BCM57712:
9408         case BCM57712E:
9409                 cid_count = FP_SB_MAX_E2;
9410                 break;
9411
9412         default:
9413                 pr_err("Unknown board_type (%ld), aborting\n",
9414                            ent->driver_data);
9415                 return -ENODEV;
9416         }
9417
9418         cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
9419
9420         /* dev zeroed in init_etherdev */
9421         dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
9422         if (!dev) {
9423                 dev_err(&pdev->dev, "Cannot allocate net device\n");
9424                 return -ENOMEM;
9425         }
9426
9427         bp = netdev_priv(dev);
9428         bp->msg_enable = debug;
9429
9430         pci_set_drvdata(pdev, dev);
9431
9432         bp->l2_cid_count = cid_count;
9433
9434         rc = bnx2x_init_dev(pdev, dev);
9435         if (rc < 0) {
9436                 free_netdev(dev);
9437                 return rc;
9438         }
9439
9440         rc = bnx2x_init_bp(bp);
9441         if (rc)
9442                 goto init_one_exit;
9443
9444         /* calc qm_cid_count */
9445         bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9446
9447 #ifdef BCM_CNIC
9448         /* disable the FCoE L2 queue for E1x */
9449         if (CHIP_IS_E1x(bp))
9450                 bp->flags |= NO_FCOE_FLAG;
9451
9452 #endif
9453
9454         /* Configure interrupt mode: try to enable MSI-X/MSI if
9455          * needed, set bp->num_queues appropriately.
9456          */
9457         bnx2x_set_int_mode(bp);
9458
9459         /* Add all NAPI objects */
9460         bnx2x_add_all_napi(bp);
9461
9462         rc = register_netdev(dev);
9463         if (rc) {
9464                 dev_err(&pdev->dev, "Cannot register net device\n");
9465                 goto init_one_exit;
9466         }
9467
9468 #ifdef BCM_CNIC
9469         if (!NO_FCOE(bp)) {
9470                 /* Add storage MAC address */
9471                 rtnl_lock();
9472                 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9473                 rtnl_unlock();
9474         }
9475 #endif
9476
9477         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
9478
9479         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9480                " IRQ %d, ", board_info[ent->driver_data].name,
9481                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
9482                pcie_width,
9483                ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9484                  (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9485                                                 "5GHz (Gen2)" : "2.5GHz",
9486                dev->base_addr, bp->pdev->irq);
9487         pr_cont("node addr %pM\n", dev->dev_addr);
9488
9489         return 0;
9490
9491 init_one_exit:
9492         if (bp->regview)
9493                 iounmap(bp->regview);
9494
9495         if (bp->doorbells)
9496                 iounmap(bp->doorbells);
9497
9498         free_netdev(dev);
9499
9500         if (atomic_read(&pdev->enable_cnt) == 1)
9501                 pci_release_regions(pdev);
9502
9503         pci_disable_device(pdev);
9504         pci_set_drvdata(pdev, NULL);
9505
9506         return rc;
9507 }
9508
9509 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9510 {
9511         struct net_device *dev = pci_get_drvdata(pdev);
9512         struct bnx2x *bp;
9513
9514         if (!dev) {
9515                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
9516                 return;
9517         }
9518         bp = netdev_priv(dev);
9519
9520 #ifdef BCM_CNIC
9521         /* Delete storage MAC address */
9522         if (!NO_FCOE(bp)) {
9523                 rtnl_lock();
9524                 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9525                 rtnl_unlock();
9526         }
9527 #endif
9528
9529         unregister_netdev(dev);
9530
9531         /* Delete all NAPI objects */
9532         bnx2x_del_all_napi(bp);
9533
9534         /* Power on: we can't let the PCI layer write to us while we are in D3 */
9535         bnx2x_set_power_state(bp, PCI_D0);
9536
9537         /* Disable MSI/MSI-X */
9538         bnx2x_disable_msi(bp);
9539
9540         /* Power off */
9541         bnx2x_set_power_state(bp, PCI_D3hot);
9542
9543         /* Make sure RESET task is not scheduled before continuing */
9544         cancel_delayed_work_sync(&bp->reset_task);
9545
9546         if (bp->regview)
9547                 iounmap(bp->regview);
9548
9549         if (bp->doorbells)
9550                 iounmap(bp->doorbells);
9551
9552         bnx2x_free_mem_bp(bp);
9553
9554         free_netdev(dev);
9555
9556         if (atomic_read(&pdev->enable_cnt) == 1)
9557                 pci_release_regions(pdev);
9558
9559         pci_disable_device(pdev);
9560         pci_set_drvdata(pdev, NULL);
9561 }
9562
9563 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9564 {
9565         int i;
9566
9567         bp->state = BNX2X_STATE_ERROR;
9568
9569         bp->rx_mode = BNX2X_RX_MODE_NONE;
9570
9571         bnx2x_netif_stop(bp, 0);
9572         netif_carrier_off(bp->dev);
9573
9574         del_timer_sync(&bp->timer);
9575         bp->stats_state = STATS_STATE_DISABLED;
9576         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9577
9578         /* Release IRQs */
9579         bnx2x_free_irq(bp);
9580
9581         /* Free SKBs, SGEs, TPA pool and driver internals */
9582         bnx2x_free_skbs(bp);
9583
9584         for_each_rx_queue(bp, i)
9585                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
9586
9587         bnx2x_free_mem(bp);
9588
9589         bp->state = BNX2X_STATE_CLOSED;
9590
9591         return 0;
9592 }
9593
9594 static void bnx2x_eeh_recover(struct bnx2x *bp)
9595 {
9596         u32 val;
9597
9598         mutex_init(&bp->port.phy_mutex);
9599
9600         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9601         bp->link_params.shmem_base = bp->common.shmem_base;
9602         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9603
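        /* Sanity-check that the shmem offset lies inside the window where
         * the MCP scratchpad is expected; anything else means the MCP is
         * not running and we must fall back to NO_MCP mode.
         */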
9604         if (!bp->common.shmem_base ||
9605             (bp->common.shmem_base < 0xA0000) ||
9606             (bp->common.shmem_base >= 0xC0000)) {
9607                 BNX2X_DEV_INFO("MCP not active\n");
9608                 bp->flags |= NO_MCP_FLAG;
9609                 return;
9610         }
9611
9612         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9613         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9614                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9615                 BNX2X_ERR("BAD MCP validity signature\n");
9616
9617         if (!BP_NOMCP(bp)) {
9618                 bp->fw_seq =
9619                     (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9620                     DRV_MSG_SEQ_NUMBER_MASK);
9621                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9622         }
9623 }
9624
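/* PCI error recovery: the PCI core drives these callbacks in order --
 * error_detected() first, then slot_reset() after the link has been
 * reset, and finally resume() once traffic may flow again.
 */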
9625 /**
9626  * bnx2x_io_error_detected - called when PCI error is detected
9627  * @pdev: Pointer to PCI device
9628  * @state: The current PCI connection state
9629  *
9630  * This function is called after a PCI bus error affecting
9631  * this device has been detected.
9632  */
9633 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9634                                                 pci_channel_state_t state)
9635 {
9636         struct net_device *dev = pci_get_drvdata(pdev);
9637         struct bnx2x *bp = netdev_priv(dev);
9638
9639         rtnl_lock();
9640
9641         netif_device_detach(dev);
9642
9643         if (state == pci_channel_io_perm_failure) {
9644                 rtnl_unlock();
9645                 return PCI_ERS_RESULT_DISCONNECT;
9646         }
9647
9648         if (netif_running(dev))
9649                 bnx2x_eeh_nic_unload(bp);
9650
9651         pci_disable_device(pdev);
9652
9653         rtnl_unlock();
9654
9655         /* Request a slot reset */
9656         return PCI_ERS_RESULT_NEED_RESET;
9657 }
9658
9659 /**
9660  * bnx2x_io_slot_reset - called after the PCI bus has been reset
9661  * @pdev: Pointer to PCI device
9662  *
9663  * Restart the card from scratch, as if from a cold boot.
9664  */
9665 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9666 {
9667         struct net_device *dev = pci_get_drvdata(pdev);
9668         struct bnx2x *bp = netdev_priv(dev);
9669
9670         rtnl_lock();
9671
9672         if (pci_enable_device(pdev)) {
9673                 dev_err(&pdev->dev,
9674                         "Cannot re-enable PCI device after reset\n");
9675                 rtnl_unlock();
9676                 return PCI_ERS_RESULT_DISCONNECT;
9677         }
9678
9679         pci_set_master(pdev);
9680         pci_restore_state(pdev);
9681
9682         if (netif_running(dev))
9683                 bnx2x_set_power_state(bp, PCI_D0);
9684
9685         rtnl_unlock();
9686
9687         return PCI_ERS_RESULT_RECOVERED;
9688 }
9689
9690 /**
9691  * bnx2x_io_resume - called when traffic can start flowing again
9692  * @pdev: Pointer to PCI device
9693  *
9694  * This callback is called when the error recovery driver tells us that
9695  * it's OK to resume normal operation.
9696  */
9697 static void bnx2x_io_resume(struct pci_dev *pdev)
9698 {
9699         struct net_device *dev = pci_get_drvdata(pdev);
9700         struct bnx2x *bp = netdev_priv(dev);
9701
9702         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
9703                 printk(KERN_ERR "Handling parity error recovery. "
9704                                 "Try again later\n");
9705                 return;
9706         }
9707
9708         rtnl_lock();
9709
9710         bnx2x_eeh_recover(bp);
9711
9712         if (netif_running(dev))
9713                 bnx2x_nic_load(bp, LOAD_NORMAL);
9714
9715         netif_device_attach(dev);
9716
9717         rtnl_unlock();
9718 }
9719
9720 static struct pci_error_handlers bnx2x_err_handler = {
9721         .error_detected = bnx2x_io_error_detected,
9722         .slot_reset     = bnx2x_io_slot_reset,
9723         .resume         = bnx2x_io_resume,
9724 };
9725
9726 static struct pci_driver bnx2x_pci_driver = {
9727         .name        = DRV_MODULE_NAME,
9728         .id_table    = bnx2x_pci_tbl,
9729         .probe       = bnx2x_init_one,
9730         .remove      = __devexit_p(bnx2x_remove_one),
9731         .suspend     = bnx2x_suspend,
9732         .resume      = bnx2x_resume,
9733         .err_handler = &bnx2x_err_handler,
9734 };
9735
9736 static int __init bnx2x_init(void)
9737 {
9738         int ret;
9739
9740         pr_info("%s", version);
9741
9742         bnx2x_wq = create_singlethread_workqueue("bnx2x");
9743         if (bnx2x_wq == NULL) {
9744                 pr_err("Cannot create workqueue\n");
9745                 return -ENOMEM;
9746         }
9747
9748         ret = pci_register_driver(&bnx2x_pci_driver);
9749         if (ret) {
9750                 pr_err("Cannot register driver\n");
9751                 destroy_workqueue(bnx2x_wq);
9752         }
9753         return ret;
9754 }
9755
9756 static void __exit bnx2x_cleanup(void)
9757 {
9758         pci_unregister_driver(&bnx2x_pci_driver);
9759
9760         destroy_workqueue(bnx2x_wq);
9761 }
9762
9763 module_init(bnx2x_init);
9764 module_exit(bnx2x_cleanup);
9765
9766 #ifdef BCM_CNIC
9767
9768 /* count denotes the number of new completions we have seen */
9769 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9770 {
9771         struct eth_spe *spe;
9772
9773 #ifdef BNX2X_STOP_ON_ERROR
9774         if (unlikely(bp->panic))
9775                 return;
9776 #endif
9777
9778         spin_lock_bh(&bp->spq_lock);
9779         BUG_ON(bp->cnic_spq_pending < count);
9780         bp->cnic_spq_pending -= count;
9781
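        /* Drain queued kwqes from the KWQ ring onto the SPQ, for as long
         * as the SPQ/L5 credit allows.
         */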
9783         for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
9784                 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
9785                                 & SPE_HDR_CONN_TYPE) >>
9786                                 SPE_HDR_CONN_TYPE_SHIFT;
9787
9788                 /* Set validation for the iSCSI L2 client before sending
9789                  * the SETUP ramrod
9790                  */
9791                 if (type == ETH_CONNECTION_TYPE) {
9792                         u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
9793                                              hdr.conn_and_cmd_data) >>
9794                                 SPE_HDR_CMD_ID_SHIFT) & 0xff;
9795
9796                         if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
9797                                 bnx2x_set_ctx_validation(&bp->context.
9798                                                 vcxt[BNX2X_ISCSI_ETH_CID].eth,
9799                                         HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9800                 }
9801
9802                 /* No more than 8 L2/COMMON SPEs and no more than 8 L5
9803                  * SPEs may be in flight at any time.
9804                  */
9805                 if ((type == NONE_CONNECTION_TYPE) ||
9806                     (type == ETH_CONNECTION_TYPE)) {
9807                         if (!atomic_read(&bp->spq_left))
9808                                 break;
9809                         else
9810                                 atomic_dec(&bp->spq_left);
9811                 } else if ((type == ISCSI_CONNECTION_TYPE) ||
9812                            (type == FCOE_CONNECTION_TYPE)) {
9813                         if (bp->cnic_spq_pending >=
9814                             bp->cnic_eth_dev.max_kwqe_pending)
9815                                 break;
9816                         else
9817                                 bp->cnic_spq_pending++;
9818                 } else {
9819                         BNX2X_ERR("Unknown SPE type: %d\n", type);
9820                         bnx2x_panic();
9821                         break;
9822                 }
9823
9824                 spe = bnx2x_sp_get_next(bp);
9825                 *spe = *bp->cnic_kwq_cons;
9826
9827                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
9828                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
9829
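                /* advance the KWQ consumer, wrapping at the end of the ring */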
9830                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
9831                         bp->cnic_kwq_cons = bp->cnic_kwq;
9832                 else
9833                         bp->cnic_kwq_cons++;
9834         }
9835         bnx2x_sp_prod_update(bp);
9836         spin_unlock_bh(&bp->spq_lock);
9837 }
9838
9839 static int bnx2x_cnic_sp_queue(struct net_device *dev,
9840                                struct kwqe_16 *kwqes[], u32 count)
9841 {
9842         struct bnx2x *bp = netdev_priv(dev);
9843         int i;
9844
9845 #ifdef BNX2X_STOP_ON_ERROR
9846         if (unlikely(bp->panic))
9847                 return -EIO;
9848 #endif
9849
9850         spin_lock_bh(&bp->spq_lock);
9851
9852         for (i = 0; i < count; i++) {
9853                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
9854
9855                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
9856                         break;
9857
9858                 *bp->cnic_kwq_prod = *spe;
9859
9860                 bp->cnic_kwq_pending++;
9861
9862                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
9863                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
9864                    spe->data.update_data_addr.hi,
9865                    spe->data.update_data_addr.lo,
9866                    bp->cnic_kwq_pending);
9867
9868                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
9869                         bp->cnic_kwq_prod = bp->cnic_kwq;
9870                 else
9871                         bp->cnic_kwq_prod++;
9872         }
9873
9874         spin_unlock_bh(&bp->spq_lock);
9875
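        /* if there is still room toward the L5 credit limit, try to push
         * the newly queued kwqes onto the SPQ right away
         */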
9876         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
9877                 bnx2x_cnic_sp_post(bp, 0);
9878
9879         return i;
9880 }
9881
9882 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9883 {
9884         struct cnic_ops *c_ops;
9885         int rc = 0;
9886
9887         mutex_lock(&bp->cnic_mutex);
9888         c_ops = bp->cnic_ops;
9889         if (c_ops)
9890                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9891         mutex_unlock(&bp->cnic_mutex);
9892
9893         return rc;
9894 }
9895
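/* Same as bnx2x_cnic_ctl_send(), but safe to call from BH context:
 * it relies on RCU rather than cnic_mutex to dereference cnic_ops.
 */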
9896 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9897 {
9898         struct cnic_ops *c_ops;
9899         int rc = 0;
9900
9901         rcu_read_lock();
9902         c_ops = rcu_dereference(bp->cnic_ops);
9903         if (c_ops)
9904                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9905         rcu_read_unlock();
9906
9907         return rc;
9908 }
9909
9910 /*
9911  * For commands that carry no data.
9912  */
9913 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
9914 {
9915         struct cnic_ctl_info ctl = {0};
9916
9917         ctl.cmd = cmd;
9918
9919         return bnx2x_cnic_ctl_send(bp, &ctl);
9920 }
9921
9922 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9923 {
9924         struct cnic_ctl_info ctl;
9925
9926         /* first we tell CNIC and only then we count this as a completion */
9927         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9928         ctl.data.comp.cid = cid;
9929
9930         bnx2x_cnic_ctl_send_bh(bp, &ctl);
9931         bnx2x_cnic_sp_post(bp, 0);
9932 }
9933
9934 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
9935 {
9936         struct bnx2x *bp = netdev_priv(dev);
9937         int rc = 0;
9938
9939         switch (ctl->cmd) {
9940         case DRV_CTL_CTXTBL_WR_CMD: {
9941                 u32 index = ctl->data.io.offset;
9942                 dma_addr_t addr = ctl->data.io.dma_addr;
9943
9944                 bnx2x_ilt_wr(bp, index, addr);
9945                 break;
9946         }
9947
9948         case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
9949                 int count = ctl->data.credit.credit_count;
9950
9951                 bnx2x_cnic_sp_post(bp, count);
9952                 break;
9953         }
9954
9955         /* rtnl_lock is held. */
9956         case DRV_CTL_START_L2_CMD: {
9957                 u32 cli = ctl->data.ring.client_id;
9958
9959                 /* Clear the FCoE FIP and All-ENode MAC addresses first */
9960                 bnx2x_del_fcoe_eth_macs(bp);
9961
9962                 /* Set iSCSI MAC address */
9963                 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
9964
9965                 mmiowb();
9966                 barrier();
9967
9968                 /* Start accepting on the iSCSI L2 ring. Accept all
9969                  * multicasts, because that is the only way for the UIO
9970                  * client to receive them: in non-promiscuous mode only
9971                  * one client per function (the leading one, in our case)
9972                  * receives multicast packets.
9973                  */
9974                 bnx2x_rxq_set_mac_filters(bp, cli,
9975                         BNX2X_ACCEPT_UNICAST |
9976                         BNX2X_ACCEPT_BROADCAST |
9977                         BNX2X_ACCEPT_ALL_MULTICAST);
9978                 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9979
9980                 break;
9981         }
9982
9983         /* rtnl_lock is held. */
9984         case DRV_CTL_STOP_L2_CMD: {
9985                 u32 cli = ctl->data.ring.client_id;
9986
9987                 /* Stop accepting on iSCSI L2 ring */
9988                 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
9989                 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9990
9991                 mmiowb();
9992                 barrier();
9993
9994                 /* Unset iSCSI L2 MAC */
9995                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
9996                 break;
9997         }
9998         case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
9999                 int count = ctl->data.credit.credit_count;
10000
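                /* the barrier helpers make the atomic credit return act as
                 * a full memory barrier on either side
                 */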
10001                 smp_mb__before_atomic_inc();
10002                 atomic_add(count, &bp->spq_left);
10003                 smp_mb__after_atomic_inc();
10004                 break;
10005         }
10006
10007         default:
10008                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
10009                 rc = -EINVAL;
10010         }
10011
10012         return rc;
10013 }
10014
10015 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
10016 {
10017         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10018
10019         if (bp->flags & USING_MSIX_FLAG) {
10020                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
10021                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
10022                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
10023         } else {
10024                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
10025                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
10026         }
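        /* irq_arr[0] carries the CNIC status block, irq_arr[1] the
         * driver's default status block
         */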
10027         if (CHIP_IS_E2(bp))
10028                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
10029         else
10030                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
10031
10032         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
10033         cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
10034         cp->irq_arr[1].status_blk = bp->def_status_blk;
10035         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
10036         cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
10037
10038         cp->num_irq = 2;
10039 }
10040
10041 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
10042                                void *data)
10043 {
10044         struct bnx2x *bp = netdev_priv(dev);
10045         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10046
10047         if (ops == NULL)
10048                 return -EINVAL;
10049
10050         if (atomic_read(&bp->intr_sem) != 0)
10051                 return -EBUSY;
10052
10053         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
10054         if (!bp->cnic_kwq)
10055                 return -ENOMEM;
10056
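        /* the KWQ ring starts empty (cons == prod at the base of the page);
         * cnic_kwq_last marks the last valid slot, after which the
         * producer and consumer wrap back to the base
         */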
10057         bp->cnic_kwq_cons = bp->cnic_kwq;
10058         bp->cnic_kwq_prod = bp->cnic_kwq;
10059         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
10060
10061         bp->cnic_spq_pending = 0;
10062         bp->cnic_kwq_pending = 0;
10063
10064         bp->cnic_data = data;
10065
10066         cp->num_irq = 0;
10067         cp->drv_state = CNIC_DRV_STATE_REGD;
10068         cp->iro_arr = bp->iro_arr;
10069
10070         bnx2x_setup_cnic_irq_info(bp);
10071
10072         rcu_assign_pointer(bp->cnic_ops, ops);
10073
10074         return 0;
10075 }
10076
10077 static int bnx2x_unregister_cnic(struct net_device *dev)
10078 {
10079         struct bnx2x *bp = netdev_priv(dev);
10080         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10081
10082         mutex_lock(&bp->cnic_mutex);
10083         cp->drv_state = 0;
10084         rcu_assign_pointer(bp->cnic_ops, NULL);
10085         mutex_unlock(&bp->cnic_mutex);
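        /* make sure no RCU reader still holds the old cnic_ops before
         * freeing the KWQ ring
         */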
10086         synchronize_rcu();
10087         kfree(bp->cnic_kwq);
10088         bp->cnic_kwq = NULL;
10089
10090         return 0;
10091 }
10092
10093 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
10094 {
10095         struct bnx2x *bp = netdev_priv(dev);
10096         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10097
10098         cp->drv_owner = THIS_MODULE;
10099         cp->chip_id = CHIP_ID(bp);
10100         cp->pdev = bp->pdev;
10101         cp->io_base = bp->regview;
10102         cp->io_base2 = bp->doorbells;
10103         cp->max_kwqe_pending = 8;
10104         cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
10105         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
10106                              bnx2x_cid_ilt_lines(bp);
10107         cp->ctx_tbl_len = CNIC_ILT_LINES;
10108         cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
10109         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
10110         cp->drv_ctl = bnx2x_drv_ctl;
10111         cp->drv_register_cnic = bnx2x_register_cnic;
10112         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
10113         cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
10114         cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
10115                 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
10116         cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
10117
10118         DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
10119                          "starting cid %d\n",
10120            cp->ctx_blk_size,
10121            cp->ctx_tbl_offset,
10122            cp->ctx_tbl_len,
10123            cp->starting_cid);
10124         return cp;
10125 }
10126 EXPORT_SYMBOL(bnx2x_cnic_probe);
10127
10128 #endif /* BCM_CNIC */
10129