/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2         "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
                   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
                                "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
        BCM57712 = 3,
        BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" },
        { "Broadcom NetXtreme II BCM57712 XGb" },
        { "Broadcom NetXtreme II BCM57712E XGb" }
};

#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712         0x1662
#endif
#ifndef PCI_DEVICE_ID_NX2_57712E
#define PCI_DEVICE_ID_NX2_57712E        0x1663
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

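/* The helpers below program per-function/per-client structures kept in the
 * STORM processors' internal memories (reached through the BAR_*_INTMEM
 * windows). DMA addresses are written as two 32-bit halves, low word first.
 */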
static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
                                       u32 addr, dma_addr_t mapping)
{
        REG_WR(bp,  addr, U64_LO(mapping));
        REG_WR(bp,  addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
                                       u32 addr, size_t size, u32 val)
{
        int i;
        for (i = 0; i < size/4; i++)
                REG_WR(bp,  addr + (i * 4), val);
}

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
                                            u8 port, u16 stat_id)
{
        size_t size = sizeof(struct ustorm_per_client_stats);

        u32 addr = BAR_USTRORM_INTMEM +
                        USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

        __storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
                                            u8 port, u16 stat_id)
{
        size_t size = sizeof(struct tstorm_per_client_stats);

        u32 addr = BAR_TSTRORM_INTMEM +
                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

        __storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
                                            u8 port, u16 stat_id)
{
        size_t size = sizeof(struct xstorm_per_client_stats);

        u32 addr = BAR_XSTRORM_INTMEM +
                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

        __storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_spq_addr(struct bnx2x *bp,
                                         dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = XSEM_REG_FAST_MEMORY +
                        XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
        REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
                                struct tstorm_eth_function_common_config *tcfg,
                                u16 abs_fid)
{
        size_t size = sizeof(struct tstorm_eth_function_common_config);

        u32 addr = BAR_TSTRORM_INTMEM +
                        TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);

        u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);

        u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);

        u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);

        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
                                           dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_XSTRORM_INTMEM +
                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
                                           dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_TSTRORM_INTMEM +
                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
                                           dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_USTRORM_INTMEM +
                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
                                           dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_CSTRORM_INTMEM +
                CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
                                         u16 pf_id)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
                                        u8 enable)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
                                struct event_ring_data *eq_data,
                                u16 pfid)
{
        size_t size = sizeof(struct event_ring_data);

        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

        __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
                                        u16 pfid)
{
        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
        REG_WR16(bp, addr, eq_prod);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
                                             u16 fw_sb_id, u8 sb_index,
                                             u8 ticks)
{
        int index_offset = CHIP_IS_E2(bp) ?
                offsetof(struct hc_status_block_data_e2, index_data) :
                offsetof(struct hc_status_block_data_e1x, index_data);
        u32 addr = BAR_CSTRORM_INTMEM +
                        CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
                        index_offset +
                        sizeof(struct hc_index_data)*sb_index +
                        offsetof(struct hc_index_data, timeout);
        REG_WR8(bp, addr, ticks);
        DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
                          port, fw_sb_id, sb_index, ticks);
}
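
/* Enable or disable host coalescing for a single status-block index by
 * read-modify-writing the HC_ENABLED bit in the index flags kept in
 * CSTORM internal memory.
 */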
static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
                                             u16 fw_sb_id, u8 sb_index,
                                             u8 disable)
{
        u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
        int index_offset = CHIP_IS_E2(bp) ?
                offsetof(struct hc_status_block_data_e2, index_data) :
                offsetof(struct hc_status_block_data_e1x, index_data);
        u32 addr = BAR_CSTRORM_INTMEM +
                        CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
                        index_offset +
                        sizeof(struct hc_index_data)*sb_index +
                        offsetof(struct hc_index_data, flags);
        u16 flags = REG_RD16(bp, addr);
        /* clear and set */
        flags &= ~HC_INDEX_DATA_HC_ENABLED;
        flags |= enable_flag;
        REG_WR16(bp, addr, flags);
        DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
                          port, fw_sb_id, sb_index, disable);
}

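/* Indirect register access: the target GRC address is programmed through
 * the PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA config-space window rather than a
 * BAR mapping; the window is parked back at PCICFG_VENDOR_ID_OFFSET
 * afterwards, leaving it pointing at a harmless location.
 */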
/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

#define DMAE_DP_SRC_GRC         "grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI         "pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC         "grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI         "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE        "dst_addr [none]"

void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
{
        u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

        switch (dmae->opcode & DMAE_COMMAND_DST) {
        case DMAE_CMD_DST_PCI:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%08x], len [%d*4], dst [%x:%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        case DMAE_CMD_DST_GRC:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->dst_addr_lo >> 2,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%08x], len [%d*4], dst [%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->dst_addr_lo >> 2,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        default:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
                                    "dst_addr [none]\n"
                           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
                                    "dst_addr [none]\n"
                           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        }
}

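/* The DMAE block is the device's DMA engine: a command is staged in the
 * DMAE command memory and kicked by writing 1 to the per-channel GO
 * register; dmae_reg_go_c[] maps a channel index to its GO register.
 */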
const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
        return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
                           DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
        return opcode & ~DMAE_CMD_SRC_RESET;
}

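/* Compose a DMAE opcode: source/destination types, the issuing function's
 * port and VN, the error policy, the host-endianness swap mode and,
 * optionally, a completion destination.
 */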
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
                             bool with_comp, u8 comp_type)
{
        u32 opcode = 0;

        opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
                   (dst_type << DMAE_COMMAND_DST_SHIFT));

        opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

        opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
        opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
                   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
        opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
        opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
        opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
        if (with_comp)
                opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
        return opcode;
}

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
                               u8 src_type, u8 dst_type)
{
        memset(dmae, 0, sizeof(struct dmae_command));

        /* set the opcode */
        dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
                                         true, DMAE_COMP_PCI);

        /* fill in the completion parameters */
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
{
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
        int rc = 0;

        DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        /* lock the dmae channel */
        mutex_lock(&bp->dmae_mutex);

        /* reset completion */
        *wb_comp = 0;

        /* post the command on the channel used for initializations */
        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        /* wait for completion */
        udelay(5);
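        /* Poll the completion word that the DMAE engine writes back over
         * PCI; cnt bounds the wait (with a much larger budget on slow
         * emulation chips). The PCI-error flag is masked here and checked
         * separately once the loop exits.
         */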
        while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        rc = DMAE_TIMEOUT;
                        goto unlock;
                }
                cnt--;
                udelay(50);
        }
        if (*wb_comp & DMAE_PCI_ERR_FLAG) {
                BNX2X_ERR("DMAE PCI error!\n");
                rc = DMAE_PCI_ERROR;
        }

        DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
        mutex_unlock(&bp->dmae_mutex);
        return rc;
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        /* set opcode and fixed command fields */
        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

        /* fill in addresses and len */
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;

        bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

        /* issue the command and wait for completion */
        bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        /* set opcode and fixed command fields */
        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

        /* fill in addresses and len */
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;

        bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

        /* issue the command and wait for completion */
        bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;

        while (len > dmae_wr_max) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, dmae_wr_max);
                offset += dmae_wr_max * 4;
                len -= dmae_wr_max;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

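/* Scan each STORM's assert list and print any entries; returns the number
 * of asserts found. The list ends at the first invalid-opcode entry.
 */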
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

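/* Dump the MCP firmware trace buffer: the scratchpad address of the trace
 * is derived from this path's shmem base, and the circular buffer is
 * printed in two segments (from the current mark to the end, then from
 * the start up to the mark).
 */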
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 addr;
        u32 mark, offset;
        __be32 data[9];
        int word;
        u32 trace_shmem_base;

        if (BP_NOMCP(bp)) {
                BNX2X_ERR("NO MCP - can not dump\n");
                return;
        }

        if (BP_PATH(bp) == 0)
                trace_shmem_base = bp->common.shmem_base;
        else
                trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
        addr = trace_shmem_base - 0x0800 + 4;
        mark = REG_RD(bp, addr);
        mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
                        + ((mark + 0x3) & ~0x3) - 0x08000000;
        pr_err("begin fw dump (mark 0x%x)\n", mark);

        pr_err("");
        for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        pr_err("end of fw dump\n");
}

void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j;
        struct hc_sp_status_block_data sp_sb_data;
        int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
        u16 start = 0, end = 0;
#endif

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
                  "  spq_prod_idx(0x%x)\n",
                  bp->def_idx, bp->def_att_idx,
                  bp->attn_state, bp->spq_prod_idx);
        BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
                  bp->def_status_blk->atten_status_block.attn_bits,
                  bp->def_status_blk->atten_status_block.attn_bits_ack,
                  bp->def_status_blk->atten_status_block.status_block_id,
                  bp->def_status_blk->atten_status_block.attn_bits_index);
        BNX2X_ERR("     def (");
        for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
                pr_cont("0x%x%s",
                       bp->def_status_blk->sp_sb.index_values[i],
                       (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

        for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
                *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
                        CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
                        i*sizeof(u32));

        pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
                         "pf_id(0x%x)  vnic_id(0x%x)  "
                         "vf_id(0x%x)  vf_valid (0x%x)\n",
               sp_sb_data.igu_sb_id,
               sp_sb_data.igu_seg_id,
               sp_sb_data.p_func.pf_id,
               sp_sb_data.p_func.vnic_id,
               sp_sb_data.p_func.vf_id,
               sp_sb_data.p_func.vf_valid);

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                int loop;
                struct hc_status_block_data_e2 sb_data_e2;
                struct hc_status_block_data_e1x sb_data_e1x;
                struct hc_status_block_sm  *hc_sm_p =
                        CHIP_IS_E2(bp) ?
                        sb_data_e2.common.state_machine :
                        sb_data_e1x.common.state_machine;
                struct hc_index_data *hc_index_p =
                        CHIP_IS_E2(bp) ?
                        sb_data_e2.index_data :
                        sb_data_e1x.index_data;
                int data_size;
                u32 *sb_data_p;

                /* Rx */
                BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
                          "  rx_comp_prod(0x%x)"
                          "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
                          "  fp_hc_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_hc_idx));

                /* Tx */
                BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
                          "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
                          "  *tx_cons_sb(0x%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

                loop = CHIP_IS_E2(bp) ?
                        HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

                /* host sb data */

                BNX2X_ERR("     run indexes (");
                for (j = 0; j < HC_SB_MAX_SM; j++)
                        pr_cont("0x%x%s",
                               fp->sb_running_index[j],
                               (j == HC_SB_MAX_SM - 1) ? ")" : " ");

                BNX2X_ERR("     indexes (");
                for (j = 0; j < loop; j++)
                        pr_cont("0x%x%s",
                               fp->sb_index_values[j],
                               (j == loop - 1) ? ")" : " ");
                /* fw sb data */
                data_size = CHIP_IS_E2(bp) ?
                        sizeof(struct hc_status_block_data_e2) :
                        sizeof(struct hc_status_block_data_e1x);
                data_size /= sizeof(u32);
                sb_data_p = CHIP_IS_E2(bp) ?
                        (u32 *)&sb_data_e2 :
                        (u32 *)&sb_data_e1x;
                /* copy sb data in here */
                for (j = 0; j < data_size; j++)
                        *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
                                CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
                                j * sizeof(u32));

                if (CHIP_IS_E2(bp)) {
                        pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
                                "vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
                                sb_data_e2.common.p_func.pf_id,
                                sb_data_e2.common.p_func.vf_id,
                                sb_data_e2.common.p_func.vf_valid,
                                sb_data_e2.common.p_func.vnic_id,
                                sb_data_e2.common.same_igu_sb_1b);
                } else {
                        pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
                                "vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
                                sb_data_e1x.common.p_func.pf_id,
                                sb_data_e1x.common.p_func.vf_id,
                                sb_data_e1x.common.p_func.vf_valid,
                                sb_data_e1x.common.p_func.vnic_id,
                                sb_data_e1x.common.same_igu_sb_1b);
                }

                /* SB_SMs data */
                for (j = 0; j < HC_SB_MAX_SM; j++) {
                        pr_cont("SM[%d] __flags (0x%x) "
                               "igu_sb_id (0x%x)  igu_seg_id(0x%x) "
                               "time_to_expire (0x%x) "
                               "timer_value(0x%x)\n", j,
                               hc_sm_p[j].__flags,
                               hc_sm_p[j].igu_sb_id,
                               hc_sm_p[j].igu_seg_id,
                               hc_sm_p[j].time_to_expire,
                               hc_sm_p[j].timer_value);
                }

                /* Indices data */
                for (j = 0; j < loop; j++) {
                        pr_cont("INDEX[%d] flags (0x%x) "
                                         "timeout (0x%x)\n", j,
                               hc_index_p[j].flags,
                               hc_index_p[j].timeout);
                }
        }

#ifdef BNX2X_STOP_ON_ERROR
        /* Rings */
        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }
#endif
        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

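/* E1/E1H parts deliver interrupts through the HC block, E2 through the
 * IGU; bnx2x_int_enable()/bnx2x_int_disable() below dispatch on
 * bp->common.int_block.
 */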
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                if (!CHIP_IS_E1(bp)) {
                        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                           val, port, addr);

                        REG_WR(bp, addr, val);

                        val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
                }
        }

        if (CHIP_IS_E1(bp))
                REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (!CHIP_IS_E1(bp)) {
                /* init leading/trailing edge */
                if (IS_MF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
        u32 val;
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

        if (msix) {
                val &= ~(IGU_PF_CONF_INT_LINE_EN |
                         IGU_PF_CONF_SINGLE_ISR_EN);
                val |= (IGU_PF_CONF_FUNC_EN |
                        IGU_PF_CONF_MSI_MSIX_EN |
                        IGU_PF_CONF_ATTN_BIT_EN);
        } else if (msi) {
                val &= ~IGU_PF_CONF_INT_LINE_EN;
                val |= (IGU_PF_CONF_FUNC_EN |
                        IGU_PF_CONF_MSI_MSIX_EN |
                        IGU_PF_CONF_ATTN_BIT_EN |
                        IGU_PF_CONF_SINGLE_ISR_EN);
        } else {
                val &= ~IGU_PF_CONF_MSI_MSIX_EN;
                val |= (IGU_PF_CONF_FUNC_EN |
                        IGU_PF_CONF_INT_LINE_EN |
                        IGU_PF_CONF_ATTN_BIT_EN |
                        IGU_PF_CONF_SINGLE_ISR_EN);
        }

        DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
           val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

        barrier();

        /* init leading/trailing edge */
        if (IS_MF(bp)) {
                val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                if (bp->port.pmf)
                        /* enable nig and gpio3 attention */
                        val |= 0x1100;
        } else
                val = 0xffff;

        REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
        REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_int_enable(bp);
        else
                bnx2x_igu_int_enable(bp);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        /*
         * In E1 we must use only PCI configuration space to disable the
         * MSI/MSI-X capability. It's forbidden to disable
         * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
         */
        if (CHIP_IS_E1(bp)) {
                /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use
                 * the mask register to prevent the HC from sending
                 * interrupts after we exit this function.
                 */
                REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read back from HC!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
        u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

        val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
                 IGU_PF_CONF_INT_LINE_EN |
                 IGU_PF_CONF_ATTN_BIT_EN);

        DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
        if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable(struct bnx2x *bp)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_int_disable(bp);
        else
                bnx2x_igu_int_disable(bp);
}

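/* Quiesce interrupt handling: raise intr_sem so ISRs bail out early,
 * optionally mask the hardware, wait for any handler still running on
 * another CPU, and make sure the slowpath task is not in flight.
 */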
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired successfully */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return false;
        }

        if (func <= 5)
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        else
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

        /* Try to acquire the lock */
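        /* Writing the resource bit to the set register (base + 4) requests
         * ownership; reading the control register back tells us whether we
         * actually got it.
         */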
        REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit)
                return true;

        DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
        return false;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

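/* Handle a ramrod completion CQE from the fastpath ring: advance the
 * per-queue state machine (OPENING -> OPEN, HALTING -> HALTED, ...) and
 * return the slowpath-queue credit that was taken when the ramrod was
 * posted.
 */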
1352 void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1353                            union eth_rx_cqe *rr_cqe)
1354 {
1355         struct bnx2x *bp = fp->bp;
1356         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1357         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1358
1359         DP(BNX2X_MSG_SP,
1360            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
1361            fp->index, cid, command, bp->state,
1362            rr_cqe->ramrod_cqe.ramrod_type);
1363
1364         switch (command | fp->state) {
1365         case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1366                 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1367                 fp->state = BNX2X_FP_STATE_OPEN;
1368                 break;
1369
1370         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1371                 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
1372                 fp->state = BNX2X_FP_STATE_HALTED;
1373                 break;
1374
1375         case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1376                 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1377                 fp->state = BNX2X_FP_STATE_TERMINATED;
1378                 break;
1379
1380         default:
1381                 BNX2X_ERR("unexpected MC reply (%d)  "
1382                           "fp[%d] state is %x\n",
1383                           command, fp->index, fp->state);
1384                 break;
1385         }
1386
1387         smp_mb__before_atomic_inc();
1388         atomic_inc(&bp->spq_left);
1389         /* push the change in fp->state towards the memory */
1390         smp_wmb();
1391
1392         return;
1393 }
1394
1395 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1396 {
1397         struct bnx2x *bp = netdev_priv(dev_instance);
1398         u16 status = bnx2x_ack_int(bp);
1399         u16 mask;
1400         int i;
1401
1402         /* Return here if interrupt is shared and it's not for us */
1403         if (unlikely(status == 0)) {
1404                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1405                 return IRQ_NONE;
1406         }
1407         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1408
1409         /* Return here if interrupt is disabled */
1410         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1411                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1412                 return IRQ_HANDLED;
1413         }
1414
1415 #ifdef BNX2X_STOP_ON_ERROR
1416         if (unlikely(bp->panic))
1417                 return IRQ_HANDLED;
1418 #endif
1419
1420         for_each_queue(bp, i) {
1421                 struct bnx2x_fastpath *fp = &bp->fp[i];
1422
1423                 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
1424                 if (status & mask) {
1425                         /* Handle Rx and Tx according to SB id */
1426                         prefetch(fp->rx_cons_sb);
1427                         prefetch(fp->tx_cons_sb);
1428                         prefetch(&fp->sb_running_index[SM_RX_ID]);
1429                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1430                         status &= ~mask;
1431                 }
1432         }
1433
1434 #ifdef BCM_CNIC
1435         mask = 0x2;
1436         if (status & (mask | 0x1)) {
1437                 struct cnic_ops *c_ops = NULL;
1438
1439                 rcu_read_lock();
1440                 c_ops = rcu_dereference(bp->cnic_ops);
1441                 if (c_ops)
1442                         c_ops->cnic_handler(bp->cnic_data, NULL);
1443                 rcu_read_unlock();
1444
1445                 status &= ~mask;
1446         }
1447 #endif
1448
1449         if (unlikely(status & 0x1)) {
1450                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1451
1452                 status &= ~0x1;
1453                 if (!status)
1454                         return IRQ_HANDLED;
1455         }
1456
1457         if (unlikely(status))
1458                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1459                    status);
1460
1461         return IRQ_HANDLED;
1462 }
1463
1464 /* end of fast path */
1465
1466
1467 /* Link */
1468
1469 /*
1470  * General service functions
1471  */
1472
1473 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1474 {
1475         u32 lock_status;
1476         u32 resource_bit = (1 << resource);
1477         int func = BP_FUNC(bp);
1478         u32 hw_lock_control_reg;
1479         int cnt;
1480
1481         /* Validating that the resource is within range */
1482         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1483                 DP(NETIF_MSG_HW,
1484                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1485                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1486                 return -EINVAL;
1487         }
1488
1489         if (func <= 5) {
1490                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1491         } else {
1492                 hw_lock_control_reg =
1493                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1494         }
1495
1496         /* Validating that the resource is not already taken */
1497         lock_status = REG_RD(bp, hw_lock_control_reg);
1498         if (lock_status & resource_bit) {
1499                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1500                    lock_status, resource_bit);
1501                 return -EEXIST;
1502         }
1503
1504         /* Try for 5 seconds, polling every 5ms */
1505         for (cnt = 0; cnt < 1000; cnt++) {
1506                 /* Try to acquire the lock */
1507                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1508                 lock_status = REG_RD(bp, hw_lock_control_reg);
1509                 if (lock_status & resource_bit)
1510                         return 0;
1511
1512                 msleep(5);
1513         }
1514         DP(NETIF_MSG_HW, "Timeout\n");
1515         return -EAGAIN;
1516 }
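/*
 * Minimal usage sketch: bnx2x_acquire_hw_lock() returns 0 on success and
 * a negative errno otherwise, and is paired with bnx2x_release_hw_lock()
 * exactly as the GPIO/SPIO accessors below do:
 *
 *      bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 *      ... program the SPIO register ...
 *      bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 */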
1517
1518 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1519 {
1520         u32 lock_status;
1521         u32 resource_bit = (1 << resource);
1522         int func = BP_FUNC(bp);
1523         u32 hw_lock_control_reg;
1524
1525         DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1526
1527         /* Validating that the resource is within range */
1528         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1529                 DP(NETIF_MSG_HW,
1530                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1531                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1532                 return -EINVAL;
1533         }
1534
1535         if (func <= 5) {
1536                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1537         } else {
1538                 hw_lock_control_reg =
1539                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1540         }
1541
1542         /* Validating that the resource is currently taken */
1543         lock_status = REG_RD(bp, hw_lock_control_reg);
1544         if (!(lock_status & resource_bit)) {
1545                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1546                    lock_status, resource_bit);
1547                 return -EFAULT;
1548         }
1549
1550         REG_WR(bp, hw_lock_control_reg, resource_bit);
1551         return 0;
1552 }
1553
1554
1555 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1556 {
1557         /* The GPIO should be swapped if swap register is set and active */
1558         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1559                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
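        /* port 1's GPIO pins sit MISC_REGISTERS_GPIO_PORT_SHIFT bits above
         * port 0's within the same register, hence the extra shift below
         */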
1560         int gpio_shift = gpio_num +
1561                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1562         u32 gpio_mask = (1 << gpio_shift);
1563         u32 gpio_reg;
1564         int value;
1565
1566         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1567                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1568                 return -EINVAL;
1569         }
1570
1571         /* read GPIO value */
1572         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1573
1574         /* get the requested pin value */
1575         if ((gpio_reg & gpio_mask) == gpio_mask)
1576                 value = 1;
1577         else
1578                 value = 0;
1579
1580         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1581
1582         return value;
1583 }
1584
1585 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1586 {
1587         /* The GPIO should be swapped if swap register is set and active */
1588         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1589                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1590         int gpio_shift = gpio_num +
1591                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1592         u32 gpio_mask = (1 << gpio_shift);
1593         u32 gpio_reg;
1594
1595         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1596                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1597                 return -EINVAL;
1598         }
1599
1600         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1601         /* read GPIO and mask except the float bits */
1602         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1603
1604         switch (mode) {
1605         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1606                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1607                    gpio_num, gpio_shift);
1608                 /* clear FLOAT and set CLR */
1609                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1610                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1611                 break;
1612
1613         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1614                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1615                    gpio_num, gpio_shift);
1616                 /* clear FLOAT and set SET */
1617                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1618                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1619                 break;
1620
1621         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1622                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1623                    gpio_num, gpio_shift);
1624                 /* set FLOAT */
1625                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1626                 break;
1627
1628         default:
1629                 break;
1630         }
1631
1632         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1633         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1634
1635         return 0;
1636 }
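/*
 * Illustrative example (not a call made here): driving GPIO 0 of the
 * current port high would look like
 *
 *      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
 *                     MISC_REGISTERS_GPIO_OUTPUT_HIGH, BP_PORT(bp));
 *
 * assuming MISC_REGISTERS_GPIO_0 is defined alongside the
 * MISC_REGISTERS_GPIO_3 used in the range check above.
 */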
1637
1638 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1639 {
1640         /* The GPIO should be swapped if swap register is set and active */
1641         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1642                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1643         int gpio_shift = gpio_num +
1644                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1645         u32 gpio_mask = (1 << gpio_shift);
1646         u32 gpio_reg;
1647
1648         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1649                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1650                 return -EINVAL;
1651         }
1652
1653         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1654         /* read GPIO int */
1655         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1656
1657         switch (mode) {
1658         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1659                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1660                                    "output low\n", gpio_num, gpio_shift);
1661                 /* clear SET and set CLR */
1662                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1663                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1664                 break;
1665
1666         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1667                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1668                                    "output high\n", gpio_num, gpio_shift);
1669                 /* clear CLR and set SET */
1670                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1671                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1672                 break;
1673
1674         default:
1675                 break;
1676         }
1677
1678         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1679         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1680
1681         return 0;
1682 }
1683
1684 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1685 {
1686         u32 spio_mask = (1 << spio_num);
1687         u32 spio_reg;
1688
1689         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1690             (spio_num > MISC_REGISTERS_SPIO_7)) {
1691                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1692                 return -EINVAL;
1693         }
1694
1695         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1696         /* read SPIO and mask except the float bits */
1697         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1698
1699         switch (mode) {
1700         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1701                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1702                 /* clear FLOAT and set CLR */
1703                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1704                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1705                 break;
1706
1707         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1708                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1709                 /* clear FLOAT and set SET */
1710                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1711                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1712                 break;
1713
1714         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1715                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1716                 /* set FLOAT */
1717                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1718                 break;
1719
1720         default:
1721                 break;
1722         }
1723
1724         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1725         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1726
1727         return 0;
1728 }
1729
1730 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1731 {
1732         u32 sel_phy_idx = 0;
1733         if (bp->link_vars.link_up) {
1734                 sel_phy_idx = EXT_PHY1;
1735                 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1736                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1737                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1738                         sel_phy_idx = EXT_PHY2;
1739         } else {
1740
1741                 switch (bnx2x_phy_selection(&bp->link_params)) {
1742                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1743                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1744                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1745                        sel_phy_idx = EXT_PHY1;
1746                        break;
1747                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1748                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1749                        sel_phy_idx = EXT_PHY2;
1750                        break;
1751                 }
1752         }
1753         /*
1754         * The selected active PHY is always the one after swapping (in case
1755         * PHY swapping is enabled), so when swapping is enabled we need to
1756         * reverse the configuration.
1757         */
1758
1759         if (bp->link_params.multi_phy_config &
1760             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1761                 if (sel_phy_idx == EXT_PHY1)
1762                         sel_phy_idx = EXT_PHY2;
1763                 else if (sel_phy_idx == EXT_PHY2)
1764                         sel_phy_idx = EXT_PHY1;
1765         }
1766         return LINK_CONFIG_IDX(sel_phy_idx);
1767 }
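/*
 * Illustrative example: with PHY swapping enabled, a SERDES link reported
 * on EXT_PHY2 makes the function above return the configuration index of
 * EXT_PHY1, since the configuration indices refer to the pre-swap layout.
 */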
1768
1769 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1770 {
1771         u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
1772         switch (bp->link_vars.ieee_fc &
1773                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1774         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1775                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1776                                                    ADVERTISED_Pause);
1777                 break;
1778
1779         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1780                 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
1781                                                   ADVERTISED_Pause);
1782                 break;
1783
1784         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1785                 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
1786                 break;
1787
1788         default:
1789                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1790                                                    ADVERTISED_Pause);
1791                 break;
1792         }
1793 }
1794
1795 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1796 {
1797         if (!BP_NOMCP(bp)) {
1798                 u8 rc;
1799                 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1800                 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
1801                 /* Initialize link parameters structure variables */
1802                 /* It is recommended to turn off RX FC for jumbo frames
1803                    for better performance */
1804                 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
1805                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1806                 else
1807                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1808
1809                 bnx2x_acquire_phy_lock(bp);
1810
1811                 if (load_mode == LOAD_DIAG) {
1812                         bp->link_params.loopback_mode = LOOPBACK_XGXS;
1813                         bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1814                 }
1815
1816                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1817
1818                 bnx2x_release_phy_lock(bp);
1819
1820                 bnx2x_calc_fc_adv(bp);
1821
1822                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1823                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1824                         bnx2x_link_report(bp);
1825                 }
1826                 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
1827                 return rc;
1828         }
1829         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
1830         return -EINVAL;
1831 }
1832
1833 void bnx2x_link_set(struct bnx2x *bp)
1834 {
1835         if (!BP_NOMCP(bp)) {
1836                 bnx2x_acquire_phy_lock(bp);
1837                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1838                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1839                 bnx2x_release_phy_lock(bp);
1840
1841                 bnx2x_calc_fc_adv(bp);
1842         } else
1843                 BNX2X_ERR("Bootcode is missing - can not set link\n");
1844 }
1845
1846 static void bnx2x__link_reset(struct bnx2x *bp)
1847 {
1848         if (!BP_NOMCP(bp)) {
1849                 bnx2x_acquire_phy_lock(bp);
1850                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1851                 bnx2x_release_phy_lock(bp);
1852         } else
1853                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1854 }
1855
1856 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
1857 {
1858         u8 rc = 0;
1859
1860         if (!BP_NOMCP(bp)) {
1861                 bnx2x_acquire_phy_lock(bp);
1862                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1863                                      is_serdes);
1864                 bnx2x_release_phy_lock(bp);
1865         } else
1866                 BNX2X_ERR("Bootcode is missing - can not test link\n");
1867
1868         return rc;
1869 }
1870
1871 static void bnx2x_init_port_minmax(struct bnx2x *bp)
1872 {
1873         u32 r_param = bp->link_vars.line_speed / 8;
1874         u32 fair_periodic_timeout_usec;
1875         u32 t_fair;
1876
1877         memset(&(bp->cmng.rs_vars), 0,
1878                sizeof(struct rate_shaping_vars_per_port));
1879         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
1880
1881         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1882         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1883
1884         /* this is the threshold below which no timer arming will occur;
1885            the 1.25 coefficient makes the threshold a little bigger than
1886            the real time, to compensate for timer inaccuracy */
1887         bp->cmng.rs_vars.rs_threshold =
1888                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1889
1890         /* resolution of fairness timer */
1891         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1892         /* for 10G it is 1000usec. for 1G it is 10000usec. */
1893         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
1894
1895         /* this is the threshold below which we won't arm the timer anymore */
1896         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1897
1898         /* we multiply by 1e3/8 to get bytes/msec.
1899            We don't want the credit to exceed the number of bytes
1900            sent in t_fair times FAIR_MEM (the algorithm resolution) */
1901         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1902         /* since each tick is 4 usec */
1903         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
1904 }
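/*
 * Worked example (assuming RS_PERIODIC_TIMEOUT_USEC is 100, as the
 * "100 usec in SDM ticks" comment above implies): at 10G line speed,
 * r_param = 10000 / 8 = 1250 bytes per usec, so
 * rs_threshold = 100 * 1250 * 5 / 4 = 156250 bytes, and
 * t_fair = T_FAIR_COEF / 10000, i.e. the 1000 usec quoted above for 10G.
 */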
1905
1906 /* Calculates the sum of vn_min_rates.
1907    It's needed for further normalizing of the min_rates.
1908    Returns:
1909      sum of vn_min_rates.
1910        or
1911      0 - if all the min_rates are 0.
1912      In the latter case the fairness algorithm should be deactivated.
1913      If not all min_rates are zero then those that are zeroes will be set to 1.
1914  */
1915 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1916 {
1917         int all_zero = 1;
1918         int vn;
1919
1920         bp->vn_weight_sum = 0;
1921         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1922                 u32 vn_cfg = bp->mf_config[vn];
1923                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1924                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1925
1926                 /* Skip hidden vns */
1927                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1928                         continue;
1929
1930                 /* If min rate is zero - set it to 1 */
1931                 if (!vn_min_rate)
1932                         vn_min_rate = DEF_MIN_RATE;
1933                 else
1934                         all_zero = 0;
1935
1936                 bp->vn_weight_sum += vn_min_rate;
1937         }
1938
1939         /* ... only if all min rates are zeros - disable fairness */
1940         if (all_zero) {
1941                 bp->cmng.flags.cmng_enables &=
1942                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1943                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
1944                    " fairness will be disabled\n");
1945         } else
1946                 bp->cmng.flags.cmng_enables |=
1947                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1948 }
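/*
 * Worked example (hypothetical values): with two visible vns configured
 * for 30% and 70% minimum bandwidth, the FUNC_MF_CFG_MIN_BW fields yield
 * vn_min_rate values of 3000 and 7000, so vn_weight_sum = 10000 and
 * fairness stays enabled.
 */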
1949
1950 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1951 {
1952         struct rate_shaping_vars_per_vn m_rs_vn;
1953         struct fairness_vars_per_vn m_fair_vn;
1954         u32 vn_cfg = bp->mf_config[vn];
1955         int func = 2*vn + BP_PORT(bp);
1956         u16 vn_min_rate, vn_max_rate;
1957         int i;
1958
1959         /* If function is hidden - set min and max to zeroes */
1960         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1961                 vn_min_rate = 0;
1962                 vn_max_rate = 0;
1963
1964         } else {
1965                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1966                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1967                 /* If min rate is zero - set it to 1 */
1968                 if (bp->vn_weight_sum && (vn_min_rate == 0))
1969                         vn_min_rate = DEF_MIN_RATE;
1970                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1971                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1972         }
1973
1974         DP(NETIF_MSG_IFUP,
1975            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
1976            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1977
1978         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1979         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1980
1981         /* global vn counter - maximal Mbps for this vn */
1982         m_rs_vn.vn_counter.rate = vn_max_rate;
1983
1984         /* quota - number of bytes transmitted in this period */
1985         m_rs_vn.vn_counter.quota =
1986                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1987
1988         if (bp->vn_weight_sum) {
1989                 /* credit for each period of the fairness algorithm:
1990                    number of bytes in T_FAIR (the vn's share of the port rate).
1991                    vn_weight_sum should not be larger than 10000, thus
1992                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1993                    than zero */
1994                 m_fair_vn.vn_credit_delta =
1995                         max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1996                                                    (8 * bp->vn_weight_sum))),
1997                               (bp->cmng.fair_vars.fair_threshold * 2));
1998                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
1999                    m_fair_vn.vn_credit_delta);
2000         }
2001
2002         /* Store it to internal memory */
2003         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2004                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2005                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2006                        ((u32 *)(&m_rs_vn))[i]);
2007
2008         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2009                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2010                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2011                        ((u32 *)(&m_fair_vn))[i]);
2012 }
2013
2014 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2015 {
2016         if (CHIP_REV_IS_SLOW(bp))
2017                 return CMNG_FNS_NONE;
2018         if (IS_MF(bp))
2019                 return CMNG_FNS_MINMAX;
2020
2021         return CMNG_FNS_NONE;
2022 }
2023
2024 static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2025 {
2026         int vn;
2027
2028         if (BP_NOMCP(bp))
2029                 return; /* what should be the default value in this case? */
2030
2031         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2032                 int func = 2*vn + BP_PORT(bp);  /* absolute function number */
2033                 bp->mf_config[vn] =
2034                         MF_CFG_RD(bp, func_mf_config[func].config);
2035         }
2036 }
2037
2038 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2039 {
2040
2041         if (cmng_type == CMNG_FNS_MINMAX) {
2042                 int vn;
2043
2044                 /* clear cmng_enables */
2045                 bp->cmng.flags.cmng_enables = 0;
2046
2047                 /* read mf conf from shmem */
2048                 if (read_cfg)
2049                         bnx2x_read_mf_cfg(bp);
2050
2051                 /* Init rate shaping and fairness contexts */
2052                 bnx2x_init_port_minmax(bp);
2053
2054                 /* calculate vn_weight_sum and enable fairness if it is not 0 */
2055                 bnx2x_calc_vn_weight_sum(bp);
2056
2057                 /* calculate and set min-max rate for each vn */
2058                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2059                         bnx2x_init_vn_minmax(bp, vn);
2060
2061                 /* always enable rate shaping and fairness */
2062                 bp->cmng.flags.cmng_enables |=
2063                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2064                 if (!bp->vn_weight_sum)
2065                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
2066                                    " fairness will be disabled\n");
2067                 return;
2068         }
2069
2070         /* rate shaping and fairness are disabled */
2071         DP(NETIF_MSG_IFUP,
2072            "rate shaping and fairness are disabled\n");
2073 }
2074
2075 static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2076 {
2077         int port = BP_PORT(bp);
2078         int func;
2079         int vn;
2080
2081         /* Set the attention towards other drivers on the same port */
2082         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2083                 if (vn == BP_E1HVN(bp))
2084                         continue;
2085
2086                 func = ((vn << 1) | port);
2087                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2088                        (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2089         }
2090 }
2091
2092 /* This function is called upon link interrupt */
2093 static void bnx2x_link_attn(struct bnx2x *bp)
2094 {
2095         u32 prev_link_status = bp->link_vars.link_status;
2096         /* Make sure that we are synced with the current statistics */
2097         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2098
2099         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2100
2101         if (bp->link_vars.link_up) {
2102
2103                 /* dropless flow control */
2104                 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
2105                         int port = BP_PORT(bp);
2106                         u32 pause_enabled = 0;
2107
2108                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2109                                 pause_enabled = 1;
2110
2111                         REG_WR(bp, BAR_USTRORM_INTMEM +
2112                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2113                                pause_enabled);
2114                 }
2115
2116                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2117                         struct host_port_stats *pstats;
2118
2119                         pstats = bnx2x_sp(bp, port_stats);
2120                         /* reset old bmac stats */
2121                         memset(&(pstats->mac_stx[0]), 0,
2122                                sizeof(struct mac_stx));
2123                 }
2124                 if (bp->state == BNX2X_STATE_OPEN)
2125                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2126         }
2127
2128         /* indicate link status only if link status actually changed */
2129         if (prev_link_status != bp->link_vars.link_status)
2130                 bnx2x_link_report(bp);
2131
2132         if (IS_MF(bp))
2133                 bnx2x_link_sync_notify(bp);
2134
2135         if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2136                 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2137
2138                 if (cmng_fns != CMNG_FNS_NONE) {
2139                         bnx2x_cmng_fns_init(bp, false, cmng_fns);
2140                         storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2141                 } else
2142                         /* rate shaping and fairness are disabled */
2143                         DP(NETIF_MSG_IFUP,
2144                            "single function mode without fairness\n");
2145         }
2146 }
2147
2148 void bnx2x__link_status_update(struct bnx2x *bp)
2149 {
2150         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2151                 return;
2152
2153         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2154
2155         if (bp->link_vars.link_up)
2156                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2157         else
2158                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2159
2160         /* the link status update could be the result of a DCC event,
2161            hence re-read the shmem mf configuration */
2162         bnx2x_read_mf_cfg(bp);
2163
2164         /* indicate link status */
2165         bnx2x_link_report(bp);
2166 }
2167
2168 static void bnx2x_pmf_update(struct bnx2x *bp)
2169 {
2170         int port = BP_PORT(bp);
2171         u32 val;
2172
2173         bp->port.pmf = 1;
2174         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2175
2176         /* enable nig attention */
2177         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2178         if (bp->common.int_block == INT_BLOCK_HC) {
2179                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2180                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2181         } else if (CHIP_IS_E2(bp)) {
2182                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2183                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2184         }
2185
2186         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2187 }
2188
2189 /* end of Link */
2190
2191 /* slow path */
2192
2193 /*
2194  * General service functions
2195  */
2196
2197 /* send the MCP a request, block until there is a reply */
2198 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2199 {
2200         int mb_idx = BP_FW_MB_IDX(bp);
2201         u32 seq = ++bp->fw_seq;
2202         u32 rc = 0;
2203         u32 cnt = 1;
2204         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2205
2206         mutex_lock(&bp->fw_mb_mutex);
2207         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2208         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2209
2210         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2211
2212         do {
2213                 /* let the FW do its magic ... */
2214                 msleep(delay);
2215
2216                 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2217
2218                 /* Give the FW up to 5 seconds (500*10ms) */
2219         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2220
2221         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2222            cnt*delay, rc, seq);
2223
2224         /* is this a reply to our command? */
2225         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2226                 rc &= FW_MSG_CODE_MASK;
2227         else {
2228                 /* FW BUG! */
2229                 BNX2X_ERR("FW failed to respond!\n");
2230                 bnx2x_fw_dump(bp);
2231                 rc = 0;
2232         }
2233         mutex_unlock(&bp->fw_mb_mutex);
2234
2235         return rc;
2236 }
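/*
 * Typical use (see bnx2x_dcc_event() below): the caller passes a
 * DRV_MSG_CODE_* command and gets back the FW_MSG_CODE_* reply, with 0
 * meaning the MCP never answered:
 *
 *      u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
 */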
2237
2238 /* must be called under rtnl_lock */
2239 void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2240 {
2241         u32 mask = (1 << cl_id);
2242
2243         /* initial setting is BNX2X_ACCEPT_NONE */
2244         u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2245         u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2246         u8 unmatched_unicast = 0;
2247
2248         if (filters & BNX2X_PROMISCUOUS_MODE) {
2249                 /* promiscuous - accept all, drop none */
2250                 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2251                 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2252         }
2253         if (filters & BNX2X_ACCEPT_UNICAST) {
2254                 /* accept matched ucast */
2255                 drop_all_ucast = 0;
2256         }
2257         if (filters & BNX2X_ACCEPT_MULTICAST) {
2258                 /* accept matched mcast */
2259                 drop_all_mcast = 0;
2260         }
2261         if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2262                 /* accept all ucast */
2263                 drop_all_ucast = 0;
2264                 accp_all_ucast = 1;
2265         }
2266         if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2267                 /* accept all mcast */
2268                 drop_all_mcast = 0;
2269                 accp_all_mcast = 1;
2270         }
2271         if (filters & BNX2X_ACCEPT_BROADCAST) {
2272                 /* accept (all) bcast */
2273                 drop_all_bcast = 0;
2274                 accp_all_bcast = 1;
2275         }
2276
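        /* each mac_filters field holds one bit per client id; each ternary
         * below either sets or clears this client's bit, i.e. it is
         * equivalent to "if (drop) field |= mask; else field &= ~mask;"
         */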
2277         bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2278                 bp->mac_filters.ucast_drop_all | mask :
2279                 bp->mac_filters.ucast_drop_all & ~mask;
2280
2281         bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2282                 bp->mac_filters.mcast_drop_all | mask :
2283                 bp->mac_filters.mcast_drop_all & ~mask;
2284
2285         bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2286                 bp->mac_filters.bcast_drop_all | mask :
2287                 bp->mac_filters.bcast_drop_all & ~mask;
2288
2289         bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2290                 bp->mac_filters.ucast_accept_all | mask :
2291                 bp->mac_filters.ucast_accept_all & ~mask;
2292
2293         bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2294                 bp->mac_filters.mcast_accept_all | mask :
2295                 bp->mac_filters.mcast_accept_all & ~mask;
2296
2297         bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2298                 bp->mac_filters.bcast_accept_all | mask :
2299                 bp->mac_filters.bcast_accept_all & ~mask;
2300
2301         bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2302                 bp->mac_filters.unmatched_unicast | mask :
2303                 bp->mac_filters.unmatched_unicast & ~mask;
2304 }
2305
2306 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2307 {
2308         struct tstorm_eth_function_common_config tcfg = {0};
2309         u16 rss_flgs;
2310
2311         /* tpa */
2312         if (p->func_flgs & FUNC_FLG_TPA)
2313                 tcfg.config_flags |=
2314                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2315
2316         /* set rss flags */
2317         rss_flgs = (p->rss->mode <<
2318                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2319
2320         if (p->rss->cap & RSS_IPV4_CAP)
2321                 rss_flgs |= RSS_IPV4_CAP_MASK;
2322         if (p->rss->cap & RSS_IPV4_TCP_CAP)
2323                 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2324         if (p->rss->cap & RSS_IPV6_CAP)
2325                 rss_flgs |= RSS_IPV6_CAP_MASK;
2326         if (p->rss->cap & RSS_IPV6_TCP_CAP)
2327                 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2328
2329         tcfg.config_flags |= rss_flgs;
2330         tcfg.rss_result_mask = p->rss->result_mask;
2331
2332         storm_memset_func_cfg(bp, &tcfg, p->func_id);
2333
2334         /* Enable the function in the FW */
2335         storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2336         storm_memset_func_en(bp, p->func_id, 1);
2337
2338         /* statistics */
2339         if (p->func_flgs & FUNC_FLG_STATS) {
2340                 struct stats_indication_flags stats_flags = {0};
2341                 stats_flags.collect_eth = 1;
2342
2343                 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2344                 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2345
2346                 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2347                 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2348
2349                 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2350                 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2351
2352                 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2353                 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2354         }
2355
2356         /* spq */
2357         if (p->func_flgs & FUNC_FLG_SPQ) {
2358                 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2359                 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2360                        XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2361         }
2362 }
2363
2364 static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2365                                      struct bnx2x_fastpath *fp)
2366 {
2367         u16 flags = 0;
2368
2369         /* calculate queue flags */
2370         flags |= QUEUE_FLG_CACHE_ALIGN;
2371         flags |= QUEUE_FLG_HC;
2372         flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
2373
2374 #ifdef BCM_VLAN
2375         flags |= QUEUE_FLG_VLAN;
2376         DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2377 #endif
2378
2379         if (!fp->disable_tpa)
2380                 flags |= QUEUE_FLG_TPA;
2381
2382         flags |= QUEUE_FLG_STATS;
2383
2384         return flags;
2385 }
2386
2387 static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2388         struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2389         struct bnx2x_rxq_init_params *rxq_init)
2390 {
2391         u16 max_sge = 0;
2392         u16 sge_sz = 0;
2393         u16 tpa_agg_size = 0;
2394
2395         /* calculate queue flags */
2396         u16 flags = bnx2x_get_cl_flags(bp, fp);
2397
2398         if (!fp->disable_tpa) {
2399                 pause->sge_th_hi = 250;
2400                 pause->sge_th_lo = 150;
2401                 tpa_agg_size = min_t(u32,
2402                         (min_t(u32, 8, MAX_SKB_FRAGS) *
2403                         SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
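                /* pages needed for one MTU-sized frame, rounded up to a
                 * whole number of SGE elements
                 */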
2404                 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2405                         SGE_PAGE_SHIFT;
2406                 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2407                           (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2408                 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2409                                     0xffff);
2410         }
2411
2412         /* pause - not for e1 */
2413         if (!CHIP_IS_E1(bp)) {
2414                 pause->bd_th_hi = 350;
2415                 pause->bd_th_lo = 250;
2416                 pause->rcq_th_hi = 350;
2417                 pause->rcq_th_lo = 250;
2418                 pause->sge_th_hi = 0;
2419                 pause->sge_th_lo = 0;
2420                 pause->pri_map = 1;
2421         }
2422
2423         /* rxq setup */
2424         rxq_init->flags = flags;
2425         rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2426         rxq_init->dscr_map = fp->rx_desc_mapping;
2427         rxq_init->sge_map = fp->rx_sge_mapping;
2428         rxq_init->rcq_map = fp->rx_comp_mapping;
2429         rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2430         rxq_init->mtu = bp->dev->mtu;
2431         rxq_init->buf_sz = bp->rx_buf_size;
2432         rxq_init->cl_qzone_id = fp->cl_qzone_id;
2433         rxq_init->cl_id = fp->cl_id;
2434         rxq_init->spcl_id = fp->cl_id;
2435         rxq_init->stat_id = fp->cl_id;
2436         rxq_init->tpa_agg_sz = tpa_agg_size;
2437         rxq_init->sge_buf_sz = sge_sz;
2438         rxq_init->max_sges_pkt = max_sge;
2439         rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2440         rxq_init->fw_sb_id = fp->fw_sb_id;
2441
2442         rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2443
2444         rxq_init->cid = HW_CID(bp, fp->cid);
2445
2446         rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2447 }
2448
2449 static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2450         struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2451 {
2452         u16 flags = bnx2x_get_cl_flags(bp, fp);
2453
2454         txq_init->flags = flags;
2455         txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2456         txq_init->dscr_map = fp->tx_desc_mapping;
2457         txq_init->stat_id = fp->cl_id;
2458         txq_init->cid = HW_CID(bp, fp->cid);
2459         txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2460         txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2461         txq_init->fw_sb_id = fp->fw_sb_id;
2462         txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2463 }
2464
2465 void bnx2x_pf_init(struct bnx2x *bp)
2466 {
2467         struct bnx2x_func_init_params func_init = {0};
2468         struct bnx2x_rss_params rss = {0};
2469         struct event_ring_data eq_data = { {0} };
2470         u16 flags;
2471
2472         /* pf specific setups */
2473         if (!CHIP_IS_E1(bp))
2474                 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
2475
2476         if (CHIP_IS_E2(bp)) {
2477                 /* reset IGU PF statistics: MSIX + ATTN */
2478                 /* PF */
2479                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2480                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2481                            (CHIP_MODE_IS_4_PORT(bp) ?
2482                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2483                 /* ATTN */
2484                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2485                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2486                            BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2487                            (CHIP_MODE_IS_4_PORT(bp) ?
2488                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2489         }
2490
2491         /* function setup flags */
2492         flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2493
2494         if (CHIP_IS_E1x(bp))
2495                 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2496         else
2497                 flags |= FUNC_FLG_TPA;
2498
2499         /* function setup */
2500
2501         /*
2502          * Although RSS is meaningless when there is a single HW queue we
2503          * still need it enabled in order to have HW Rx hash generated.
2504          */
2505         rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2506                    RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2507         rss.mode = bp->multi_mode;
2508         rss.result_mask = MULTI_MASK;
2509         func_init.rss = &rss;
2510
2511         func_init.func_flgs = flags;
2512         func_init.pf_id = BP_FUNC(bp);
2513         func_init.func_id = BP_FUNC(bp);
2514         func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2515         func_init.spq_map = bp->spq_mapping;
2516         func_init.spq_prod = bp->spq_prod_idx;
2517
2518         bnx2x_func_init(bp, &func_init);
2519
2520         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2521
2522         /*
2523         Congestion management values depend on the link rate.
2524         There is no active link yet, so the initial link rate is set to 10 Gbps.
2525         When the link comes up, the congestion management values are
2526         re-calculated according to the actual link rate.
2527         */
2528         bp->link_vars.line_speed = SPEED_10000;
2529         bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2530
2531         /* Only the PMF sets the HW */
2532         if (bp->port.pmf)
2533                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2534
2535         /* no rx until link is up */
2536         bp->rx_mode = BNX2X_RX_MODE_NONE;
2537         bnx2x_set_storm_rx_mode(bp);
2538
2539         /* init Event Queue */
2540         eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2541         eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2542         eq_data.producer = bp->eq_prod;
2543         eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2544         eq_data.sb_id = DEF_SB_ID;
2545         storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2546 }
2547
2548
2549 static void bnx2x_e1h_disable(struct bnx2x *bp)
2550 {
2551         int port = BP_PORT(bp);
2552
2553         netif_tx_disable(bp->dev);
2554
2555         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2556
2557         netif_carrier_off(bp->dev);
2558 }
2559
2560 static void bnx2x_e1h_enable(struct bnx2x *bp)
2561 {
2562         int port = BP_PORT(bp);
2563
2564         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2565
2566         /* Tx queues should only be re-enabled */
2567         netif_tx_wake_all_queues(bp->dev);
2568
2569         /*
2570          * Should not call netif_carrier_on since it will be called if the link
2571          * is up when checking for link state
2572          */
2573 }
2574
2575 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2576 {
2577         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2578
2579         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2580
2581                 /*
2582                  * This is the only place besides the function initialization
2583                  * where the bp->flags can change so it is done without any
2584                  * locks
2585                  */
2586                 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2587                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2588                         bp->flags |= MF_FUNC_DIS;
2589
2590                         bnx2x_e1h_disable(bp);
2591                 } else {
2592                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2593                         bp->flags &= ~MF_FUNC_DIS;
2594
2595                         bnx2x_e1h_enable(bp);
2596                 }
2597                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2598         }
2599         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2600
2601                 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2602                 bnx2x_link_sync_notify(bp);
2603                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2604                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2605         }
2606
2607         /* Report results to MCP */
2608         if (dcc_event)
2609                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2610         else
2611                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2612 }
2613
2614 /* must be called under the spq lock */
2615 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2616 {
2617         struct eth_spe *next_spe = bp->spq_prod_bd;
2618
2619         if (bp->spq_prod_bd == bp->spq_last_bd) {
2620                 bp->spq_prod_bd = bp->spq;
2621                 bp->spq_prod_idx = 0;
2622                 DP(NETIF_MSG_TIMER, "end of spq\n");
2623         } else {
2624                 bp->spq_prod_bd++;
2625                 bp->spq_prod_idx++;
2626         }
2627         return next_spe;
2628 }
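/*
 * The returned element is the current producer BD; the producer pointer
 * and index are then advanced, wrapping back to the start of the ring
 * after spq_last_bd has been handed out.  Free-slot accounting is done
 * separately via the atomic spq_left counter in bnx2x_sp_post() below.
 */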
2629
2630 /* must be called under the spq lock */
2631 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2632 {
2633         int func = BP_FUNC(bp);
2634
2635         /* Make sure that BD data is updated before writing the producer */
2636         wmb();
2637
2638         REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2639                  bp->spq_prod_idx);
2640         mmiowb();
2641 }
2642
2643 /* the slow path queue is odd since completions arrive on the fastpath ring */
2644 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2645                   u32 data_hi, u32 data_lo, int common)
2646 {
2647         struct eth_spe *spe;
2648         u16 type;
2649
2650 #ifdef BNX2X_STOP_ON_ERROR
2651         if (unlikely(bp->panic))
2652                 return -EIO;
2653 #endif
2654
2655         spin_lock_bh(&bp->spq_lock);
2656
2657         if (!atomic_read(&bp->spq_left)) {
2658                 BNX2X_ERR("BUG! SPQ ring full!\n");
2659                 spin_unlock_bh(&bp->spq_lock);
2660                 bnx2x_panic();
2661                 return -EBUSY;
2662         }
2663
2664         spe = bnx2x_sp_get_next(bp);
2665
2666         /* CID needs the port number to be encoded in it */
2667         spe->hdr.conn_and_cmd_data =
2668                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2669                                     HW_CID(bp, cid));
2670
2671         if (common)
2672                 /* Common ramrods:
2673                  *      FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2674                  *      TRAFFIC_STOP, TRAFFIC_START
2675                  */
2676                 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2677                         & SPE_HDR_CONN_TYPE;
2678         else
2679                 /* ETH ramrods: SETUP, HALT */
2680                 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2681                         & SPE_HDR_CONN_TYPE;
2682
2683         type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2684                  SPE_HDR_FUNCTION_ID);
2685
2686         spe->hdr.type = cpu_to_le16(type);
2687
2688         spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2689         spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2690
2691         /* stats ramrod has its own slot on the spq */
2692         if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2693                 /* It's ok if the actual decrement is issued towards the memory
2694                  * somewhere between the spin_lock and spin_unlock. Thus no
2695                  * further explicit memory barrier is needed.
2696                  */
2697                 atomic_dec(&bp->spq_left);
2698
2699         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2700            "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x) "
2701            "type(0x%x) left %x\n",
2702            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2703            (u32)(U64_LO(bp->spq_mapping) +
2704            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2705            HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
2706
2707         bnx2x_sp_prod_update(bp);
2708         spin_unlock_bh(&bp->spq_lock);
2709         return 0;
2710 }
2711
2712 /* acquire split MCP access lock register */
2713 static int bnx2x_acquire_alr(struct bnx2x *bp)
2714 {
2715         u32 j, val;
2716         int rc = 0;
2717
2718         might_sleep();
2719         for (j = 0; j < 1000; j++) {
2720                 val = (1UL << 31);
2721                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2722                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2723                 if (val & (1L << 31))
2724                         break;
2725
2726                 msleep(5);
2727         }
2728         if (!(val & (1L << 31))) {
2729                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2730                 rc = -EBUSY;
2731         }
2732
2733         return rc;
2734 }
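/*
 * Lock protocol used above: writing bit 31 to the register at
 * GRCBASE_MCP + 0x9c requests the lock, and reading the bit back as set
 * means this driver owns it; bnx2x_release_alr() below drops the lock by
 * clearing the register.
 */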
2735
2736 /* release split MCP access lock register */
2737 static void bnx2x_release_alr(struct bnx2x *bp)
2738 {
2739         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2740 }
2741
2742 #define BNX2X_DEF_SB_ATT_IDX    0x0001
2743 #define BNX2X_DEF_SB_IDX        0x0002
2744
2745 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2746 {
2747         struct host_sp_status_block *def_sb = bp->def_status_blk;
2748         u16 rc = 0;
2749
2750         barrier(); /* status block is written to by the chip */
2751         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2752                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2753                 rc |= BNX2X_DEF_SB_ATT_IDX;
2754         }
2755
2756         if (bp->def_idx != def_sb->sp_sb.running_index) {
2757                 bp->def_idx = def_sb->sp_sb.running_index;
2758                 rc |= BNX2X_DEF_SB_IDX;
2759         }
2760
2761         /* Do not reorder: reading the indices should complete before handling */
2762         barrier();
2763         return rc;
2764 }
2765
2766 /*
2767  * slow path service functions
2768  */
2769
2770 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2771 {
2772         int port = BP_PORT(bp);
2773         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2774                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2775         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2776                                        NIG_REG_MASK_INTERRUPT_PORT0;
2777         u32 aeu_mask;
2778         u32 nig_mask = 0;
2779         u32 reg_addr;
2780
2781         if (bp->attn_state & asserted)
2782                 BNX2X_ERR("IGU ERROR\n");
2783
2784         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2785         aeu_mask = REG_RD(bp, aeu_addr);
2786
2787         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2788            aeu_mask, asserted);
2789         aeu_mask &= ~(asserted & 0x3ff);
2790         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2791
2792         REG_WR(bp, aeu_addr, aeu_mask);
2793         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2794
2795         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2796         bp->attn_state |= asserted;
2797         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2798
2799         if (asserted & ATTN_HARD_WIRED_MASK) {
2800                 if (asserted & ATTN_NIG_FOR_FUNC) {
2801
2802                         bnx2x_acquire_phy_lock(bp);
2803
2804                         /* save nig interrupt mask */
2805                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2806                         REG_WR(bp, nig_int_mask_addr, 0);
2807
2808                         bnx2x_link_attn(bp);
2809
2810                         /* handle unicore attn? */
2811                 }
2812                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2813                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2814
2815                 if (asserted & GPIO_2_FUNC)
2816                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2817
2818                 if (asserted & GPIO_3_FUNC)
2819                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2820
2821                 if (asserted & GPIO_4_FUNC)
2822                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2823
2824                 if (port == 0) {
2825                         if (asserted & ATTN_GENERAL_ATTN_1) {
2826                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2827                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2828                         }
2829                         if (asserted & ATTN_GENERAL_ATTN_2) {
2830                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2831                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2832                         }
2833                         if (asserted & ATTN_GENERAL_ATTN_3) {
2834                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2835                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2836                         }
2837                 } else {
2838                         if (asserted & ATTN_GENERAL_ATTN_4) {
2839                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2840                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2841                         }
2842                         if (asserted & ATTN_GENERAL_ATTN_5) {
2843                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2844                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2845                         }
2846                         if (asserted & ATTN_GENERAL_ATTN_6) {
2847                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2848                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2849                         }
2850                 }
2851
2852         } /* if hardwired */
2853
2854         if (bp->common.int_block == INT_BLOCK_HC)
2855                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2856                             COMMAND_REG_ATTN_BITS_SET);
2857         else
2858                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2859
2860         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2861            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2862         REG_WR(bp, reg_addr, asserted);
2863
2864         /* now set back the mask */
2865         if (asserted & ATTN_NIG_FOR_FUNC) {
2866                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2867                 bnx2x_release_phy_lock(bp);
2868         }
2869 }
2870
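/*
 * On fan failure the external PHY type in the shared-memory port
 * configuration is overwritten with the FAILURE type, so the MCP and
 * any other driver instance can see that the card was shut down, and a
 * warning is logged for the user.
 */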
2871 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2872 {
2873         int port = BP_PORT(bp);
2874         u32 ext_phy_config;
2875         /* mark the failure */
2876         ext_phy_config =
2877                 SHMEM_RD(bp,
2878                          dev_info.port_hw_config[port].external_phy_config);
2879
2880         ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2881         ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2882         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2883                  ext_phy_config);
2884
2885         /* log the failure */
2886         netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2887                " the driver to shut down the card to prevent permanent"
2888                " damage.  Please contact OEM Support for assistance\n");
2889 }
2890
2891 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2892 {
2893         int port = BP_PORT(bp);
2894         int reg_offset;
2895         u32 val;
2896
2897         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2898                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2899
2900         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2901
2902                 val = REG_RD(bp, reg_offset);
2903                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2904                 REG_WR(bp, reg_offset, val);
2905
2906                 BNX2X_ERR("SPIO5 hw attention\n");
2907
2908                 /* Fan failure attention */
2909                 bnx2x_hw_reset_phy(&bp->link_params);
2910                 bnx2x_fan_failure(bp);
2911         }
2912
2913         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2914                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2915                 bnx2x_acquire_phy_lock(bp);
2916                 bnx2x_handle_module_detect_int(&bp->link_params);
2917                 bnx2x_release_phy_lock(bp);
2918         }
2919
2920         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2921
2922                 val = REG_RD(bp, reg_offset);
2923                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2924                 REG_WR(bp, reg_offset, val);
2925
2926                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2927                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2928                 bnx2x_panic();
2929         }
2930 }
2931
2932 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2933 {
2934         u32 val;
2935
2936         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2937
2938                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2939                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2940                 /* DORQ discard attention */
2941                 if (val & 0x2)
2942                         BNX2X_ERR("FATAL error from DORQ\n");
2943         }
2944
2945         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2946
2947                 int port = BP_PORT(bp);
2948                 int reg_offset;
2949
2950                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2951                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2952
2953                 val = REG_RD(bp, reg_offset);
2954                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2955                 REG_WR(bp, reg_offset, val);
2956
2957                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2958                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2959                 bnx2x_panic();
2960         }
2961 }
2962
2963 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2964 {
2965         u32 val;
2966
2967         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2968
2969                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2970                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2971                 /* CFC error attention */
2972                 if (val & 0x2)
2973                         BNX2X_ERR("FATAL error from CFC\n");
2974         }
2975
2976         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2977
2978                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2979                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2980                 /* RQ_USDMDP_FIFO_OVERFLOW */
2981                 if (val & 0x18000)
2982                         BNX2X_ERR("FATAL error from PXP\n");
2983                 if (CHIP_IS_E2(bp)) {
2984                         val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
2985                         BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
2986                 }
2987         }
2988
2989         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2990
2991                 int port = BP_PORT(bp);
2992                 int reg_offset;
2993
2994                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2995                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2996
2997                 val = REG_RD(bp, reg_offset);
2998                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2999                 REG_WR(bp, reg_offset, val);
3000
3001                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3002                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3003                 bnx2x_panic();
3004         }
3005 }
3006
3007 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3008 {
3009         u32 val;
3010
3011         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3012
3013                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3014                         int func = BP_FUNC(bp);
3015
3016                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3017                         bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3018                                         func_mf_config[BP_ABS_FUNC(bp)].config);
3019                         val = SHMEM_RD(bp,
3020                                        func_mb[BP_FW_MB_IDX(bp)].drv_status);
3021                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3022                                 bnx2x_dcc_event(bp,
3023                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3024                         bnx2x__link_status_update(bp);
3025                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3026                                 bnx2x_pmf_update(bp);
3027
3028                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3029
3030                         BNX2X_ERR("MC assert!\n");
3031                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3032                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3033                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3034                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3035                         bnx2x_panic();
3036
3037                 } else if (attn & BNX2X_MCP_ASSERT) {
3038
3039                         BNX2X_ERR("MCP assert!\n");
3040                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3041                         bnx2x_fw_dump(bp);
3042
3043                 } else
3044                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3045         }
3046
3047         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3048                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3049                 if (attn & BNX2X_GRC_TIMEOUT) {
3050                         val = CHIP_IS_E1(bp) ? 0 :
3051                                         REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
3052                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3053                 }
3054                 if (attn & BNX2X_GRC_RSV) {
3055                         val = CHIP_IS_E1(bp) ? 0 :
3056                                         REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
3057                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3058                 }
3059                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3060         }
3061 }
3062
3063 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
3064 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
3065 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3066 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
3067 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
3068 #define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3069
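/*
 * Global recovery state lives in a generic MISC register, presumably
 * chosen because it is shared by the functions and survives a
 * per-function reset: the low LOAD_COUNTER_BITS count the loaded
 * driver instances, and the bit at RESET_DONE_FLAG_SHIFT is set while
 * a reset is in progress.
 */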
3070 /*
3071  * should be run under rtnl lock
3072  */
3073 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3074 {
3075         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3076         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3077         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3078         barrier();
3079         mmiowb();
3080 }
3081
3082 /*
3083  * should be run under rtnl lock
3084  */
3085 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3086 {
3087         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3088         val |= (1 << RESET_DONE_FLAG_SHIFT);
3089         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3090         barrier();
3091         mmiowb();
3092 }
3093
3094 /*
3095  * should be run under rtnl lock
3096  */
3097 bool bnx2x_reset_is_done(struct bnx2x *bp)
3098 {
3099         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3100         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3101         return (val & RESET_DONE_FLAG_MASK) ? false : true;
3102 }
3103
3104 /*
3105  * should be run under rtnl lock
3106  */
3107 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3108 {
3109         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3110
3111         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3112
3113         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3114         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3115         barrier();
3116         mmiowb();
3117 }
3118
3119 /*
3120  * should be run under rtnl lock
3121  */
3122 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3123 {
3124         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3125
3126         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3127
3128         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3129         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3130         barrier();
3131         mmiowb();
3132
3133         return val1;
3134 }
3135
3136 /*
3137  * should be run under rtnl lock
3138  */
3139 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3140 {
3141         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3142 }
3143
3144 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3145 {
3146         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3147         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3148 }
3149
3150 static inline void _print_next_block(int idx, const char *blk)
3151 {
3152         if (idx)
3153                 pr_cont(", ");
3154         pr_cont("%s", blk);
3155 }
3156
3157 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3158 {
3159         int i = 0;
3160         u32 cur_bit = 0;
3161         for (i = 0; sig; i++) {
3162                 cur_bit = ((u32)0x1 << i);
3163                 if (sig & cur_bit) {
3164                         switch (cur_bit) {
3165                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3166                                 _print_next_block(par_num++, "BRB");
3167                                 break;
3168                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3169                                 _print_next_block(par_num++, "PARSER");
3170                                 break;
3171                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3172                                 _print_next_block(par_num++, "TSDM");
3173                                 break;
3174                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3175                                 _print_next_block(par_num++, "SEARCHER");
3176                                 break;
3177                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3178                                 _print_next_block(par_num++, "TSEMI");
3179                                 break;
3180                         }
3181
3182                         /* Clear the bit */
3183                         sig &= ~cur_bit;
3184                 }
3185         }
3186
3187         return par_num;
3188 }
3189
3190 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3191 {
3192         int i = 0;
3193         u32 cur_bit = 0;
3194         for (i = 0; sig; i++) {
3195                 cur_bit = ((u32)0x1 << i);
3196                 if (sig & cur_bit) {
3197                         switch (cur_bit) {
3198                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3199                                 _print_next_block(par_num++, "PBCLIENT");
3200                                 break;
3201                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3202                                 _print_next_block(par_num++, "QM");
3203                                 break;
3204                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3205                                 _print_next_block(par_num++, "XSDM");
3206                                 break;
3207                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3208                                 _print_next_block(par_num++, "XSEMI");
3209                                 break;
3210                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3211                                 _print_next_block(par_num++, "DOORBELLQ");
3212                                 break;
3213                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3214                                 _print_next_block(par_num++, "VAUX PCI CORE");
3215                                 break;
3216                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3217                                 _print_next_block(par_num++, "DEBUG");
3218                                 break;
3219                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3220                                 _print_next_block(par_num++, "USDM");
3221                                 break;
3222                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3223                                 _print_next_block(par_num++, "USEMI");
3224                                 break;
3225                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3226                                 _print_next_block(par_num++, "UPB");
3227                                 break;
3228                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3229                                 _print_next_block(par_num++, "CSDM");
3230                                 break;
3231                         }
3232
3233                         /* Clear the bit */
3234                         sig &= ~cur_bit;
3235                 }
3236         }
3237
3238         return par_num;
3239 }
3240
3241 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3242 {
3243         int i = 0;
3244         u32 cur_bit = 0;
3245         for (i = 0; sig; i++) {
3246                 cur_bit = ((u32)0x1 << i);
3247                 if (sig & cur_bit) {
3248                         switch (cur_bit) {
3249                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3250                                 _print_next_block(par_num++, "CSEMI");
3251                                 break;
3252                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3253                                 _print_next_block(par_num++, "PXP");
3254                                 break;
3255                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3256                                 _print_next_block(par_num++,
3257                                         "PXPPCICLOCKCLIENT");
3258                                 break;
3259                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3260                                 _print_next_block(par_num++, "CFC");
3261                                 break;
3262                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3263                                 _print_next_block(par_num++, "CDU");
3264                                 break;
3265                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3266                                 _print_next_block(par_num++, "IGU");
3267                                 break;
3268                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3269                                 _print_next_block(par_num++, "MISC");
3270                                 break;
3271                         }
3272
3273                         /* Clear the bit */
3274                         sig &= ~cur_bit;
3275                 }
3276         }
3277
3278         return par_num;
3279 }
3280
3281 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3282 {
3283         int i = 0;
3284         u32 cur_bit = 0;
3285         for (i = 0; sig; i++) {
3286                 cur_bit = ((u32)0x1 << i);
3287                 if (sig & cur_bit) {
3288                         switch (cur_bit) {
3289                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3290                                 _print_next_block(par_num++, "MCP ROM");
3291                                 break;
3292                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3293                                 _print_next_block(par_num++, "MCP UMP RX");
3294                                 break;
3295                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3296                                 _print_next_block(par_num++, "MCP UMP TX");
3297                                 break;
3298                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3299                                 _print_next_block(par_num++, "MCP SCPAD");
3300                                 break;
3301                         }
3302
3303                         /* Clear the bit */
3304                         sig &= ~cur_bit;
3305                 }
3306         }
3307
3308         return par_num;
3309 }
3310
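/*
 * Check the four after-invert attention signals against the HW parity
 * masks.  When any parity attention is set, a single line listing the
 * affected blocks is printed (e.g. "eth0: Parity errors detected in
 * blocks: BRB, QM") and true is returned so the caller can kick off
 * the recovery flow.
 */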
3311 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3312                                      u32 sig2, u32 sig3)
3313 {
3314         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3315             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3316                 int par_num = 0;
3317                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
3318                         "[0]:0x%08x [1]:0x%08x "
3319                         "[2]:0x%08x [3]:0x%08x\n",
3320                           sig0 & HW_PRTY_ASSERT_SET_0,
3321                           sig1 & HW_PRTY_ASSERT_SET_1,
3322                           sig2 & HW_PRTY_ASSERT_SET_2,
3323                           sig3 & HW_PRTY_ASSERT_SET_3);
3324                 printk(KERN_ERR "%s: Parity errors detected in blocks: ",
3325                        bp->dev->name);
3326                 par_num = bnx2x_print_blocks_with_parity0(
3327                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3328                 par_num = bnx2x_print_blocks_with_parity1(
3329                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3330                 par_num = bnx2x_print_blocks_with_parity2(
3331                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3332                 par_num = bnx2x_print_blocks_with_parity3(
3333                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3334                 printk("\n");
3335                 return true;
3336         } else
3337                 return false;
3338 }
3339
3340 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3341 {
3342         struct attn_route attn;
3343         int port = BP_PORT(bp);
3344
3345         attn.sig[0] = REG_RD(bp,
3346                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3347                              port*4);
3348         attn.sig[1] = REG_RD(bp,
3349                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3350                              port*4);
3351         attn.sig[2] = REG_RD(bp,
3352                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3353                              port*4);
3354         attn.sig[3] = REG_RD(bp,
3355                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3356                              port*4);
3357
3358         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3359                                         attn.sig[3]);
3360 }
3361
3362
3363 static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3364 {
3365         u32 val;
3366         if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3367
3368                 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3369                 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3370                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3371                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3372                                   "ADDRESS_ERROR\n");
3373                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3374                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3375                                   "INCORRECT_RCV_BEHAVIOR\n");
3376                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3377                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3378                                   "WAS_ERROR_ATTN\n");
3379                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3380                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3381                                   "VF_LENGTH_VIOLATION_ATTN\n");
3382                 if (val &
3383                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3384                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3385                                   "VF_GRC_SPACE_VIOLATION_ATTN\n");
3386                 if (val &
3387                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3388                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3389                                   "VF_MSIX_BAR_VIOLATION_ATTN\n");
3390                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3391                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3392                                   "TCPL_ERROR_ATTN\n");
3393                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3394                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3395                                   "TCPL_IN_TWO_RCBS_ATTN\n");
3396                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3397                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3398                                   "CSSNOOP_FIFO_OVERFLOW\n");
3399         }
3400         if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3401                 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3402                 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3403                 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3404                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3405                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3406                         BNX2X_ERR("ATC_ATC_INT_STS_REG"
3407                                   "_ATC_TCPL_TO_NOT_PEND\n");
3408                 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3409                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3410                                   "ATC_GPA_MULTIPLE_HITS\n");
3411                 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3412                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3413                                   "ATC_RCPL_TO_EMPTY_CNT\n");
3414                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3415                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3416                 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3417                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3418                                   "ATC_IREQ_LESS_THAN_STU\n");
3419         }
3420
3421         if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3422                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3423                 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3424                 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3425                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3426         }
3427
3428 }
3429
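/*
 * Handle deasserted attention bits.  The ALR is held across the whole
 * flow since the MCP or the other port might handle the same event.
 * Parity errors divert into the recovery path; otherwise the
 * after-invert registers are read, every attention group named in
 * 'deasserted' is dispatched to the per-register handlers, the bits
 * are acknowledged towards the HC/IGU, and the AEU mask and
 * bp->attn_state are restored.
 */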
3430 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3431 {
3432         struct attn_route attn, *group_mask;
3433         int port = BP_PORT(bp);
3434         int index;
3435         u32 reg_addr;
3436         u32 val;
3437         u32 aeu_mask;
3438
3439         /* need to take the HW lock because the MCP or the other port
3440            might also try to handle this event */
3441         bnx2x_acquire_alr(bp);
3442
3443         if (bnx2x_chk_parity_attn(bp)) {
3444                 bp->recovery_state = BNX2X_RECOVERY_INIT;
3445                 bnx2x_set_reset_in_progress(bp);
3446                 schedule_delayed_work(&bp->reset_task, 0);
3447                 /* Disable HW interrupts */
3448                 bnx2x_int_disable(bp);
3449                 bnx2x_release_alr(bp);
3450                 /* In case of parity errors don't handle attentions so that
3451                  * the other function would also "see" the parity errors.
3452                  */
3453                 return;
3454         }
3455
3456         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3457         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3458         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3459         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3460         if (CHIP_IS_E2(bp))
3461                 attn.sig[4] =
3462                       REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3463         else
3464                 attn.sig[4] = 0;
3465
3466         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3467            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
3468
3469         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3470                 if (deasserted & (1 << index)) {
3471                         group_mask = &bp->attn_group[index];
3472
3473                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3474                                          "%08x %08x %08x\n",
3475                            index,
3476                            group_mask->sig[0], group_mask->sig[1],
3477                            group_mask->sig[2], group_mask->sig[3],
3478                            group_mask->sig[4]);
3479
3480                         bnx2x_attn_int_deasserted4(bp,
3481                                         attn.sig[4] & group_mask->sig[4]);
3482                         bnx2x_attn_int_deasserted3(bp,
3483                                         attn.sig[3] & group_mask->sig[3]);
3484                         bnx2x_attn_int_deasserted1(bp,
3485                                         attn.sig[1] & group_mask->sig[1]);
3486                         bnx2x_attn_int_deasserted2(bp,
3487                                         attn.sig[2] & group_mask->sig[2]);
3488                         bnx2x_attn_int_deasserted0(bp,
3489                                         attn.sig[0] & group_mask->sig[0]);
3490                 }
3491         }
3492
3493         bnx2x_release_alr(bp);
3494
3495         if (bp->common.int_block == INT_BLOCK_HC)
3496                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3497                             COMMAND_REG_ATTN_BITS_CLR);
3498         else
3499                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
3500
3501         val = ~deasserted;
3502         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3503            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3504         REG_WR(bp, reg_addr, val);
3505
3506         if (~bp->attn_state & deasserted)
3507                 BNX2X_ERR("IGU ERROR\n");
3508
3509         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3510                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3511
3512         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3513         aeu_mask = REG_RD(bp, reg_addr);
3514
3515         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3516            aeu_mask, deasserted);
3517         aeu_mask |= (deasserted & 0x3ff);
3518         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3519
3520         REG_WR(bp, reg_addr, aeu_mask);
3521         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3522
3523         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3524         bp->attn_state &= ~deasserted;
3525         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3526 }
3527
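/*
 * A bit is newly asserted when it is set in attn_bits but neither
 * acknowledged nor recorded in attn_state; it is deasserted when it is
 * clear in attn_bits but still acknowledged and recorded in attn_state.
 */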
3528 static void bnx2x_attn_int(struct bnx2x *bp)
3529 {
3530         /* read local copy of bits */
3531         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3532                                                                 attn_bits);
3533         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3534                                                                 attn_bits_ack);
3535         u32 attn_state = bp->attn_state;
3536
3537         /* look for changed bits */
3538         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3539         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3540
3541         DP(NETIF_MSG_HW,
3542            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3543            attn_bits, attn_ack, asserted, deasserted);
3544
3545         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3546                 BNX2X_ERR("BAD attention state\n");
3547
3548         /* handle bits that were raised */
3549         if (asserted)
3550                 bnx2x_attn_int_asserted(bp, asserted);
3551
3552         if (deasserted)
3553                 bnx2x_attn_int_deasserted(bp, deasserted);
3554 }
3555
3556 static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3557 {
3558         /* No memory barriers */
3559         storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3560         mmiowb(); /* keep prod updates ordered */
3561 }
3562
3563 #ifdef BCM_CNIC
3564 static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3565                                       union event_ring_elem *elem)
3566 {
3567         if (!bp->cnic_eth_dev.starting_cid  ||
3568             cid < bp->cnic_eth_dev.starting_cid)
3569                 return 1;
3570
3571         DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3572
3573         if (unlikely(elem->message.data.cfc_del_event.error)) {
3574                 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3575                           cid);
3576                 bnx2x_panic_dump(bp);
3577         }
3578         bnx2x_cnic_cfc_comp(bp, cid);
3579         return 0;
3580 }
3581 #endif
3582
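/*
 * Drain the slowpath event queue: walk from the cached software
 * consumer up to the hardware consumer taken from the status block,
 * dispatch each element by opcode (and by bp->state for the
 * state-dependent ramrod completions), then return the consumed
 * entries to spq_left and update the producer in storm memory.
 */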
3583 static void bnx2x_eq_int(struct bnx2x *bp)
3584 {
3585         u16 hw_cons, sw_cons, sw_prod;
3586         union event_ring_elem *elem;
3587         u32 cid;
3588         u8 opcode;
3589         int spqe_cnt = 0;
3590
3591         hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3592
3593         /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3594          * When we get to the next-page element we need to adjust hw_cons so
3595          * the loop condition below will be met. The next-page element is the
3596          * size of a regular element, hence the increment by 1.
3597          */
3598         if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3599                 hw_cons++;
3600
3601         /* This function may never run in parallel with itself for a
3602          * specific bp, thus there is no need for a "paired" read memory
3603          * barrier here.
3604          */
3605         sw_cons = bp->eq_cons;
3606         sw_prod = bp->eq_prod;
3607
3608         DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->spq_left %u\n",
3609                         hw_cons, sw_cons, atomic_read(&bp->spq_left));
3610
3611         for (; sw_cons != hw_cons;
3612               sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3613
3614
3615                 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3616
3617                 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3618                 opcode = elem->message.opcode;
3619
3620
3621                 /* handle eq element */
3622                 switch (opcode) {
3623                 case EVENT_RING_OPCODE_STAT_QUERY:
3624                         DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3625                         /* nothing to do with stats comp */
3626                         continue;
3627
3628                 case EVENT_RING_OPCODE_CFC_DEL:
3629                         /* handle according to cid range */
3630                         /*
3631                          * we may want to verify here that the bp state is
3632                          * HALTING
3633                          */
3634                         DP(NETIF_MSG_IFDOWN,
3635                            "got delete ramrod for MULTI[%d]\n", cid);
3636 #ifdef BCM_CNIC
3637                         if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3638                                 goto next_spqe;
3639 #endif
3640                         bnx2x_fp(bp, cid, state) =
3641                                                 BNX2X_FP_STATE_CLOSED;
3642
3643                         goto next_spqe;
3644                 }
3645
3646                 switch (opcode | bp->state) {
3647                 case (EVENT_RING_OPCODE_FUNCTION_START |
3648                       BNX2X_STATE_OPENING_WAIT4_PORT):
3649                         DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3650                         bp->state = BNX2X_STATE_FUNC_STARTED;
3651                         break;
3652
3653                 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3654                       BNX2X_STATE_CLOSING_WAIT4_HALT):
3655                         DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3656                         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3657                         break;
3658
3659                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3660                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3661                         DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3662                         bp->set_mac_pending = 0;
3663                         break;
3664
3665                 case (EVENT_RING_OPCODE_SET_MAC |
3666                       BNX2X_STATE_CLOSING_WAIT4_HALT):
3667                         DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3668                         bp->set_mac_pending = 0;
3669                         break;
3670                 default:
3671                         /* unknown event: log the error and continue */
3672                         BNX2X_ERR("Unknown EQ event %d\n",
3673                                   elem->message.opcode);
3674                 }
3675 next_spqe:
3676                 spqe_cnt++;
3677         } /* for */
3678
3679         smp_mb__before_atomic_inc();
3680         atomic_add(spqe_cnt, &bp->spq_left);
3681
3682         bp->eq_cons = sw_cons;
3683         bp->eq_prod = sw_prod;
3684         /* Make sure the above memory writes are issued before the producer update */
3685         smp_wmb();
3686
3687         /* update producer */
3688         bnx2x_update_eq_prod(bp, bp->eq_prod);
3689 }
3690
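/*
 * Slowpath work handler: refresh the default status block indices and,
 * depending on which index advanced, handle HW attentions and/or event
 * queue completions, finally acknowledging the status block back to
 * the IGU with interrupts re-enabled.
 */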
3691 static void bnx2x_sp_task(struct work_struct *work)
3692 {
3693         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3694         u16 status;
3695
3696         /* Return here if interrupt is disabled */
3697         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3698                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3699                 return;
3700         }
3701
3702         status = bnx2x_update_dsb_idx(bp);
3703 /*      if (status == 0)                                     */
3704 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3705
3706         DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3707
3708         /* HW attentions */
3709         if (status & BNX2X_DEF_SB_ATT_IDX) {
3710                 bnx2x_attn_int(bp);
3711                 status &= ~BNX2X_DEF_SB_ATT_IDX;
3712         }
3713
3714         /* SP events: STAT_QUERY and others */
3715         if (status & BNX2X_DEF_SB_IDX) {
3716
3717                 /* Handle EQ completions */
3718                 bnx2x_eq_int(bp);
3719
3720                 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3721                         le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3722
3723                 status &= ~BNX2X_DEF_SB_IDX;
3724         }
3725
3726         if (unlikely(status))
3727                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3728                    status);
3729
3730         bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3731              le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
3732 }
3733
3734 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3735 {
3736         struct net_device *dev = dev_instance;
3737         struct bnx2x *bp = netdev_priv(dev);
3738
3739         /* Return here if interrupt is disabled */
3740         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3741                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3742                 return IRQ_HANDLED;
3743         }
3744
3745         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3746                      IGU_INT_DISABLE, 0);
3747
3748 #ifdef BNX2X_STOP_ON_ERROR
3749         if (unlikely(bp->panic))
3750                 return IRQ_HANDLED;
3751 #endif
3752
3753 #ifdef BCM_CNIC
3754         {
3755                 struct cnic_ops *c_ops;
3756
3757                 rcu_read_lock();
3758                 c_ops = rcu_dereference(bp->cnic_ops);
3759                 if (c_ops)
3760                         c_ops->cnic_handler(bp->cnic_data, NULL);
3761                 rcu_read_unlock();
3762         }
3763 #endif
3764         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3765
3766         return IRQ_HANDLED;
3767 }
3768
3769 /* end of slow path */
3770
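/*
 * Periodic timer: in poll mode it services the first queue's Tx/Rx
 * rings; with an MCP present it also advances the driver pulse in
 * shared memory and checks the heartbeat - the delta between the
 * driver pulse and the MCP response should be 1 (before the MCP
 * responded) or 0 (after it responded).
 */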
3771 static void bnx2x_timer(unsigned long data)
3772 {
3773         struct bnx2x *bp = (struct bnx2x *) data;
3774
3775         if (!netif_running(bp->dev))
3776                 return;
3777
3778         if (atomic_read(&bp->intr_sem) != 0)
3779                 goto timer_restart;
3780
3781         if (poll) {
3782                 struct bnx2x_fastpath *fp = &bp->fp[0];
3783                 int rc;
3784
3785                 bnx2x_tx_int(fp);
3786                 rc = bnx2x_rx_int(fp, 1000);
3787         }
3788
3789         if (!BP_NOMCP(bp)) {
3790                 int mb_idx = BP_FW_MB_IDX(bp);
3791                 u32 drv_pulse;
3792                 u32 mcp_pulse;
3793
3794                 ++bp->fw_drv_pulse_wr_seq;
3795                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3796                 /* TBD - add SYSTEM_TIME */
3797                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3798                 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
3799
3800                 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
3801                              MCP_PULSE_SEQ_MASK);
3802                 /* The delta between driver pulse and mcp response
3803                  * should be 1 (before mcp response) or 0 (after mcp response)
3804                  */
3805                 if ((drv_pulse != mcp_pulse) &&
3806                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3807                         /* someone lost a heartbeat... */
3808                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3809                                   drv_pulse, mcp_pulse);
3810                 }
3811         }
3812
3813         if (bp->state == BNX2X_STATE_OPEN)
3814                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3815
3816 timer_restart:
3817         mod_timer(&bp->timer, jiffies + bp->current_interval);
3818 }
3819
3820 /* end of Statistics */
3821
3822 /* nic init */
3823
3824 /*
3825  * nic init service functions
3826  */
3827
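/*
 * Fill a region of device memory with a constant: dword writes when
 * both the address and the length are dword-aligned, byte writes
 * otherwise.
 */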
3828 static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
3829 {
3830         u32 i;
3831         if (!(len%4) && !(addr%4))
3832                 for (i = 0; i < len; i += 4)
3833                         REG_WR(bp, addr + i, fill);
3834         else
3835                 for (i = 0; i < len; i++)
3836                         REG_WR8(bp, addr + i, fill);
3837
3838 }
3839
3840 /* helper: writes FP SP data to FW - data_size in dwords */
3841 static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3842                                        int fw_sb_id,
3843                                        u32 *sb_data_p,
3844                                        u32 data_size)
3845 {
3846         int index;
3847         for (index = 0; index < data_size; index++)
3848                 REG_WR(bp, BAR_CSTRORM_INTMEM +
3849                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3850                         sizeof(u32)*index,
3851                         *(sb_data_p + index));
3852 }
3853
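/*
 * Disable a fastpath status block in FW: write SB data with the
 * pf_id/vf_id fields set to HC_FUNCTION_DISABLED and then zero the
 * status block and its sync block in the CSTORM memory.
 */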
3854 static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3855 {
3856         u32 *sb_data_p;
3857         u32 data_size = 0;
3858         struct hc_status_block_data_e2 sb_data_e2;
3859         struct hc_status_block_data_e1x sb_data_e1x;
3860
3861         /* disable the function first */
3862         if (CHIP_IS_E2(bp)) {
3863                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3864                 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3865                 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3866                 sb_data_e2.common.p_func.vf_valid = false;
3867                 sb_data_p = (u32 *)&sb_data_e2;
3868                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3869         } else {
3870                 memset(&sb_data_e1x, 0,
3871                        sizeof(struct hc_status_block_data_e1x));
3872                 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3873                 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3874                 sb_data_e1x.common.p_func.vf_valid = false;
3875                 sb_data_p = (u32 *)&sb_data_e1x;
3876                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3877         }
3878         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3879
3880         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3881                         CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3882                         CSTORM_STATUS_BLOCK_SIZE);
3883         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3884                         CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3885                         CSTORM_SYNC_BLOCK_SIZE);
3886 }
3887
3888 /* helper:  writes SP SB data to FW */
3889 static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3890                 struct hc_sp_status_block_data *sp_sb_data)
3891 {
3892         int func = BP_FUNC(bp);
3893         int i;
3894         for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3895                 REG_WR(bp, BAR_CSTRORM_INTMEM +
3896                         CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3897                         i*sizeof(u32),
3898                         *((u32 *)sp_sb_data + i));
3899 }
3900
3901 static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3902 {
3903         int func = BP_FUNC(bp);
3904         struct hc_sp_status_block_data sp_sb_data;
3905         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3906
3907         sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3908         sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3909         sp_sb_data.p_func.vf_valid = false;
3910
3911         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3912
3913         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3914                         CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3915                         CSTORM_SP_STATUS_BLOCK_SIZE);
3916         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3917                         CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3918                         CSTORM_SP_SYNC_BLOCK_SIZE);
3919
3920 }
3921
3922
3923 static inline
3924 void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3925                                            int igu_sb_id, int igu_seg_id)
3926 {
3927         hc_sm->igu_sb_id = igu_sb_id;
3928         hc_sm->igu_seg_id = igu_seg_id;
3929         hc_sm->timer_value = 0xFF;
3930         hc_sm->time_to_expire = 0xFFFFFFFF;
3931 }
3932
3933 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
3934                           u8 vf_valid, int fw_sb_id, int igu_sb_id)
3935 {
3936         int igu_seg_id;
3937
3938         struct hc_status_block_data_e2 sb_data_e2;
3939         struct hc_status_block_data_e1x sb_data_e1x;
3940         struct hc_status_block_sm  *hc_sm_p;
3941         struct hc_index_data *hc_index_p;
3942         int data_size;
3943         u32 *sb_data_p;
3944
3945         if (CHIP_INT_MODE_IS_BC(bp))
3946                 igu_seg_id = HC_SEG_ACCESS_NORM;
3947         else
3948                 igu_seg_id = IGU_SEG_ACCESS_NORM;
3949
3950         bnx2x_zero_fp_sb(bp, fw_sb_id);
3951
3952         if (CHIP_IS_E2(bp)) {
3953                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3954                 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
3955                 sb_data_e2.common.p_func.vf_id = vfid;
3956                 sb_data_e2.common.p_func.vf_valid = vf_valid;
3957                 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
3958                 sb_data_e2.common.same_igu_sb_1b = true;
3959                 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
3960                 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
3961                 hc_sm_p = sb_data_e2.common.state_machine;
3962                 hc_index_p = sb_data_e2.index_data;
3963                 sb_data_p = (u32 *)&sb_data_e2;
3964                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3965         } else {
3966                 memset(&sb_data_e1x, 0,
3967                        sizeof(struct hc_status_block_data_e1x));
3968                 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3969                 sb_data_e1x.common.p_func.vf_id = 0xff;
3970                 sb_data_e1x.common.p_func.vf_valid = false;
3971                 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
3972                 sb_data_e1x.common.same_igu_sb_1b = true;
3973                 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3974                 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3975                 hc_sm_p = sb_data_e1x.common.state_machine;
3976                 hc_index_p = sb_data_e1x.index_data;
3977                 sb_data_p = (u32 *)&sb_data_e1x;
3978                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3979         }
3980
3981         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3982                                        igu_sb_id, igu_seg_id);
3983         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
3984                                        igu_sb_id, igu_seg_id);
3985
3986         DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3987
3988         /* write the indices to HW */
3989         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3990 }
3991
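/*
 * Program interrupt coalescing for a single status block index: the
 * timeout is converted to BNX2X_BTR ticks (presumably the basic timer
 * resolution in usec), and the index is disabled either on explicit
 * request or when a zero timeout is given.
 */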
3992 static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3993                                         u8 sb_index, u8 disable, u16 usec)
3994 {
3995         int port = BP_PORT(bp);
3996         u8 ticks = usec / BNX2X_BTR;
3997
3998         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3999
4000         disable = disable ? 1 : (usec ? 0 : 1);
4001         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4002 }
4003
4004 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4005                                      u16 tx_usec, u16 rx_usec)
4006 {
4007         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4008                                     false, rx_usec);
4009         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4010                                     false, tx_usec);
4011 }
4012
4013 static void bnx2x_init_def_sb(struct bnx2x *bp)
4014 {
4015         struct host_sp_status_block *def_sb = bp->def_status_blk;
4016         dma_addr_t mapping = bp->def_status_blk_mapping;
4017         int igu_sp_sb_index;
4018         int igu_seg_id;
4019         int port = BP_PORT(bp);
4020         int func = BP_FUNC(bp);
4021         int reg_offset;
4022         u64 section;
4023         int index;
4024         struct hc_sp_status_block_data sp_sb_data;
4025         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4026
4027         if (CHIP_INT_MODE_IS_BC(bp)) {
4028                 igu_sp_sb_index = DEF_SB_IGU_ID;
4029                 igu_seg_id = HC_SEG_ACCESS_DEF;
4030         } else {
4031                 igu_sp_sb_index = bp->igu_dsb_id;
4032                 igu_seg_id = IGU_SEG_ACCESS_DEF;
4033         }
4034
4035         /* ATTN */
4036         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4037                                             atten_status_block);
4038         def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
4039
4040         bp->attn_state = 0;
4041
4042         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4043                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4044         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4045                 int sindex;
4046                 /* take care of sig[0]..sig[4] */
4047                 for (sindex = 0; sindex < 4; sindex++)
4048                         bp->attn_group[index].sig[sindex] =
4049                            REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
4050
4051                 if (CHIP_IS_E2(bp))
4052                         /*
4053                          * enable5 is separate from the rest of the registers,
4054                          * and therefore the address skip is 4
4055                          * and not 16 between the different groups
4056                          */
4057                         bp->attn_group[index].sig[4] = REG_RD(bp,
4058                                         reg_offset + 0x10 + 0x4*index);
4059                 else
4060                         bp->attn_group[index].sig[4] = 0;
4061         }
4062
4063         if (bp->common.int_block == INT_BLOCK_HC) {
4064                 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4065                                      HC_REG_ATTN_MSG0_ADDR_L);
4066
4067                 REG_WR(bp, reg_offset, U64_LO(section));
4068                 REG_WR(bp, reg_offset + 4, U64_HI(section));
4069         } else if (CHIP_IS_E2(bp)) {
4070                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4071                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4072         }
4073
4074         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4075                                             sp_sb);
4076
4077         bnx2x_zero_sp_sb(bp);
4078
4079         sp_sb_data.host_sb_addr.lo      = U64_LO(section);
4080         sp_sb_data.host_sb_addr.hi      = U64_HI(section);
4081         sp_sb_data.igu_sb_id            = igu_sp_sb_index;
4082         sp_sb_data.igu_seg_id           = igu_seg_id;
4083         sp_sb_data.p_func.pf_id         = func;
4084         sp_sb_data.p_func.vnic_id       = BP_VN(bp);
4085         sp_sb_data.p_func.vf_id         = 0xff;
4086
4087         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4088
4089         bp->stats_pending = 0;
4090         bp->set_mac_pending = 0;
4091
4092         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
4093 }
4094
4095 void bnx2x_update_coalesce(struct bnx2x *bp)
4096 {
4097         int i;
4098
4099         for_each_queue(bp, i)
4100                 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4101                                          bp->rx_ticks, bp->tx_ticks);
4102 }
4103
4104 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4105 {
4106         spin_lock_init(&bp->spq_lock);
4107         atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
4108
4109         bp->spq_prod_idx = 0;
4110         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4111         bp->spq_prod_bd = bp->spq;
4112         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4113 }
4114
4115 static void bnx2x_init_eq_ring(struct bnx2x *bp)
4116 {
4117         int i;
4118         for (i = 1; i <= NUM_EQ_PAGES; i++) {
4119                 union event_ring_elem *elem =
4120                         &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
4121
4122                 elem->next_page.addr.hi =
4123                         cpu_to_le32(U64_HI(bp->eq_mapping +
4124                                    BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4125                 elem->next_page.addr.lo =
4126                         cpu_to_le32(U64_LO(bp->eq_mapping +
4127                                    BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
4128         }
4129         bp->eq_cons = 0;
4130         bp->eq_prod = NUM_EQ_DESC;
4131         bp->eq_cons_sb = BNX2X_EQ_INDEX;
4132 }
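/*
 * The loop above links the event-queue pages into a ring: the last
 * descriptor of each page holds the DMA address of the following page, and
 * the (i % NUM_EQ_PAGES) term makes the last page point back to page 0.
 * A worked instance, assuming NUM_EQ_PAGES == 2:
 *
 *	i == 1: desc[1*EQ_DESC_CNT_PAGE - 1] -> eq_mapping + 1*BCM_PAGE_SIZE
 *	i == 2: desc[2*EQ_DESC_CNT_PAGE - 1] -> eq_mapping + 0*BCM_PAGE_SIZE
 */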
4133
4134 static void bnx2x_init_ind_table(struct bnx2x *bp)
4135 {
4136         int func = BP_FUNC(bp);
4137         int i;
4138
4139         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4140                 return;
4141
4142         DP(NETIF_MSG_IFUP,
4143            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4144         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4145                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4146                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4147                         bp->fp->cl_id + (i % bp->num_queues));
4148 }
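/*
 * Each of the TSTORM_INDIRECTION_TABLE_SIZE entries is filled round-robin
 * with a client id, so RSS hash buckets spread evenly over the queues.
 * A worked instance, assuming a cl_id base of 0 and num_queues == 4:
 *
 *	entry 0 -> client 0, entry 1 -> client 1, entry 2 -> client 2,
 *	entry 3 -> client 3, entry 4 -> client 0, entry 5 -> client 1, ...
 */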
4149
4150 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4151 {
4152         int mode = bp->rx_mode;
4153         u16 cl_id;
4154
4155         /* All but management unicast packets should pass to the host as well */
4156         u32 llh_mask =
4157                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4158                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4159                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4160                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
4161
4162         switch (mode) {
4163         case BNX2X_RX_MODE_NONE: /* no Rx */
4164                 cl_id = BP_L_ID(bp);
4165                 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4166                 break;
4167
4168         case BNX2X_RX_MODE_NORMAL:
4169                 cl_id = BP_L_ID(bp);
4170                 bnx2x_rxq_set_mac_filters(bp, cl_id,
4171                         BNX2X_ACCEPT_UNICAST |
4172                         BNX2X_ACCEPT_BROADCAST |
4173                         BNX2X_ACCEPT_MULTICAST);
4174                 break;
4175
4176         case BNX2X_RX_MODE_ALLMULTI:
4177                 cl_id = BP_L_ID(bp);
4178                 bnx2x_rxq_set_mac_filters(bp, cl_id,
4179                         BNX2X_ACCEPT_UNICAST |
4180                         BNX2X_ACCEPT_BROADCAST |
4181                         BNX2X_ACCEPT_ALL_MULTICAST);
4182                 break;
4183
4184         case BNX2X_RX_MODE_PROMISC:
4185                 cl_id = BP_L_ID(bp);
4186                 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
4187
4188                 /* pass management unicast packets as well */
4189                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
4190                 break;
4191
4192         default:
4193                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4194                 break;
4195         }
4196
4197         REG_WR(bp,
4198                BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
4199                              NIG_REG_LLH0_BRB1_DRV_MASK,
4200                llh_mask);
4201
4202         DP(NETIF_MSG_IFUP, "rx mode %d\n"
4203                 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4204                 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
4205                 bp->mac_filters.ucast_drop_all,
4206                 bp->mac_filters.mcast_drop_all,
4207                 bp->mac_filters.bcast_drop_all,
4208                 bp->mac_filters.ucast_accept_all,
4209                 bp->mac_filters.mcast_accept_all,
4210                 bp->mac_filters.bcast_accept_all
4211         );
4212
4213         storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
4214 }
4215
4216 static void bnx2x_init_internal_common(struct bnx2x *bp)
4217 {
4218         int i;
4219
4220         if (!CHIP_IS_E1(bp)) {
4221
4222                 /* xstorm needs to know whether to add ovlan to packets;
4223                  * in switch-independent mode we write 0 here... */
4224                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4225                         bp->mf_mode);
4226                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4227                         bp->mf_mode);
4228                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4229                         bp->mf_mode);
4230                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4231                         bp->mf_mode);
4232         }
4233
4234         /* Zero this manually as its initialization is
4235            currently missing in the initTool */
4236         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4237                 REG_WR(bp, BAR_USTRORM_INTMEM +
4238                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4239         if (CHIP_IS_E2(bp)) {
4240                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4241                         CHIP_INT_MODE_IS_BC(bp) ?
4242                         HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4243         }
4244 }
4245
4246 static void bnx2x_init_internal_port(struct bnx2x *bp)
4247 {
4248         /* port */
4249 }
4250
4251 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4252 {
4253         switch (load_code) {
4254         case FW_MSG_CODE_DRV_LOAD_COMMON:
4255         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4256                 bnx2x_init_internal_common(bp);
4257                 /* no break */
4258
4259         case FW_MSG_CODE_DRV_LOAD_PORT:
4260                 bnx2x_init_internal_port(bp);
4261                 /* no break */
4262
4263         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4264                 /* internal memory per function is
4265                    initialized inside bnx2x_pf_init */
4266                 break;
4267
4268         default:
4269                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4270                 break;
4271         }
4272 }
4273
4274 static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4275 {
4276         struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4277
4278         fp->state = BNX2X_FP_STATE_CLOSED;
4279
4280         fp->index = fp->cid = fp_idx;
4281         fp->cl_id = BP_L_ID(bp) + fp_idx;
4282         fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4283         fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4284         /* qZone id equals the FW (per-path) client id */
4285         fp->cl_qzone_id  = fp->cl_id +
4286                            BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4287                                 ETH_MAX_RX_CLIENTS_E1H);
4288         /* init shortcut */
4289         fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4290                             USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
4291                             USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4292         /* Setup SB indices */
4293         fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4294         fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4295
4296         DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  "
4297                                    "cl_id %d  fw_sb %d  igu_sb %d\n",
4298                    fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4299                    fp->igu_sb_id);
4300         bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4301                       fp->fw_sb_id, fp->igu_sb_id);
4302
4303         bnx2x_update_fpsb_idx(fp);
4304 }
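/*
 * A worked instance of the id wiring above, assuming BP_L_ID(bp) == 0,
 * base_fw_ndsb == 0, igu_base_sb == 0 and CNIC_CONTEXT_USE == 1 (i.e. one
 * status block reserved for CNIC):
 *
 *	queue 0: cl_id 0, fw_sb_id 1, igu_sb_id 1
 *	queue 1: cl_id 1, fw_sb_id 2, igu_sb_id 2
 */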
4305
4306 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4307 {
4308         int i;
4309
4310         for_each_queue(bp, i)
4311                 bnx2x_init_fp_sb(bp, i);
4312 #ifdef BCM_CNIC
4313
4314         bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4315                       BNX2X_VF_ID_INVALID, false,
4316                       CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4317
4318 #endif
4319
4320         /* ensure status block indices were read */
4321         rmb();
4322
4323         bnx2x_init_def_sb(bp);
4324         bnx2x_update_dsb_idx(bp);
4325         bnx2x_init_rx_rings(bp);
4326         bnx2x_init_tx_rings(bp);
4327         bnx2x_init_sp_ring(bp);
4328         bnx2x_init_eq_ring(bp);
4329         bnx2x_init_internal(bp, load_code);
4330         bnx2x_pf_init(bp);
4331         bnx2x_init_ind_table(bp);
4332         bnx2x_stats_init(bp);
4333
4334         /* At this point, we are ready for interrupts */
4335         atomic_set(&bp->intr_sem, 0);
4336
4337         /* flush all before enabling interrupts */
4338         mb();
4339         mmiowb();
4340
4341         bnx2x_int_enable(bp);
4342
4343         /* Check for SPIO5 */
4344         bnx2x_attn_int_deasserted0(bp,
4345                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4346                                    AEU_INPUTS_ATTN_BITS_SPIO5);
4347 }
4348
4349 /* end of nic init */
4350
4351 /*
4352  * gzip service functions
4353  */
4354
4355 static int bnx2x_gunzip_init(struct bnx2x *bp)
4356 {
4357         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4358                                             &bp->gunzip_mapping, GFP_KERNEL);
4359         if (bp->gunzip_buf == NULL)
4360                 goto gunzip_nomem1;
4361
4362         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4363         if (bp->strm == NULL)
4364                 goto gunzip_nomem2;
4365
4366         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4367                                       GFP_KERNEL);
4368         if (bp->strm->workspace == NULL)
4369                 goto gunzip_nomem3;
4370
4371         return 0;
4372
4373 gunzip_nomem3:
4374         kfree(bp->strm);
4375         bp->strm = NULL;
4376
4377 gunzip_nomem2:
4378         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4379                           bp->gunzip_mapping);
4380         bp->gunzip_buf = NULL;
4381
4382 gunzip_nomem1:
4383         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4384                " decompression\n");
4385         return -ENOMEM;
4386 }
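/*
 * The error path above is the usual staged-unwind idiom: each successful
 * allocation adds one label that frees everything allocated before it, so a
 * failure at step N falls through labels N-1..1 in reverse order. A minimal
 * sketch of the same shape, with illustrative names:
 *
 *	a = alloc_a();
 *	if (!a)
 *		goto err_a;
 *	b = alloc_b();
 *	if (!b)
 *		goto err_b;
 *	return 0;
 *
 *	err_b:
 *		free_a(a);
 *	err_a:
 *		return -ENOMEM;
 */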
4387
4388 static void bnx2x_gunzip_end(struct bnx2x *bp)
4389 {
4390         kfree(bp->strm->workspace);
4391         kfree(bp->strm);
4392         bp->strm = NULL;
4393
4394         if (bp->gunzip_buf) {
4395                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4396                                   bp->gunzip_mapping);
4397                 bp->gunzip_buf = NULL;
4398         }
4399 }
4400
4401 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
4402 {
4403         int n, rc;
4404
4405         /* check gzip header */
4406         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4407                 BNX2X_ERR("Bad gzip header\n");
4408                 return -EINVAL;
4409         }
4410
4411         n = 10;
4412
4413 #define FNAME                           0x8
4414
4415         if (zbuf[3] & FNAME)
4416                 while ((n < len) && (zbuf[n++] != 0));
4417
4418         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
4419         bp->strm->avail_in = len - n;
4420         bp->strm->next_out = bp->gunzip_buf;
4421         bp->strm->avail_out = FW_BUF_SIZE;
4422
4423         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4424         if (rc != Z_OK)
4425                 return rc;
4426
4427         rc = zlib_inflate(bp->strm, Z_FINISH);
4428         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4429                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4430                            bp->strm->msg);
4431
4432         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4433         if (bp->gunzip_outlen & 0x3)
4434                 netdev_err(bp->dev, "Firmware decompression error:"
4435                                     " gunzip_outlen (%d) not aligned\n",
4436                                 bp->gunzip_outlen);
4437         bp->gunzip_outlen >>= 2;
4438
4439         zlib_inflateEnd(bp->strm);
4440
4441         if (rc == Z_STREAM_END)
4442                 return 0;
4443
4444         return rc;
4445 }
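/*
 * bnx2x_gunzip() skips the fixed 10-byte gzip header (magic 0x1f 0x8b,
 * method Z_DEFLATED, flags, mtime, xfl, os) plus the optional NUL-terminated
 * file name when the FNAME flag is set, then inflates the remaining raw
 * deflate stream: a negative windowBits value (-MAX_WBITS) tells zlib that
 * no zlib/gzip wrapper follows. A minimal userspace analogue of the same
 * call sequence, assuming <zlib.h> (names here are zlib's, not the kernel's):
 *
 *	z_stream s = { .next_in = data + hdr_len, .avail_in = len - hdr_len,
 *		       .next_out = out, .avail_out = out_len };
 *	inflateInit2(&s, -MAX_WBITS);
 *	rc = inflate(&s, Z_FINISH);
 *	inflateEnd(&s);
 */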
4446
4447 /* nic load/unload */
4448
4449 /*
4450  * General service functions
4451  */
4452
4453 /* send a NIG loopback debug packet */
4454 static void bnx2x_lb_pckt(struct bnx2x *bp)
4455 {
4456         u32 wb_write[3];
4457
4458         /* Ethernet source and destination addresses */
4459         wb_write[0] = 0x55555555;
4460         wb_write[1] = 0x55555555;
4461         wb_write[2] = 0x20;             /* SOP */
4462         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4463
4464         /* NON-IP protocol */
4465         wb_write[0] = 0x09000000;
4466         wb_write[1] = 0x55555555;
4467         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4468         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4469 }
4470
4471 /* some of the internal memories
4472  * are not directly readable from the driver;
4473  * to test them we send debug packets
4474  */
4475 static int bnx2x_int_mem_test(struct bnx2x *bp)
4476 {
4477         int factor;
4478         int count, i;
4479         u32 val = 0;
4480
4481         if (CHIP_REV_IS_FPGA(bp))
4482                 factor = 120;
4483         else if (CHIP_REV_IS_EMUL(bp))
4484                 factor = 200;
4485         else
4486                 factor = 1;
4487
4488         /* Disable inputs of parser neighbor blocks */
4489         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4490         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4491         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4492         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4493
4494         /*  Write 0 to parser credits for CFC search request */
4495         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4496
4497         /* send Ethernet packet */
4498         bnx2x_lb_pckt(bp);
4499
4500         /* TODO: do I reset NIG statistics? */
4501         /* Wait until NIG register shows 1 packet of size 0x10 */
4502         count = 1000 * factor;
4503         while (count) {
4504
4505                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4506                 val = *bnx2x_sp(bp, wb_data[0]);
4507                 if (val == 0x10)
4508                         break;
4509
4510                 msleep(10);
4511                 count--;
4512         }
4513         if (val != 0x10) {
4514                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4515                 return -1;
4516         }
4517
4518         /* Wait until PRS register shows 1 packet */
4519         count = 1000 * factor;
4520         while (count) {
4521                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4522                 if (val == 1)
4523                         break;
4524
4525                 msleep(10);
4526                 count--;
4527         }
4528         if (val != 0x1) {
4529                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4530                 return -2;
4531         }
4532
4533         /* Reset and init BRB, PRS */
4534         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4535         msleep(50);
4536         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4537         msleep(50);
4538         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4539         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4540
4541         DP(NETIF_MSG_HW, "part2\n");
4542
4543         /* Disable inputs of parser neighbor blocks */
4544         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4545         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4546         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4547         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4548
4549         /* Write 0 to parser credits for CFC search request */
4550         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4551
4552         /* send 10 Ethernet packets */
4553         for (i = 0; i < 10; i++)
4554                 bnx2x_lb_pckt(bp);
4555
4556         /* Wait until NIG register shows 10 + 1
4557            packets of size 11*0x10 = 0xb0 */
4558         count = 1000 * factor;
4559         while (count) {
4560
4561                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4562                 val = *bnx2x_sp(bp, wb_data[0]);
4563                 if (val == 0xb0)
4564                         break;
4565
4566                 msleep(10);
4567                 count--;
4568         }
4569         if (val != 0xb0) {
4570                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4571                 return -3;
4572         }
4573
4574         /* Wait until PRS register shows 2 packets */
4575         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4576         if (val != 2)
4577                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
4578
4579         /* Write 1 to parser credits for CFC search request */
4580         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4581
4582         /* Wait until PRS register shows 3 packets */
4583         msleep(10 * factor);
4584         /* Wait until NIG register shows 1 packet of size 0x10 */
4585         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4586         if (val != 3)
4587                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
4588
4589         /* clear NIG EOP FIFO */
4590         for (i = 0; i < 11; i++)
4591                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4592         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4593         if (val != 1) {
4594                 BNX2X_ERR("clear of NIG failed\n");
4595                 return -4;
4596         }
4597
4598         /* Reset and init BRB, PRS, NIG */
4599         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4600         msleep(50);
4601         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4602         msleep(50);
4603         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4604         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4605 #ifndef BCM_CNIC
4606         /* set NIC mode */
4607         REG_WR(bp, PRS_REG_NIC_MODE, 1);
4608 #endif
4609
4610         /* Enable inputs of parser neighbor blocks */
4611         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4612         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4613         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
4614         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
4615
4616         DP(NETIF_MSG_HW, "done\n");
4617
4618         return 0; /* OK */
4619 }
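/*
 * Both waits above are instances of one poll-until-value pattern, scaled by
 * 'factor' on FPGA/emulation platforms. A condensed sketch of the pattern
 * (the helper name is illustrative, not the driver's):
 *
 *	static int example_poll(struct bnx2x *bp, u32 reg, u32 want, int count)
 *	{
 *		u32 val;
 *
 *		while (count--) {
 *			val = REG_RD(bp, reg);
 *			if (val == want)
 *				return 0;
 *			msleep(10);
 *		}
 *		return -ETIMEDOUT;
 *	}
 */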
4620
4621 static void enable_blocks_attention(struct bnx2x *bp)
4622 {
4623         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4624         if (CHIP_IS_E2(bp))
4625                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4626         else
4627                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
4628         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4629         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4630         /*
4631          * mask read-length error interrupts in the BRB for the parser
4632          * (parsing unit and 'checksum and crc' unit);
4633          * these errors are legal (the PU reads a fixed length and the CAC
4634          * can cause a read-length error on truncated packets)
4635          */
4636         REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
4637         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4638         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4639         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4640         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4641         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
4642 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4643 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
4644         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4645         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4646         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
4647 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4648 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
4649         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4650         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4651         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4652         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
4653 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4654 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4655
4656         if (CHIP_REV_IS_FPGA(bp))
4657                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4658         else if (CHIP_IS_E2(bp))
4659                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4660                            (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4661                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4662                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4663                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4664                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
4665         else
4666                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
4667         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4668         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4669         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
4670 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4671 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
4672         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4673         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
4674 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4675         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
4676 }
4677
4678 static const struct {
4679         u32 addr;
4680         u32 mask;
4681 } bnx2x_parity_mask[] = {
4682         {PXP_REG_PXP_PRTY_MASK,         0x3ffffff},
4683         {PXP2_REG_PXP2_PRTY_MASK_0,     0xffffffff},
4684         {PXP2_REG_PXP2_PRTY_MASK_1,     0x7f},
4685         {HC_REG_HC_PRTY_MASK,           0x7},
4686         {MISC_REG_MISC_PRTY_MASK,       0x1},
4687         {QM_REG_QM_PRTY_MASK,           0x0},
4688         {DORQ_REG_DORQ_PRTY_MASK,       0x0},
4689         {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
4690         {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
4691         {SRC_REG_SRC_PRTY_MASK,         0x4}, /* bit 2 */
4692         {CDU_REG_CDU_PRTY_MASK,         0x0},
4693         {CFC_REG_CFC_PRTY_MASK,         0x0},
4694         {DBG_REG_DBG_PRTY_MASK,         0x0},
4695         {DMAE_REG_DMAE_PRTY_MASK,       0x0},
4696         {BRB1_REG_BRB1_PRTY_MASK,       0x0},
4697         {PRS_REG_PRS_PRTY_MASK,         (1<<6)},/* bit 6 */
4698         {TSDM_REG_TSDM_PRTY_MASK,       0x18},  /* bit 3,4 */
4699         {CSDM_REG_CSDM_PRTY_MASK,       0x8},   /* bit 3 */
4700         {USDM_REG_USDM_PRTY_MASK,       0x38},  /* bit 3,4,5 */
4701         {XSDM_REG_XSDM_PRTY_MASK,       0x8},   /* bit 3 */
4702         {TSEM_REG_TSEM_PRTY_MASK_0,     0x0},
4703         {TSEM_REG_TSEM_PRTY_MASK_1,     0x0},
4704         {USEM_REG_USEM_PRTY_MASK_0,     0x0},
4705         {USEM_REG_USEM_PRTY_MASK_1,     0x0},
4706         {CSEM_REG_CSEM_PRTY_MASK_0,     0x0},
4707         {CSEM_REG_CSEM_PRTY_MASK_1,     0x0},
4708         {XSEM_REG_XSEM_PRTY_MASK_0,     0x0},
4709         {XSEM_REG_XSEM_PRTY_MASK_1,     0x0}
4710 };
4711
4712 static void enable_blocks_parity(struct bnx2x *bp)
4713 {
4714         int i;
4715
4716         for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
4717                 REG_WR(bp, bnx2x_parity_mask[i].addr,
4718                         bnx2x_parity_mask[i].mask);
4719 }
4720
4721
4722 static void bnx2x_reset_common(struct bnx2x *bp)
4723 {
4724         /* reset_common */
4725         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4726                0xd3ffff7f);
4727         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4728 }
4729
4730 static void bnx2x_init_pxp(struct bnx2x *bp)
4731 {
4732         u16 devctl;
4733         int r_order, w_order;
4734
4735         pci_read_config_word(bp->pdev,
4736                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4737         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4738         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4739         if (bp->mrrs == -1)
4740                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4741         else {
4742                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4743                 r_order = bp->mrrs;
4744         }
4745
4746         bnx2x_init_pxp_arb(bp, r_order, w_order);
4747 }
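/*
 * The devctl fields decode as an exponent: an encoded value v in
 * Max_Payload_Size (bits 7:5) or Max_Read_Request_Size (bits 14:12) means a
 * size of 128 << v bytes. A worked instance, assuming devctl == 0x2810:
 *
 *	w_order = (0x2810 & PCI_EXP_DEVCTL_PAYLOAD) >> 5  = 0  ->  128 bytes
 *	r_order = (0x2810 & PCI_EXP_DEVCTL_READRQ)  >> 12 = 2  ->  512 bytes
 */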
4748
4749 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4750 {
4751         int is_required;
4752         u32 val;
4753         int port;
4754
4755         if (BP_NOMCP(bp))
4756                 return;
4757
4758         is_required = 0;
4759         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4760               SHARED_HW_CFG_FAN_FAILURE_MASK;
4761
4762         /*
4763          * The fan failure mechanism is usually related to the PHY type
4764          * since the power consumption of the board is affected by the PHY.
4765          * Currently, fan is required for most designs with SFX7101,
4766          * BCM8727 and BCM8481.
4767          */
4768         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4769                 is_required = 1;
4770         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4771                 for (port = PORT_0; port < PORT_MAX; port++) {
4772                         is_required |=
4773                                 bnx2x_fan_failure_det_req(
4774                                         bp,
4775                                         bp->common.shmem_base,
4776                                         bp->common.shmem2_base,
4777                                         port);
4778                 }
4779
4780         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4781
4782         if (is_required == 0)
4783                 return;
4784
4785         /* Fan failure is indicated by SPIO 5 */
4786         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4787                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
4788
4789         /* set to active low mode */
4790         val = REG_RD(bp, MISC_REG_SPIO_INT);
4791         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
4792                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
4793         REG_WR(bp, MISC_REG_SPIO_INT, val);
4794
4795         /* enable interrupt to signal the IGU */
4796         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4797         val |= (1 << MISC_REGISTERS_SPIO_5);
4798         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4799 }
4800
4801 static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4802 {
4803         u32 offset = 0;
4804
4805         if (CHIP_IS_E1(bp))
4806                 return;
4807         if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4808                 return;
4809
4810         switch (BP_ABS_FUNC(bp)) {
4811         case 0:
4812                 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4813                 break;
4814         case 1:
4815                 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4816                 break;
4817         case 2:
4818                 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4819                 break;
4820         case 3:
4821                 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4822                 break;
4823         case 4:
4824                 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4825                 break;
4826         case 5:
4827                 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4828                 break;
4829         case 6:
4830                 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4831                 break;
4832         case 7:
4833                 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4834                 break;
4835         default:
4836                 return;
4837         }
4838
4839         REG_WR(bp, offset, pretend_func_num);
4840         REG_RD(bp, offset);
4841         DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4842 }
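/*
 * Every pretend must be paired with a restore to our own function id,
 * otherwise later GRC accesses keep targeting the foreign function; the
 * read-back above also ensures the posted write has reached the chip before
 * dependent accesses. The canonical shape, as used in the init code below:
 *
 *	bnx2x_pretend_func(bp, other_func);
 *	... access registers on behalf of other_func ...
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 */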
4843
4844 static void bnx2x_pf_disable(struct bnx2x *bp)
4845 {
4846         u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4847         val &= ~IGU_PF_CONF_FUNC_EN;
4848
4849         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4850         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4851         REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4852 }
4853
4854 static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4855 {
4856         u32 val, i;
4857
4858         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_ABS_FUNC(bp));
4859
4860         bnx2x_reset_common(bp);
4861         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4862         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4863
4864         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
4865         if (!CHIP_IS_E1(bp))
4866                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
4867
4868         if (CHIP_IS_E2(bp)) {
4869                 u8 fid;
4870
4871                 /*
4872                  * In 4-port or 2-port mode we need to turn off master-enable
4873                  * for everyone; after that, turn it back on for self.
4874                  * So we disregard multi-function mode and always disable it
4875                  * for all functions on the given path, which means 0,2,4,6
4876                  * for path 0 and 1,3,5,7 for path 1.
4877                  */
4878                 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4879                         if (fid == BP_ABS_FUNC(bp)) {
4880                                 REG_WR(bp,
4881                                     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4882                                     1);
4883                                 continue;
4884                         }
4885
4886                         bnx2x_pretend_func(bp, fid);
4887                         /* clear pf enable */
4888                         bnx2x_pf_disable(bp);
4889                         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4890                 }
4891         }
4892
4893         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
4894         if (CHIP_IS_E1(bp)) {
4895                 /* enable HW interrupt from PXP on USDM overflow
4896                    bit 16 on INT_MASK_0 */
4897                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4898         }
4899
4900         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
4901         bnx2x_init_pxp(bp);
4902
4903 #ifdef __BIG_ENDIAN
4904         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4905         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4906         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4907         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4908         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
4909         /* make sure this value is 0 */
4910         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
4911
4912 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4913         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4914         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4915         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4916         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
4917 #endif
4918
4919         bnx2x_ilt_init_page_size(bp, INITOP_SET);
4920
4921         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4922                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
4923
4924         /* let the HW do its magic ... */
4925         msleep(100);
4926         /* finish PXP init */
4927         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
4928         if (val != 1) {
4929                 BNX2X_ERR("PXP2 CFG failed\n");
4930                 return -EBUSY;
4931         }
4932         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
4933         if (val != 1) {
4934                 BNX2X_ERR("PXP2 RD_INIT failed\n");
4935                 return -EBUSY;
4936         }
4937
4938         /* Timers bug workaround, E2 only. We need to set the entire ILT to
4939          * have entries with value "0" and the valid bit on.
4940          * This needs to be done by the first PF that is loaded in a path
4941          * (i.e. the common phase).
4942          */
4943         if (CHIP_IS_E2(bp)) {
4944                 struct ilt_client_info ilt_cli;
4945                 struct bnx2x_ilt ilt;
4946                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
4947                 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
4948
4949                 /* initialize a dummy TM client */
4950                 ilt_cli.start = 0;
4951                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
4952                 ilt_cli.client_num = ILT_CLIENT_TM;
4953
4954                 /* Step 1: set all ILT page entries to zero, with the valid bit on
4955                  * Step 2: set the timers first/last ILT entry to point
4956                  * to the entire range to prevent an ILT range error for the
4957                  * 3rd/4th vnic (this code assumes the vnic exists)
4958                  *
4959                  * both steps are performed by the call to
4960                  * bnx2x_ilt_client_init_op() with the dummy TM client
4961                  *
4962                  * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
4963                  * and its counterpart are split registers
4964                  */
4965                 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
4966                 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
4967                 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4968
4969                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
4970                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
4971                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
4972         }
4973
4974
4975         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
4976         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
4977
4978         if (CHIP_IS_E2(bp)) {
4979                 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
4980                                 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
4981                 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
4982
4983                 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
4984
4985                 /* let the HW do its magic ... */
4986                 do {
4987                         msleep(200);
4988                         val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
4989                 } while (factor-- && (val != 1));
4990
4991                 if (val != 1) {
4992                         BNX2X_ERR("ATC_INIT failed\n");
4993                         return -EBUSY;
4994                 }
4995         }
4996
4997         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
4998
4999         /* clean the DMAE memory */
5000         bp->dmae_ready = 1;
5001         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5002
5003         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5004         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5005         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5006         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5007
5008         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5009         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5010         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5011         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5012
5013         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5014
5015         if (CHIP_MODE_IS_4_PORT(bp))
5016                 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
5017
5018         /* QM queues pointers table */
5019         bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
5020
5021         /* soft reset pulse */
5022         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5023         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5024
5025 #ifdef BCM_CNIC
5026         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5027 #endif
5028
5029         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5030         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5031
5032         if (!CHIP_REV_IS_SLOW(bp)) {
5033                 /* enable hw interrupt from doorbell Q */
5034                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5035         }
5036
5037         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5038         if (CHIP_MODE_IS_4_PORT(bp)) {
5039                 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5040                 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5041         }
5042
5043         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5044         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5045 #ifndef BCM_CNIC
5046         /* set NIC mode */
5047         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5048 #endif
5049         if (!CHIP_IS_E1(bp))
5050                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
5051
5052         if (CHIP_IS_E2(bp)) {
5053                 /* Bit-map indicating which L2 hdrs may appear after the
5054                    basic Ethernet header */
5055                 int has_ovlan = IS_MF(bp);
5056                 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5057                 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5058         }
5059
5060         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5061         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5062         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5063         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5064
5065         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5066         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5067         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5068         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5069
5070         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5071         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5072         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5073         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5074
5075         if (CHIP_MODE_IS_4_PORT(bp))
5076                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5077
5078         /* sync semi rtc */
5079         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5080                0x80000000);
5081         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5082                0x80000000);
5083
5084         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5085         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5086         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5087
5088         if (CHIP_IS_E2(bp)) {
5089                 int has_ovlan = IS_MF(bp);
5090                 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5091                 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5092         }
5093
5094         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5095         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5096                 REG_WR(bp, i, random32());
5097
5098         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5099 #ifdef BCM_CNIC
5100         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5101         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5102         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5103         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5104         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5105         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5106         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5107         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5108         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5109         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5110 #endif
5111         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5112
5113         if (sizeof(union cdu_context) != 1024)
5114                 /* we currently assume that a context is 1024 bytes */
5115                 dev_alert(&bp->pdev->dev, "please adjust the size "
5116                                           "of cdu_context(%ld)\n",
5117                          (long)sizeof(union cdu_context));
5118
5119         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5120         val = (4 << 24) + (0 << 12) + 1024;
5121         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5122
5123         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5124         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5125         /* enable context validation interrupt from CFC */
5126         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5127
5128         /* set the thresholds to prevent CFC/CDU race */
5129         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5130
5131         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5132
5133         if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5134                 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5135
5136         bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
5137         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5138
5139         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5140         /* Reset PCIE errors for debug */
5141         REG_WR(bp, 0x2814, 0xffffffff);
5142         REG_WR(bp, 0x3820, 0xffffffff);
5143
5144         if (CHIP_IS_E2(bp)) {
5145                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5146                            (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5147                                 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5148                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5149                            (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5150                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5151                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5152                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5153                            (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5154                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5155                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5156         }
5157
5158         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5159         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5160         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5161         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5162
5163         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5164         if (!CHIP_IS_E1(bp)) {
5165                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5166                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
5167         }
5168         if (CHIP_IS_E2(bp)) {
5169                 /* Bit-map indicating which L2 hdrs may appear after the
5170                    basic Ethernet header */
5171                 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
5172         }
5173
5174         if (CHIP_REV_IS_SLOW(bp))
5175                 msleep(200);
5176
5177         /* finish CFC init */
5178         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5179         if (val != 1) {
5180                 BNX2X_ERR("CFC LL_INIT failed\n");
5181                 return -EBUSY;
5182         }
5183         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5184         if (val != 1) {
5185                 BNX2X_ERR("CFC AC_INIT failed\n");
5186                 return -EBUSY;
5187         }
5188         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5189         if (val != 1) {
5190                 BNX2X_ERR("CFC CAM_INIT failed\n");
5191                 return -EBUSY;
5192         }
5193         REG_WR(bp, CFC_REG_DEBUG0, 0);
5194
5195         if (CHIP_IS_E1(bp)) {
5196                 /* read NIG statistic
5197                    to see if this is our first bring-up since power-up */
5198                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5199                 val = *bnx2x_sp(bp, wb_data[0]);
5200
5201                 /* do internal memory self test */
5202                 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5203                         BNX2X_ERR("internal mem self test failed\n");
5204                         return -EBUSY;
5205                 }
5206         }
5207
5208         bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5209                                                        bp->common.shmem_base,
5210                                                        bp->common.shmem2_base);
5211
5212         bnx2x_setup_fan_failure_detection(bp);
5213
5214         /* clear PXP2 attentions */
5215         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5216
5217         enable_blocks_attention(bp);
5218         if (CHIP_PARITY_SUPPORTED(bp))
5219                 enable_blocks_parity(bp);
5220
5221         if (!BP_NOMCP(bp)) {
5222                 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5223                 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5224                     CHIP_IS_E1x(bp)) {
5225                         u32 shmem_base[2], shmem2_base[2];
5226                         shmem_base[0] =  bp->common.shmem_base;
5227                         shmem2_base[0] = bp->common.shmem2_base;
5228                         if (CHIP_IS_E2(bp)) {
5229                                 shmem_base[1] =
5230                                         SHMEM2_RD(bp, other_shmem_base_addr);
5231                                 shmem2_base[1] =
5232                                         SHMEM2_RD(bp, other_shmem2_base_addr);
5233                         }
5234                         bnx2x_acquire_phy_lock(bp);
5235                         bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5236                                               bp->common.chip_id);
5237                         bnx2x_release_phy_lock(bp);
5238                 }
5239         } else
5240                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5241
5242         return 0;
5243 }
5244
5245 static int bnx2x_init_hw_port(struct bnx2x *bp)
5246 {
5247         int port = BP_PORT(bp);
5248         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5249         u32 low, high;
5250         u32 val;
5251
5252         DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
5253
5254         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5255
5256         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5257         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5258
5259         /* Timers bug workaround: the common phase disables the pf_master
5260          * bit in PGLUE; we need to enable it here, before any DMAE access
5261          * is attempted. Therefore we manually add the enable-master to the
5262          * port phase (it also happens in the function phase).
5263          */
5264         if (CHIP_IS_E2(bp))
5265                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5266
5267         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5268         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5269         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
5270         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5271
5272         /* QM cid (connection) count */
5273         bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
5274
5275 #ifdef BCM_CNIC
5276         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5277         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5278         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
5279 #endif
5280
5281         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5282
5283         if (CHIP_MODE_IS_4_PORT(bp))
5284                 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
5285
5286         if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5287                 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5288                 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5289                         /* no pause for emulation and FPGA */
5290                         low = 0;
5291                         high = 513;
5292                 } else {
5293                         if (IS_MF(bp))
5294                                 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5295                         else if (bp->dev->mtu > 4096) {
5296                                 if (bp->flags & ONE_PORT_FLAG)
5297                                         low = 160;
5298                                 else {
5299                                         val = bp->dev->mtu;
5300                                         /* (24*1024 + val*4)/256 */
5301                                         low = 96 + (val/64) +
5302                                                         ((val % 64) ? 1 : 0);
5303                                 }
5304                         } else
5305                                 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5306                         high = low + 56;        /* 14*1024/256 */
5307                 }
5308                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5309                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5310         }
5311
5312         if (CHIP_MODE_IS_4_PORT(bp)) {
5313                 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5314                 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5315                 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5316                                           BRB1_REG_MAC_GUARANTIED_0), 40);
5317         }
5318
5319         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5320
5321         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5322         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5323         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5324         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5325
5326         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5327         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5328         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5329         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5330         if (CHIP_MODE_IS_4_PORT(bp))
5331                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
5332
5333         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5334         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5335
5336         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5337
5338         if (!CHIP_IS_E2(bp)) {
5339                 /* configure PBF to work without PAUSE, mtu 9000 */
5340                 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5341
5342                 /* update threshold */
5343                 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5344                 /* update init credit */
5345                 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5346
5347                 /* probe changes */
5348                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5349                 udelay(50);
5350                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5351         }
5352
5353 #ifdef BCM_CNIC
5354         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
5355 #endif
5356         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5357         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5358
5359         if (CHIP_IS_E1(bp)) {
5360                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5361                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5362         }
5363         bnx2x_init_block(bp, HC_BLOCK, init_stage);
5364
5365         bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5366
5367         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5368         /* init aeu_mask_attn_func_0/1:
5369          *  - SF mode: bits 3-7 are masked; only bits 0-2 are in use
5370          *  - MF mode: bit 3 is masked; bits 0-2 are in use as in SF,
5371          *             bits 4-7 are used for "per vn group attention" */
5372         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5373                (IS_MF(bp) ? 0xF7 : 0x7));
5374
5375         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5376         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5377         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5378         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5379         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5380
5381         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5382
5383         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5384
5385         if (!CHIP_IS_E1(bp)) {
5386                 /* 0x2 disable mf_ov, 0x1 enable */
5387                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5388                        (IS_MF(bp) ? 0x1 : 0x2));
5389
5390                 if (CHIP_IS_E2(bp)) {
5391                         val = 0;
5392                         switch (bp->mf_mode) {
5393                         case MULTI_FUNCTION_SD:
5394                                 val = 1;
5395                                 break;
5396                         case MULTI_FUNCTION_SI:
5397                                 val = 2;
5398                                 break;
5399                         }
5400
5401                         REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5402                                                   NIG_REG_LLH0_CLS_TYPE), val);
5403                 }
5404                 {
5405                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5406                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5407                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5408                 }
5409         }
5410
5411         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5412         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5413         bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5414                                                        bp->common.shmem_base,
5415                                                        bp->common.shmem2_base);
5416         if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
5417                                       bp->common.shmem2_base, port)) {
5418                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5419                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5420                 val = REG_RD(bp, reg_addr);
5421                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5422                 REG_WR(bp, reg_addr, val);
5423         }
5424         bnx2x__link_reset(bp);
5425
5426         return 0;
5427 }
5428
5429 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5430 {
5431         int reg;
5432
5433         if (CHIP_IS_E1(bp))
5434                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5435         else
5436                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5437
5438         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5439 }
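/*
 * bnx2x_wb_wr() programs one 8-byte ILT entry as a pair of 32-bit words;
 * ONCHIP_ADDR1/ONCHIP_ADDR2 pack the page-aligned DMA address (and the
 * entry's valid bit) into that pair. A worked instance of the indexing
 * above, assuming index 5 on a non-E1 chip:
 *
 *	reg = PXP2_REG_RQ_ONCHIP_AT_B0 + 5*8;
 *
 * i.e. each ILT slot occupies 8 bytes of register space.
 */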
5440
5441 static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5442 {
5443         bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5444 }
5445
5446 static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5447 {
5448         u32 i, base = FUNC_ILT_BASE(func);
5449         for (i = base; i < base + ILT_PER_FUNC; i++)
5450                 bnx2x_ilt_wr(bp, i, 0);
5451 }
5452
5453 static int bnx2x_init_hw_func(struct bnx2x *bp)
5454 {
5455         int port = BP_PORT(bp);
5456         int func = BP_FUNC(bp);
5457         struct bnx2x_ilt *ilt = BP_ILT(bp);
5458         u16 cdu_ilt_start;
5459         u32 addr, val;
5460         int i;
5461
5462         DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
5463
5464         /* set MSI reconfigure capability */
5465         if (bp->common.int_block == INT_BLOCK_HC) {
5466                 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5467                 val = REG_RD(bp, addr);
5468                 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5469                 REG_WR(bp, addr, val);
5470         }
5471
5472         ilt = BP_ILT(bp);
5473         cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
5474
5475         for (i = 0; i < L2_ILT_LINES(bp); i++) {
5476                 ilt->lines[cdu_ilt_start + i].page =
5477                         bp->context.vcxt + (ILT_PAGE_CIDS * i);
5478                 ilt->lines[cdu_ilt_start + i].page_mapping =
5479                         bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5480                 /* cdu ilt pages are allocated manually so there's no need
5481                  * to set the size */
5482         }
5483         bnx2x_ilt_init_op(bp, INITOP_SET);
5484
5485 #ifdef BCM_CNIC
5486         bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
5487
5488         /* T1 hash bits value determines the T1 number of entries */
5489         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
5490 #endif
5491
5492 #ifndef BCM_CNIC
5493         /* set NIC mode */
5494         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5495 #endif  /* BCM_CNIC */
5496
5497         if (CHIP_IS_E2(bp)) {
5498                 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5499
5500                 /* Turn on a single ISR mode in IGU if the driver is going to use
5501                  * INT#x or MSI
5502                  */
5503                 if (!(bp->flags & USING_MSIX_FLAG))
5504                         pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5505                 /*
5506                  * Timers bug workaround: function init part.
5507                  * Wait 20 msec after initializing the ILT to make
5508                  * sure there are no requests left in any of the PXP
5509                  * internal queues with "old" ILT addresses.
5510                  */
5511                 msleep(20);
                /*
                 * Master enable - must be set here because WB DMAE
                 * writes are performed before this register is
                 * re-initialized as part of the regular function init.
                 */
5517                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5518                 /* Enable the function in IGU */
5519                 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5520         }
5521
5522         bp->dmae_ready = 1;
5523
5524         bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5525
5526         if (CHIP_IS_E2(bp))
5527                 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5528
5529         bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5530         bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5531         bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5532         bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5533         bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5534         bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5535         bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5536         bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5537         bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5538
5539         if (CHIP_IS_E2(bp)) {
5540                 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5541                                                                 BP_PATH(bp));
5542                 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5543                                                                 BP_PATH(bp));
5544         }
5545
5546         if (CHIP_MODE_IS_4_PORT(bp))
5547                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5548
5549         if (CHIP_IS_E2(bp))
5550                 REG_WR(bp, QM_REG_PF_EN, 1);
5551
5552         bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
5553
5554         if (CHIP_MODE_IS_4_PORT(bp))
5555                 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5556
5557         bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5558         bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5559         bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5560         bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5561         bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5562         bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5563         bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5564         bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5565         bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5566         bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5567         bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
5568         if (CHIP_IS_E2(bp))
5569                 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5570
5571         bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5572
5573         bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
5574
5575         if (CHIP_IS_E2(bp))
5576                 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5577
5578         if (IS_MF(bp)) {
5579                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5580                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
5581         }
5582
5583         bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5584
5585         /* HC init per function */
5586         if (bp->common.int_block == INT_BLOCK_HC) {
5587                 if (CHIP_IS_E1H(bp)) {
5588                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5589
5590                         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5591                         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5592                 }
5593                 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5594
5595         } else {
5596                 int num_segs, sb_idx, prod_offset;
5597
5598                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5599
5600                 if (CHIP_IS_E2(bp)) {
5601                         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5602                         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5603                 }
5604
5605                 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5606
5607                 if (CHIP_IS_E2(bp)) {
5608                         int dsb_idx = 0;
                        /*
                         * Producer memory:
                         * E2 mode: addresses 0-135 match the mapping memory;
                         * 136 - PF0 default prod; 137 - PF1 default prod;
                         * 138 - PF2 default prod; 139 - PF3 default prod;
                         * 140 - PF0 attn prod;    141 - PF1 attn prod;
                         * 142 - PF2 attn prod;    143 - PF3 attn prod;
                         * 144-147 reserved.
                         *
                         * E1.5 mode - in backward compatible mode:
                         * for a non-default SB, each even line in the memory
                         * holds the U producer and each odd line holds
                         * the C producer. The first 128 producers are for
                         * NDSBs (PF0 - 0-31; PF1 - 32-63 and so on). The
                         * last 20 producers are for the DSB of each PF.
                         * Each PF has five segments (the order inside each
                         * segment is PF0; PF1; PF2; PF3): 128-131 U prods;
                         * 132-135 C prods; 136-139 X prods; 140-143 T prods;
                         * 144-147 attn prods.
                         */
5629                         /* non-default-status-blocks */
5630                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5631                                 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5632                         for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5633                                 prod_offset = (bp->igu_base_sb + sb_idx) *
5634                                         num_segs;
5635
5636                                 for (i = 0; i < num_segs; i++) {
5637                                         addr = IGU_REG_PROD_CONS_MEMORY +
5638                                                         (prod_offset + i) * 4;
5639                                         REG_WR(bp, addr, 0);
5640                                 }
5641                                 /* send consumer update with value 0 */
5642                                 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5643                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5644                                 bnx2x_igu_clear_sb(bp,
5645                                                    bp->igu_base_sb + sb_idx);
5646                         }
5647
5648                         /* default-status-blocks */
5649                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5650                                 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5651
5652                         if (CHIP_MODE_IS_4_PORT(bp))
5653                                 dsb_idx = BP_FUNC(bp);
5654                         else
5655                                 dsb_idx = BP_E1HVN(bp);
5656
5657                         prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5658                                        IGU_BC_BASE_DSB_PROD + dsb_idx :
5659                                        IGU_NORM_BASE_DSB_PROD + dsb_idx);
5660
5661                         for (i = 0; i < (num_segs * E1HVN_MAX);
5662                              i += E1HVN_MAX) {
5663                                 addr = IGU_REG_PROD_CONS_MEMORY +
5664                                                         (prod_offset + i)*4;
5665                                 REG_WR(bp, addr, 0);
5666                         }
5667                         /* send consumer update with 0 */
5668                         if (CHIP_INT_MODE_IS_BC(bp)) {
5669                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5670                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5671                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5672                                              CSTORM_ID, 0, IGU_INT_NOP, 1);
5673                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5674                                              XSTORM_ID, 0, IGU_INT_NOP, 1);
5675                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5676                                              TSTORM_ID, 0, IGU_INT_NOP, 1);
5677                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5678                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
5679                         } else {
5680                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5681                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5682                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5683                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
5684                         }
5685                         bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5686
5687                         /* !!! these should become driver const once
5688                            rf-tool supports split-68 const */
5689                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5690                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5691                         REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5692                         REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5693                         REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5694                         REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5695                 }
5696         }
5697
5698         /* Reset PCIE errors for debug */
5699         REG_WR(bp, 0x2114, 0xffffffff);
5700         REG_WR(bp, 0x2120, 0xffffffff);
5701
5702         bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5703         bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5704         bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5705         bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5706         bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5707         bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5708
5709         bnx2x_phy_probe(&bp->link_params);
5710
5711         return 0;
5712 }
5713
5714 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5715 {
5716         int rc = 0;
5717
5718         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5719            BP_ABS_FUNC(bp), load_code);
5720
5721         bp->dmae_ready = 0;
5722         mutex_init(&bp->dmae_mutex);
5723         rc = bnx2x_gunzip_init(bp);
5724         if (rc)
5725                 return rc;
5726
5727         switch (load_code) {
5728         case FW_MSG_CODE_DRV_LOAD_COMMON:
5729         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5730                 rc = bnx2x_init_hw_common(bp, load_code);
5731                 if (rc)
5732                         goto init_hw_err;
5733                 /* no break */
5734
5735         case FW_MSG_CODE_DRV_LOAD_PORT:
5736                 rc = bnx2x_init_hw_port(bp);
5737                 if (rc)
5738                         goto init_hw_err;
5739                 /* no break */
5740
5741         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5742                 rc = bnx2x_init_hw_func(bp);
5743                 if (rc)
5744                         goto init_hw_err;
5745                 break;
5746
5747         default:
5748                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5749                 break;
5750         }
5751
5752         if (!BP_NOMCP(bp)) {
5753                 int mb_idx = BP_FW_MB_IDX(bp);
5754
5755                 bp->fw_drv_pulse_wr_seq =
5756                                 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
5757                                  DRV_PULSE_SEQ_MASK);
5758                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5759         }
5760
5761 init_hw_err:
5762         bnx2x_gunzip_end(bp);
5763
5764         return rc;
5765 }
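
/*
 * Note on the deliberate switch fall-through in bnx2x_init_hw() above:
 * each load code initializes a superset of the stages below it, i.e.
 *
 *      DRV_LOAD_COMMON:   init_hw_common() + init_hw_port() + init_hw_func()
 *      DRV_LOAD_PORT:     init_hw_port() + init_hw_func()
 *      DRV_LOAD_FUNCTION: init_hw_func()
 */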
5766
5767 void bnx2x_free_mem(struct bnx2x *bp)
5768 {
5769
5770 #define BNX2X_PCI_FREE(x, y, size) \
5771         do { \
5772                 if (x) { \
5773                         dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
5774                         x = NULL; \
5775                         y = 0; \
5776                 } \
5777         } while (0)
5778
5779 #define BNX2X_FREE(x) \
5780         do { \
5781                 if (x) { \
5782                         kfree((void *)x); \
5783                         x = NULL; \
5784                 } \
5785         } while (0)
5786
5787         int i;
5788
5789         /* fastpath */
5790         /* Common */
5791         for_each_queue(bp, i) {
5792                 /* status blocks */
5793                 if (CHIP_IS_E2(bp))
5794                         BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5795                                        bnx2x_fp(bp, i, status_blk_mapping),
5796                                        sizeof(struct host_hc_status_block_e2));
5797                 else
5798                         BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5799                                        bnx2x_fp(bp, i, status_blk_mapping),
5800                                        sizeof(struct host_hc_status_block_e1x));
5801         }
5802         /* Rx */
5803         for_each_queue(bp, i) {
5804
5805                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5806                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5807                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5808                                bnx2x_fp(bp, i, rx_desc_mapping),
5809                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5810
5811                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5812                                bnx2x_fp(bp, i, rx_comp_mapping),
5813                                sizeof(struct eth_fast_path_rx_cqe) *
5814                                NUM_RCQ_BD);
5815
5816                 /* SGE ring */
5817                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5818                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5819                                bnx2x_fp(bp, i, rx_sge_mapping),
5820                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5821         }
5822         /* Tx */
5823         for_each_queue(bp, i) {
5824
5825                 /* fastpath tx rings: tx_buf tx_desc */
5826                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5827                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5828                                bnx2x_fp(bp, i, tx_desc_mapping),
5829                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5830         }
5831         /* end of fastpath */
5832
5833         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5834                        sizeof(struct host_sp_status_block));
5835
5836         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5837                        sizeof(struct bnx2x_slowpath));
5838
5839         BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5840                        bp->context.size);
5841
5842         bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5843
5844         BNX2X_FREE(bp->ilt->lines);
5845
5846 #ifdef BCM_CNIC
5847         if (CHIP_IS_E2(bp))
5848                 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5849                                sizeof(struct host_hc_status_block_e2));
5850         else
5851                 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5852                                sizeof(struct host_hc_status_block_e1x));
5853
5854         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
5855 #endif
5856
5857         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5858
5859         BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5860                        BCM_PAGE_SIZE * NUM_EQ_PAGES);
5861
5862 #undef BNX2X_PCI_FREE
#undef BNX2X_FREE
5864 }
5865
5866 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
5867 {
5868         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
5869         if (CHIP_IS_E2(bp)) {
5870                 bnx2x_fp(bp, index, sb_index_values) =
5871                         (__le16 *)status_blk.e2_sb->sb.index_values;
5872                 bnx2x_fp(bp, index, sb_running_index) =
5873                         (__le16 *)status_blk.e2_sb->sb.running_index;
5874         } else {
5875                 bnx2x_fp(bp, index, sb_index_values) =
5876                         (__le16 *)status_blk.e1x_sb->sb.index_values;
5877                 bnx2x_fp(bp, index, sb_running_index) =
5878                         (__le16 *)status_blk.e1x_sb->sb.running_index;
5879         }
5880 }
5881
5882 int bnx2x_alloc_mem(struct bnx2x *bp)
5883 {
5884 #define BNX2X_PCI_ALLOC(x, y, size) \
5885         do { \
5886                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
5887                 if (x == NULL) \
5888                         goto alloc_mem_err; \
5889                 memset(x, 0, size); \
5890         } while (0)
5891
5892 #define BNX2X_ALLOC(x, size) \
5893         do { \
5894                 x = kzalloc(size, GFP_KERNEL); \
5895                 if (x == NULL) \
5896                         goto alloc_mem_err; \
5897         } while (0)
5898
5899         int i;
5900
5901         /* fastpath */
5902         /* Common */
5903         for_each_queue(bp, i) {
5904                 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
5905                 bnx2x_fp(bp, i, bp) = bp;
5906                 /* status blocks */
5907                 if (CHIP_IS_E2(bp))
5908                         BNX2X_PCI_ALLOC(sb->e2_sb,
5909                                 &bnx2x_fp(bp, i, status_blk_mapping),
5910                                 sizeof(struct host_hc_status_block_e2));
5911                 else
5912                         BNX2X_PCI_ALLOC(sb->e1x_sb,
5913                                 &bnx2x_fp(bp, i, status_blk_mapping),
5914                                 sizeof(struct host_hc_status_block_e1x));
5915
5916                 set_sb_shortcuts(bp, i);
5917         }
5918         /* Rx */
5919         for_each_queue(bp, i) {
5920
5921                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5922                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5923                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5924                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5925                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5926                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5927
5928                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5929                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5930                                 sizeof(struct eth_fast_path_rx_cqe) *
5931                                 NUM_RCQ_BD);
5932
5933                 /* SGE ring */
5934                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5935                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5936                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5937                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5938                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5939         }
5940         /* Tx */
5941         for_each_queue(bp, i) {
5942
5943                 /* fastpath tx rings: tx_buf tx_desc */
5944                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5945                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5946                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5947                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5948                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5949         }
5950         /* end of fastpath */
5951
5952 #ifdef BCM_CNIC
5953         if (CHIP_IS_E2(bp))
5954                 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
5955                                 sizeof(struct host_hc_status_block_e2));
5956         else
5957                 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
5958                                 sizeof(struct host_hc_status_block_e1x));
5959
5960         /* allocate searcher T2 table */
5961         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
5962 #endif
5963
5964
5965         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5966                         sizeof(struct host_sp_status_block));
5967
5968         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5969                         sizeof(struct bnx2x_slowpath));
5970
5971         bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
5972
5973         BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
5974                         bp->context.size);
5975
5976         BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
5977
5978         if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
5979                 goto alloc_mem_err;
5980
5981         /* Slow path ring */
5982         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5983
5984         /* EQ */
5985         BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
5986                         BCM_PAGE_SIZE * NUM_EQ_PAGES);
5987         return 0;
5988
5989 alloc_mem_err:
5990         bnx2x_free_mem(bp);
5991         return -ENOMEM;
5992
5993 #undef BNX2X_PCI_ALLOC
5994 #undef BNX2X_ALLOC
5995 }
5996
5997 /*
5998  * Init service functions
5999  */
6000 int bnx2x_func_start(struct bnx2x *bp)
6001 {
6002         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
6003
6004         /* Wait for completion */
6005         return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
6006                                  WAIT_RAMROD_COMMON);
6007 }
6008
6009 int bnx2x_func_stop(struct bnx2x *bp)
6010 {
6011         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
6012
6013         /* Wait for completion */
6014         return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6015                                       0, &(bp->state), WAIT_RAMROD_COMMON);
6016 }
6017
6018 /**
6019  * Sets a MAC in a CAM for a few L2 Clients for E1x chips
6020  *
6021  * @param bp driver descriptor
6022  * @param set set or clear an entry (1 or 0)
6023  * @param mac pointer to a buffer containing a MAC
6024  * @param cl_bit_vec bit vector of clients to register a MAC for
6025  * @param cam_offset offset in a CAM to use
6026  * @param is_bcast is the set MAC a broadcast address (for E1 only)
6027  */
6028 static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
6029                                    u32 cl_bit_vec, u8 cam_offset,
6030                                    u8 is_bcast)
6031 {
6032         struct mac_configuration_cmd *config =
6033                 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6034         int ramrod_flags = WAIT_RAMROD_COMMON;
6035
6036         bp->set_mac_pending = 1;
6037         smp_wmb();
6038
6039         config->hdr.length = 1;
6040         config->hdr.offset = cam_offset;
6041         config->hdr.client_id = 0xff;
6042         config->hdr.reserved1 = 0;
6043
6044         /* primary MAC */
6045         config->config_table[0].msb_mac_addr =
6046                                         swab16(*(u16 *)&mac[0]);
6047         config->config_table[0].middle_mac_addr =
6048                                         swab16(*(u16 *)&mac[2]);
6049         config->config_table[0].lsb_mac_addr =
6050                                         swab16(*(u16 *)&mac[4]);
6051         config->config_table[0].clients_bit_vector =
6052                                         cpu_to_le32(cl_bit_vec);
6053         config->config_table[0].vlan_id = 0;
6054         config->config_table[0].pf_id = BP_FUNC(bp);
6055         if (set)
6056                 SET_FLAG(config->config_table[0].flags,
6057                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6058                         T_ETH_MAC_COMMAND_SET);
6059         else
6060                 SET_FLAG(config->config_table[0].flags,
6061                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6062                         T_ETH_MAC_COMMAND_INVALIDATE);
6063
6064         if (is_bcast)
6065                 SET_FLAG(config->config_table[0].flags,
6066                         MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6067
6068         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  PF_ID %d  CLID mask %d\n",
6069            (set ? "setting" : "clearing"),
6070            config->config_table[0].msb_mac_addr,
6071            config->config_table[0].middle_mac_addr,
6072            config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
6073
6074         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6075                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6076                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6077
6078         /* Wait for a completion */
6079         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
6080 }
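
/*
 * Worked example (illustrative only): for the MAC 00:11:22:33:44:55 on a
 * little-endian host, *(u16 *)&mac[0] reads 0x1100, so the swab16() calls
 * above yield msb = 0x0011, middle = 0x2233 and lsb = 0x4455 - matching
 * the %04x:%04x:%04x layout used by the DP() print.
 */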
6081
6082 int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6083                       int *state_p, int flags)
6084 {
6085         /* can take a while if any port is running */
6086         int cnt = 5000;
6087         u8 poll = flags & WAIT_RAMROD_POLL;
6088         u8 common = flags & WAIT_RAMROD_COMMON;
6089
6090         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6091            poll ? "polling" : "waiting", state, idx);
6092
6093         might_sleep();
6094         while (cnt--) {
6095                 if (poll) {
6096                         if (common)
6097                                 bnx2x_eq_int(bp);
6098                         else {
6099                                 bnx2x_rx_int(bp->fp, 10);
6100                                 /* if index is different from 0
6101                                  * the reply for some commands will
6102                                  * be on the non default queue
6103                                  */
6104                                 if (idx)
6105                                         bnx2x_rx_int(&bp->fp[idx], 10);
6106                         }
6107                 }
6108
6109                 mb(); /* state is changed by bnx2x_sp_event() */
6110                 if (*state_p == state) {
6111 #ifdef BNX2X_STOP_ON_ERROR
6112                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
6113 #endif
6114                         return 0;
6115                 }
6116
6117                 msleep(1);
6118
6119                 if (bp->panic)
6120                         return -EIO;
6121         }
6122
6123         /* timeout! */
6124         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6125                   poll ? "polling" : "waiting", state, idx);
6126 #ifdef BNX2X_STOP_ON_ERROR
6127         bnx2x_panic();
6128 #endif
6129
6130         return -EBUSY;
6131 }
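
/*
 * Typical usage (sketch, mirroring bnx2x_stop_fw_client() below): the
 * caller sets *state_p to a transitional value, posts a ramrod and then
 * waits for the final state, e.g.:
 *
 *      fp->state = BNX2X_FP_STATE_HALTING;
 *      bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, fp->cid, 0, fp->cl_id, 0);
 *      rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, fp->index,
 *                             &fp->state, 0);
 */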
6132
6133 u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6134 {
6135         if (CHIP_IS_E1H(bp))
6136                 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6137         else if (CHIP_MODE_IS_4_PORT(bp))
6138                 return BP_FUNC(bp) * 32  + rel_offset;
6139         else
6140                 return BP_VN(bp) * 32  + rel_offset;
6141 }
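
/*
 * Worked example (illustrative, assuming E1H_FUNC_MAX is 8): on an E1H
 * chip, rel_offset 1 for function 5 maps to CAM entry 8 * 1 + 5 = 13,
 * i.e. the CAM holds one group of E1H_FUNC_MAX entries per relative
 * offset, indexed by function.
 */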
6142
6143 void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6144 {
6145         u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6146                          bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6147
6148         /* networking  MAC */
6149         bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
                               (1 << bp->fp->cl_id), cam_offset, 0);
6151
6152         if (CHIP_IS_E1(bp)) {
6153                 /* broadcast MAC */
6154                 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6155                 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6156         }
6157 }

static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6159 {
6160         int i = 0, old;
6161         struct net_device *dev = bp->dev;
6162         struct netdev_hw_addr *ha;
6163         struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6164         dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6165
6166         netdev_for_each_mc_addr(ha, dev) {
6167                 /* copy mac */
6168                 config_cmd->config_table[i].msb_mac_addr =
6169                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6170                 config_cmd->config_table[i].middle_mac_addr =
6171                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6172                 config_cmd->config_table[i].lsb_mac_addr =
6173                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
6174
6175                 config_cmd->config_table[i].vlan_id = 0;
6176                 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6177                 config_cmd->config_table[i].clients_bit_vector =
6178                         cpu_to_le32(1 << BP_L_ID(bp));
6179
6180                 SET_FLAG(config_cmd->config_table[i].flags,
6181                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6182                         T_ETH_MAC_COMMAND_SET);
6183
6184                 DP(NETIF_MSG_IFUP,
6185                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6186                    config_cmd->config_table[i].msb_mac_addr,
6187                    config_cmd->config_table[i].middle_mac_addr,
6188                    config_cmd->config_table[i].lsb_mac_addr);
6189                 i++;
6190         }
6191         old = config_cmd->hdr.length;
6192         if (old > i) {
6193                 for (; i < old; i++) {
                        if (CAM_IS_INVALID(config_cmd->config_table[i])) {
6196                                 /* already invalidated */
6197                                 break;
6198                         }
6199                         /* invalidate */
6200                         SET_FLAG(config_cmd->config_table[i].flags,
6201                                 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6202                                 T_ETH_MAC_COMMAND_INVALIDATE);
6203                 }
6204         }
6205
6206         config_cmd->hdr.length = i;
6207         config_cmd->hdr.offset = offset;
6208         config_cmd->hdr.client_id = 0xff;
6209         config_cmd->hdr.reserved1 = 0;
6210
6211         bp->set_mac_pending = 1;
6212         smp_wmb();
6213
6214         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6215                    U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6216 }
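
/*
 * Example of the stale-entry cleanup above (illustrative): if the previous
 * MC list programmed hdr.length == 5 entries and the new list has only 3,
 * entries 3 and 4 are flagged T_ETH_MAC_COMMAND_INVALIDATE so the CAM does
 * not keep forwarding to addresses that were removed.
 */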

static void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
6218 {
6219         int i;
6220         struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6221         dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6222         int ramrod_flags = WAIT_RAMROD_COMMON;
6223
6224         bp->set_mac_pending = 1;
6225         smp_wmb();
6226
6227         for (i = 0; i < config_cmd->hdr.length; i++)
6228                 SET_FLAG(config_cmd->config_table[i].flags,
6229                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6230                         T_ETH_MAC_COMMAND_INVALIDATE);
6231
6232         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6233                       U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6234
6235         /* Wait for a completion */
        bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}
6240
6241 #ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if the ramrod doesn't return.
 */
6252 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6253 {
6254         u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6255                          bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
6256         u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
6257         u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
6258
6259         /* Send a SET_MAC ramrod */
6260         bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6261                                cam_offset, 0);
6262         return 0;
6263 }
6264 #endif
6265
6266 static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6267                                     struct bnx2x_client_init_params *params,
6268                                     u8 activate,
6269                                     struct client_init_ramrod_data *data)
6270 {
6271         /* Clear the buffer */
6272         memset(data, 0, sizeof(*data));
6273
6274         /* general */
6275         data->general.client_id = params->rxq_params.cl_id;
6276         data->general.statistics_counter_id = params->rxq_params.stat_id;
6277         data->general.statistics_en_flg =
6278                 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6279         data->general.activate_flg = activate;
6280         data->general.sp_client_id = params->rxq_params.spcl_id;
6281
6282         /* Rx data */
6283         data->rx.tpa_en_flg =
6284                 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6285         data->rx.vmqueue_mode_en_flg = 0;
6286         data->rx.cache_line_alignment_log_size =
6287                 params->rxq_params.cache_line_log;
6288         data->rx.enable_dynamic_hc =
6289                 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6290         data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6291         data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6292         data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6293
6294         /* We don't set drop flags */
6295         data->rx.drop_ip_cs_err_flg = 0;
6296         data->rx.drop_tcp_cs_err_flg = 0;
6297         data->rx.drop_ttl0_flg = 0;
6298         data->rx.drop_udp_cs_err_flg = 0;
6299
6300         data->rx.inner_vlan_removal_enable_flg =
6301                 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6302         data->rx.outer_vlan_removal_enable_flg =
6303                 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6304         data->rx.status_block_id = params->rxq_params.fw_sb_id;
6305         data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6306         data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6307         data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6308         data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6309         data->rx.bd_page_base.lo =
6310                 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6311         data->rx.bd_page_base.hi =
6312                 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6313         data->rx.sge_page_base.lo =
6314                 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6315         data->rx.sge_page_base.hi =
6316                 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6317         data->rx.cqe_page_base.lo =
6318                 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6319         data->rx.cqe_page_base.hi =
6320                 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6321         data->rx.is_leading_rss =
6322                 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6323         data->rx.is_approx_mcast = data->rx.is_leading_rss;
6324
6325         /* Tx data */
6326         data->tx.enforce_security_flg = 0; /* VF specific */
6327         data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6328         data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6329         data->tx.mtu = 0; /* VF specific */
6330         data->tx.tx_bd_page_base.lo =
6331                 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6332         data->tx.tx_bd_page_base.hi =
6333                 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6334
6335         /* flow control data */
6336         data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6337         data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6338         data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6339         data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6340         data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6341         data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6342         data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6343
6344         data->fc.safc_group_num = params->txq_params.cos;
6345         data->fc.safc_group_en_flg =
6346                 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6347         data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
6348 }
6349
6350 static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6351 {
6352         /* ustorm cxt validation */
6353         cxt->ustorm_ag_context.cdu_usage =
6354                 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6355                                        ETH_CONNECTION_TYPE);
6356         /* xcontext validation */
6357         cxt->xstorm_ag_context.cdu_reserved =
6358                 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6359                                        ETH_CONNECTION_TYPE);
6360 }
6361
6362 int bnx2x_setup_fw_client(struct bnx2x *bp,
6363                           struct bnx2x_client_init_params *params,
6364                           u8 activate,
6365                           struct client_init_ramrod_data *data,
6366                           dma_addr_t data_mapping)
6367 {
6368         u16 hc_usec;
6369         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6370         int ramrod_flags = 0, rc;
6371
6372         /* HC and context validation values */
6373         hc_usec = params->txq_params.hc_rate ?
6374                 1000000 / params->txq_params.hc_rate : 0;
6375         bnx2x_update_coalesce_sb_index(bp,
6376                         params->txq_params.fw_sb_id,
6377                         params->txq_params.sb_cq_index,
6378                         !(params->txq_params.flags & QUEUE_FLG_HC),
6379                         hc_usec);
6380
6381         *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6382
6383         hc_usec = params->rxq_params.hc_rate ?
6384                 1000000 / params->rxq_params.hc_rate : 0;
6385         bnx2x_update_coalesce_sb_index(bp,
6386                         params->rxq_params.fw_sb_id,
6387                         params->rxq_params.sb_cq_index,
6388                         !(params->rxq_params.flags & QUEUE_FLG_HC),
6389                         hc_usec);
6390
6391         bnx2x_set_ctx_validation(params->rxq_params.cxt,
6392                                  params->rxq_params.cid);
6393
6394         /* zero stats */
6395         if (params->txq_params.flags & QUEUE_FLG_STATS)
6396                 storm_memset_xstats_zero(bp, BP_PORT(bp),
6397                                          params->txq_params.stat_id);
6398
6399         if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6400                 storm_memset_ustats_zero(bp, BP_PORT(bp),
6401                                          params->rxq_params.stat_id);
6402                 storm_memset_tstats_zero(bp, BP_PORT(bp),
6403                                          params->rxq_params.stat_id);
6404         }
6405
6406         /* Fill the ramrod data */
6407         bnx2x_fill_cl_init_data(bp, params, activate, data);
6408
        /* SETUP ramrod.
         *
         * bnx2x_sp_post() takes a spin_lock, thus no explicit memory
         * barrier other than mmiowb() is needed to impose proper
         * ordering of memory operations.
         */
        mmiowb();

6418         bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6419                       U64_HI(data_mapping), U64_LO(data_mapping), 0);
6420
6421         /* Wait for completion */
6422         rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6423                                  params->ramrod_params.index,
6424                                  params->ramrod_params.pstate,
6425                                  ramrod_flags);
6426         return rc;
6427 }
6428
/**
 * Configure the interrupt mode according to the current configuration.
 * In case of MSI-X, it will also try to enable MSI-X.
 *
 * @param bp
 *
 * @return int
 */
6437 static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
6438 {
6439         int rc = 0;
6440
6441         switch (bp->int_mode) {
6442         case INT_MODE_MSI:
6443                 bnx2x_enable_msi(bp);
6444                 /* falling through... */
6445         case INT_MODE_INTx:
6446                 bp->num_queues = 1;
6447                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
6448                 break;
6449         default:
6450                 /* Set number of queues according to bp->multi_mode value */
6451                 bnx2x_set_num_queues(bp);
6452
6453                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6454                    bp->num_queues);
6455
6456                 /* if we can't use MSI-X we only need one fp,
6457                  * so try to enable MSI-X with the requested number of fp's
6458                  * and fallback to MSI or legacy INTx with one fp
6459                  */
6460                 rc = bnx2x_enable_msix(bp);
6461                 if (rc) {
6462                         /* failed to enable MSI-X */
                        if (bp->multi_mode)
                                DP(NETIF_MSG_IFUP,
                                   "Multi requested but failed to enable "
                                   "MSI-X with %d queues, falling back to "
                                   "1 queue\n", bp->num_queues);
6470                         bp->num_queues = 1;
6471
6472                         if (!(bp->flags & DISABLE_MSI_FLAG))
6473                                 bnx2x_enable_msi(bp);
6474                 }
6475
6476                 break;
6477         }
6478
6479         return rc;
6480 }
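
/*
 * Fallback order implemented above (summary, no additional logic):
 *
 *      MSI-X with bp->num_queues queues
 *        -> on failure: MSI with one queue (unless DISABLE_MSI_FLAG is set)
 *        -> otherwise:  legacy INTx with one queue
 */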
6481
/* must be called prior to any HW initializations */
6483 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6484 {
6485         return L2_ILT_LINES(bp);
6486 }
6487
6488 void bnx2x_ilt_set_info(struct bnx2x *bp)
6489 {
6490         struct ilt_client_info *ilt_client;
6491         struct bnx2x_ilt *ilt = BP_ILT(bp);
6492         u16 line = 0;
6493
6494         ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6495         DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6496
6497         /* CDU */
6498         ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6499         ilt_client->client_num = ILT_CLIENT_CDU;
6500         ilt_client->page_size = CDU_ILT_PAGE_SZ;
6501         ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6502         ilt_client->start = line;
6503         line += L2_ILT_LINES(bp);
6504 #ifdef BCM_CNIC
6505         line += CNIC_ILT_LINES;
6506 #endif
6507         ilt_client->end = line - 1;
6508
6509         DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6510                                          "flags 0x%x, hw psz %d\n",
6511            ilt_client->start,
6512            ilt_client->end,
6513            ilt_client->page_size,
6514            ilt_client->flags,
6515            ilog2(ilt_client->page_size >> 12));
6516
6517         /* QM */
6518         if (QM_INIT(bp->qm_cid_count)) {
6519                 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6520                 ilt_client->client_num = ILT_CLIENT_QM;
6521                 ilt_client->page_size = QM_ILT_PAGE_SZ;
6522                 ilt_client->flags = 0;
6523                 ilt_client->start = line;
6524
6525                 /* 4 bytes for each cid */
6526                 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6527                                                          QM_ILT_PAGE_SZ);
6528
6529                 ilt_client->end = line - 1;
6530
6531                 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6532                                                  "flags 0x%x, hw psz %d\n",
6533                    ilt_client->start,
6534                    ilt_client->end,
6535                    ilt_client->page_size,
6536                    ilt_client->flags,
6537                    ilog2(ilt_client->page_size >> 12));
6538
6539         }
6540         /* SRC */
6541         ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6542 #ifdef BCM_CNIC
6543         ilt_client->client_num = ILT_CLIENT_SRC;
6544         ilt_client->page_size = SRC_ILT_PAGE_SZ;
6545         ilt_client->flags = 0;
6546         ilt_client->start = line;
6547         line += SRC_ILT_LINES;
6548         ilt_client->end = line - 1;
6549
6550         DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6551                                          "flags 0x%x, hw psz %d\n",
6552            ilt_client->start,
6553            ilt_client->end,
6554            ilt_client->page_size,
6555            ilt_client->flags,
6556            ilog2(ilt_client->page_size >> 12));
6557
6558 #else
6559         ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6560 #endif
6561
6562         /* TM */
6563         ilt_client = &ilt->clients[ILT_CLIENT_TM];
6564 #ifdef BCM_CNIC
6565         ilt_client->client_num = ILT_CLIENT_TM;
6566         ilt_client->page_size = TM_ILT_PAGE_SZ;
6567         ilt_client->flags = 0;
6568         ilt_client->start = line;
6569         line += TM_ILT_LINES;
6570         ilt_client->end = line - 1;
6571
6572         DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6573                                          "flags 0x%x, hw psz %d\n",
6574            ilt_client->start,
6575            ilt_client->end,
6576            ilt_client->page_size,
6577            ilt_client->flags,
6578            ilog2(ilt_client->page_size >> 12));
6579
6580 #else
6581         ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6582 #endif
6583 }
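
/*
 * Worked example for the QM line math above (illustrative numbers only,
 * assuming QM_QUEUES_PER_FUNC == 16 and QM_ILT_PAGE_SZ == 4096): with
 * bp->qm_cid_count == 1024, the QM client needs
 * DIV_ROUND_UP(1024 * 16 * 4, 4096) == 16 ILT lines, since the QM keeps
 * 4 bytes per cid per queue.
 */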
6584
6585 int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6586                        int is_leading)
6587 {
6588         struct bnx2x_client_init_params params = { {0} };
6589         int rc;
6590
6591         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6592                              IGU_INT_ENABLE, 0);
6593
6594         params.ramrod_params.pstate = &fp->state;
6595         params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6596         params.ramrod_params.index = fp->index;
6597         params.ramrod_params.cid = fp->cid;
6598
6599         if (is_leading)
6600                 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6601
6602         bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6603
6604         bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6605
6606         rc = bnx2x_setup_fw_client(bp, &params, 1,
6607                                      bnx2x_sp(bp, client_init_data),
6608                                      bnx2x_sp_mapping(bp, client_init_data));
6609         return rc;
6610 }
6611
6612 int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
6613 {
6614         int rc;
6615
6616         int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
6617
6618         /* halt the connection */
6619         *p->pstate = BNX2X_FP_STATE_HALTING;
6620         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6621                                                   p->cl_id, 0);
6622
6623         /* Wait for completion */
6624         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6625                                p->pstate, poll_flag);
6626         if (rc) /* timeout */
6627                 return rc;
6628
6629         *p->pstate = BNX2X_FP_STATE_TERMINATING;
6630         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6631                                                        p->cl_id, 0);
6632         /* Wait for completion */
6633         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6634                                p->pstate, poll_flag);
6635         if (rc) /* timeout */
                return rc;

6639         /* delete cfc entry */
6640         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
6641
6642         /* Wait for completion */
6643         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6644                                p->pstate, WAIT_RAMROD_COMMON);
6645         return rc;
6646 }
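
/*
 * Teardown sequence driven above (summary): each step posts a ramrod and
 * synchronously waits for the matching state transition:
 *
 *      ETH_HALT       -> BNX2X_FP_STATE_HALTED
 *      ETH_TERMINATE  -> BNX2X_FP_STATE_TERMINATED
 *      COMMON_CFC_DEL -> BNX2X_FP_STATE_CLOSED
 */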
6647
6648 static int bnx2x_stop_client(struct bnx2x *bp, int index)
6649 {
6650         struct bnx2x_client_ramrod_params client_stop = {0};
6651         struct bnx2x_fastpath *fp = &bp->fp[index];
6652
6653         client_stop.index = index;
6654         client_stop.cid = fp->cid;
6655         client_stop.cl_id = fp->cl_id;
6656         client_stop.pstate = &(fp->state);
6657         client_stop.poll = 0;
6658
6659         return bnx2x_stop_fw_client(bp, &client_stop);
}

6663 static void bnx2x_reset_func(struct bnx2x *bp)
6664 {
6665         int port = BP_PORT(bp);
6666         int func = BP_FUNC(bp);
6667         int i;
6668         int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
6669                         (CHIP_IS_E2(bp) ?
6670                          offsetof(struct hc_status_block_data_e2, common) :
6671                          offsetof(struct hc_status_block_data_e1x, common));
6672         int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6673         int pfid_offset = offsetof(struct pci_entity, pf_id);
6674
6675         /* Disable the function in the FW */
6676         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6677         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6678         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6679         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6680
6681         /* FP SBs */
6682         for_each_queue(bp, i) {
6683                 struct bnx2x_fastpath *fp = &bp->fp[i];
6684                 REG_WR8(bp,
6685                         BAR_CSTRORM_INTMEM +
6686                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6687                         + pfunc_offset_fp + pfid_offset,
6688                         HC_FUNCTION_DISABLED);
6689         }
6690
6691         /* SP SB */
6692         REG_WR8(bp,
6693                 BAR_CSTRORM_INTMEM +
6694                 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6695                 pfunc_offset_sp + pfid_offset,
                HC_FUNCTION_DISABLED);

6699         for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6700                 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
6701                        0);
6702
6703         /* Configure IGU */
6704         if (bp->common.int_block == INT_BLOCK_HC) {
6705                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6706                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6707         } else {
6708                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6709                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6710         }
6711
6712 #ifdef BCM_CNIC
6713         /* Disable Timer scan */
6714         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6715         /*
6716          * Wait for at least 10ms and up to 2 second for the timers scan to
6717          * complete
6718          */
6719         for (i = 0; i < 200; i++) {
6720                 msleep(10);
6721                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6722                         break;
6723         }
6724 #endif
6725         /* Clear ILT */
6726         bnx2x_clear_func_ilt(bp, func);
6727
        /* Timers bug workaround for E2: if this is vnic-3,
         * we need to set the entire ILT range for the timers.
         */
6731         if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6732                 struct ilt_client_info ilt_cli;
6733                 /* use dummy TM client */
6734                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6735                 ilt_cli.start = 0;
6736                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6737                 ilt_cli.client_num = ILT_CLIENT_TM;
6738
6739                 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6740         }
6741
6742         /* this assumes that reset_port() called before reset_func()*/
6743         if (CHIP_IS_E2(bp))
6744                 bnx2x_pf_disable(bp);
6745
6746         bp->dmae_ready = 0;
6747 }
6748
6749 static void bnx2x_reset_port(struct bnx2x *bp)
6750 {
6751         int port = BP_PORT(bp);
6752         u32 val;
6753
6754         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6755
6756         /* Do not rcv packets to BRB */
6757         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6758         /* Do not direct rcv packets that are not for MCP to the BRB */
6759         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6760                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6761
6762         /* Configure AEU */
6763         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6764
6765         msleep(100);
6766         /* Check for BRB port occupancy */
6767         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6768         if (val)
6769                 DP(NETIF_MSG_IFDOWN,
6770                    "BRB1 is not empty  %d blocks are occupied\n", val);
6771
6772         /* TODO: Close Doorbell port? */
6773 }
6774
6775 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6776 {
6777         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6778            BP_ABS_FUNC(bp), reset_code);
6779
6780         switch (reset_code) {
6781         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6782                 bnx2x_reset_port(bp);
6783                 bnx2x_reset_func(bp);
6784                 bnx2x_reset_common(bp);
6785                 break;
6786
6787         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6788                 bnx2x_reset_port(bp);
6789                 bnx2x_reset_func(bp);
6790                 break;
6791
6792         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6793                 bnx2x_reset_func(bp);
6794                 break;
6795
6796         default:
6797                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6798                 break;
6799         }
6800 }
6801
6802 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
6803 {
6804         int port = BP_PORT(bp);
6805         u32 reset_code = 0;
6806         int i, cnt, rc;
6807
6808         /* Wait until tx fastpath tasks complete */
6809         for_each_queue(bp, i) {
6810                 struct bnx2x_fastpath *fp = &bp->fp[i];
6811
6812                 cnt = 1000;
6813                 while (bnx2x_has_tx_work_unload(fp)) {
6814
6815                         if (!cnt) {
6816                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6817                                           i);
6818 #ifdef BNX2X_STOP_ON_ERROR
6819                                 bnx2x_panic();
                                return;
6821 #else
6822                                 break;
6823 #endif
6824                         }
6825                         cnt--;
6826                         msleep(1);
6827                 }
6828         }
6829         /* Give HW time to discard old tx messages */
6830         msleep(1);
6831
6832         if (CHIP_IS_E1(bp)) {
6833                 /* invalidate mc list,
6834                  * wait and poll (interrupts are off)
6835                  */
                bnx2x_invalidate_e1_mc_list(bp);
6837                 bnx2x_set_eth_mac(bp, 0);
6838
6839         } else {
6840                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6841
6842                 bnx2x_set_eth_mac(bp, 0);
6843
6844                 for (i = 0; i < MC_HASH_SIZE; i++)
6845                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6846         }
6847
6848 #ifdef BCM_CNIC
6849         /* Clear iSCSI L2 MAC */
6850         mutex_lock(&bp->cnic_mutex);
6851         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
6852                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
6853                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
6854         }
6855         mutex_unlock(&bp->cnic_mutex);
6856 #endif
6857
6858         if (unload_mode == UNLOAD_NORMAL)
6859                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6860
6861         else if (bp->flags & NO_WOL_FLAG)
6862                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6863
6864         else if (bp->wol) {
6865                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6866                 u8 *mac_addr = bp->dev->dev_addr;
6867                 u32 val;
6868                 /* The mac address is written to entries 1-4 to
6869                    preserve entry 0 which is used by the PMF */
6870                 u8 entry = (BP_E1HVN(bp) + 1)*8;
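                     /* Each match entry is two 32-bit registers (8 bytes)
                      * wide, hence the *8 stride: vn 0..3 maps to entries
                      * 1..4 at byte offsets 8, 16, 24 and 32.
                      */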
6871
6872                 val = (mac_addr[0] << 8) | mac_addr[1];
6873                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6874
6875                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6876                       (mac_addr[4] << 8) | mac_addr[5];
6877                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6878
6879                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6880
6881         } else
6882                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6883
6884         /* Close multi and leading connections.
6885          * Completions for ramrods are collected in a synchronous way.
6886          */
6887         for_each_queue(bp, i)
6888                 if (bnx2x_stop_client(bp, i))
6889 #ifdef BNX2X_STOP_ON_ERROR
6890                         return;
6891 #else
6892                         goto unload_error;
6893 #endif
6894
6895         rc = bnx2x_func_stop(bp);
6896         if (rc) {
6897                 BNX2X_ERR("Function stop failed!\n");
6898 #ifdef BNX2X_STOP_ON_ERROR
6899                 return;
6900 #else
6901                 goto unload_error;
6902 #endif
6903         }
6904 #ifndef BNX2X_STOP_ON_ERROR
6905 unload_error:
6906 #endif
6907         if (!BP_NOMCP(bp))
6908                 reset_code = bnx2x_fw_command(bp, reset_code, 0);
6909         else {
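                     /* No MCP: mimic its bookkeeping locally.  Per path,
                      * load_count[path][0] counts all loaded functions and
                      * load_count[path][1 + port] those per port; the last
                      * function to go down picks the widest reset scope.
                      */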
6910                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      "
6911                                      "%d, %d, %d\n", BP_PATH(bp),
6912                    load_count[BP_PATH(bp)][0],
6913                    load_count[BP_PATH(bp)][1],
6914                    load_count[BP_PATH(bp)][2]);
6915                 load_count[BP_PATH(bp)][0]--;
6916                 load_count[BP_PATH(bp)][1 + port]--;
6917                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  "
6918                                      "%d, %d, %d\n", BP_PATH(bp),
6919                    load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6920                    load_count[BP_PATH(bp)][2]);
6921                 if (load_count[BP_PATH(bp)][0] == 0)
6922                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6923                 else if (load_count[BP_PATH(bp)][1 + port] == 0)
6924                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6925                 else
6926                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6927         }
6928
6929         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6930             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6931                 bnx2x__link_reset(bp);
6932
6933         /* Disable HW interrupts, NAPI */
6934         bnx2x_netif_stop(bp, 1);
6935
6936         /* Release IRQs */
6937         bnx2x_free_irq(bp);
6938
6939         /* Reset the chip */
6940         bnx2x_reset_chip(bp, reset_code);
6941
6942         /* Report UNLOAD_DONE to MCP */
6943         if (!BP_NOMCP(bp))
6944                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
6945
6946 }
6947
6948 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
6949 {
6950         u32 val;
6951
6952         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6953
6954         if (CHIP_IS_E1(bp)) {
6955                 int port = BP_PORT(bp);
6956                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6957                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
6958
6959                 val = REG_RD(bp, addr);
6960                 val &= ~(0x300);
6961                 REG_WR(bp, addr, val);
6962         } else if (CHIP_IS_E1H(bp)) {
6963                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6964                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6965                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6966                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6967         }
6968 }
6969
6970 /* Close gates #2, #3 and #4: */
6971 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
6972 {
6973         u32 val, addr;
6974
6975         /* Gates #2 and #4a are closed/opened for "not E1" only */
6976         if (!CHIP_IS_E1(bp)) {
6977                 /* #4 */
6978                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
6979                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
6980                        close ? (val | 0x1) : (val & (~(u32)1)));
6981                 /* #2 */
6982                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
6983                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
6984                        close ? (val | 0x1) : (val & (~(u32)1)));
6985         }
6986
6987         /* #3 */
6988         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
6989         val = REG_RD(bp, addr);
6990         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
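             /* Note the inverted sense for gate #3: closing the gate clears
              * bit 0 of HC_REG_CONFIG_x, while opening it sets the bit.
              */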
6991
6992         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
6993                 close ? "closing" : "opening");
6994         mmiowb();
6995 }
6996
6997 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
6998
6999 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7000 {
7001         /* Save the current `magic' bit value and set the bit */
7002         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7003         *magic_val = val & SHARED_MF_CLP_MAGIC;
7004         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7005 }
7006
7007 /* Restore the value of the `magic' bit.
7008  *
7009  * @param bp Driver handle.
7010  * @param magic_val Old value of the `magic' bit.
7011  */
7012 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7013 {
7014         /* Restore the `magic' bit value... */
7015         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7016         MF_CFG_WR(bp, shared_mf_config.clp_mb,
7017                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7018 }
7019
7020 /**
7021  * Prepares for MCP reset: takes care of CLP configurations.
7022  *
7023  * @param bp
7024  * @param magic_val Old value of 'magic' bit.
7025  */
7026 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7027 {
7028         u32 shmem;
7029         u32 validity_offset;
7030
7031         DP(NETIF_MSG_HW, "Starting\n");
7032
7033         /* Set `magic' bit in order to save MF config */
7034         if (!CHIP_IS_E1(bp))
7035                 bnx2x_clp_reset_prep(bp, magic_val);
7036
7037         /* Get shmem offset */
7038         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7039         validity_offset = offsetof(struct shmem_region, validity_map[0]);
7040
7041         /* Clear validity map flags */
7042         if (shmem > 0)
7043                 REG_WR(bp, shmem + validity_offset, 0);
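             /* The MCP rewrites the validity words once it comes back up;
              * bnx2x_reset_mcp_comp() polls for exactly that.
              */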
7044 }
7045
7046 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
7047 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
7048
7049 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7050  * depending on the HW type.
7051  *
7052  * @param bp
7053  */
7054 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7055 {
7056         /* special handling for emulation and FPGA,
7057            wait 10 times longer */
7058         if (CHIP_REV_IS_SLOW(bp))
7059                 msleep(MCP_ONE_TIMEOUT*10);
7060         else
7061                 msleep(MCP_ONE_TIMEOUT);
7062 }
7063
7064 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7065 {
7066         u32 shmem, cnt, validity_offset, val;
7067         int rc = 0;
7068
7069         msleep(100);
7070
7071         /* Get shmem offset */
7072         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7073         if (shmem == 0) {
7074                 BNX2X_ERR("Shmem address read returned 0\n");
7075                 rc = -ENOTTY;
7076                 goto exit_lbl;
7077         }
7078
7079         validity_offset = offsetof(struct shmem_region, validity_map[0]);
7080
7081         /* Wait for MCP to come up */
7082         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7083                 /* TBD: it's best to check the validity map of the
7084                  * last port; currently we only check port 0.
7085                  */
7086                 val = REG_RD(bp, shmem + validity_offset);
7087                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7088                    shmem + validity_offset, val);
7089
7090                 /* check that shared memory is valid. */
7091                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7092                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7093                         break;
7094
7095                 bnx2x_mcp_wait_one(bp);
7096         }
7097
7098         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7099
7100         /* Check that shared memory is valid. This indicates that MCP is up. */
7101         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7102             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7103                 BNX2X_ERR("Shmem signature not present. MCP is not up!\n");
7104                 rc = -ENOTTY;
7105                 goto exit_lbl;
7106         }
7107
7108 exit_lbl:
7109         /* Restore the `magic' bit value */
7110         if (!CHIP_IS_E1(bp))
7111                 bnx2x_clp_reset_done(bp, magic_val);
7112
7113         return rc;
7114 }
7115
7116 static void bnx2x_pxp_prep(struct bnx2x *bp)
7117 {
7118         if (!CHIP_IS_E1(bp)) {
7119                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7120                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7121                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7122                 mmiowb();
7123         }
7124 }
7125
7126 /*
7127  * Reset the whole chip except for:
7128  *      - PCIE core
7129  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7130  *              one reset bit)
7131  *      - IGU
7132  *      - MISC (including AEU)
7133  *      - GRC
7134  *      - RBCN, RBCP
7135  */
7136 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7137 {
7138         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7139
7140         not_reset_mask1 =
7141                 MISC_REGISTERS_RESET_REG_1_RST_HC |
7142                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7143                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7144
7145         not_reset_mask2 =
7146                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7147                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7148                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7149                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7150                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7151                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
7152                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7153                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7154
7155         reset_mask1 = 0xffffffff;
7156
7157         if (CHIP_IS_E1(bp))
7158                 reset_mask2 = 0xffff;
7159         else
7160                 reset_mask2 = 0x1ffff;
7161
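             /* Writing 1s to RESET_REG_x_CLEAR puts the selected blocks into
              * reset; writing the same bits to RESET_REG_x_SET releases them.
              */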
7162         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7163                reset_mask1 & (~not_reset_mask1));
7164         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7165                reset_mask2 & (~not_reset_mask2));
7166
7167         barrier();
7168         mmiowb();
7169
7170         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7171         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7172         mmiowb();
7173 }
7174
7175 static int bnx2x_process_kill(struct bnx2x *bp)
7176 {
7177         int cnt = 1000;
7178         u32 val = 0;
7179         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7180
7182         /* Empty the Tetris buffer, wait for 1s */
7183         do {
7184                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7185                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7186                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7187                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7188                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
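                     /* All read credits free, both port clients idle and the
                      * PGL expansion-ROM register fully set are taken as the
                      * "no outstanding read requests" indication.
                      */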
7189                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7190                     ((port_is_idle_0 & 0x1) == 0x1) &&
7191                     ((port_is_idle_1 & 0x1) == 0x1) &&
7192                     (pgl_exp_rom2 == 0xffffffff))
7193                         break;
7194                 msleep(1);
7195         } while (cnt-- > 0);
7196
7197         if (cnt <= 0) {
7198                 DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there are"
7199                           " still outstanding read requests after 1s!\n");
7201                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7202                           " port_is_idle_0=0x%08x,"
7203                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7204                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7205                           pgl_exp_rom2);
7206                 return -EAGAIN;
7207         }
7208
7209         barrier();
7210
7211         /* Close gates #2, #3 and #4 */
7212         bnx2x_set_234_gates(bp, true);
7213
7214         /* TBD: Indicate that "process kill" is in progress to MCP */
7215
7216         /* Clear "unprepared" bit */
7217         REG_WR(bp, MISC_REG_UNPREPARED, 0);
7218         barrier();
7219
7220         /* Make sure all is written to the chip before the reset */
7221         mmiowb();
7222
7223         /* Wait for 1ms to empty GLUE and PCI-E core queues,
7224          * PSWHST, GRC and PSWRD Tetris buffer.
7225          */
7226         msleep(1);
7227
7228         /* Prepare to chip reset: */
7229         /* MCP */
7230         bnx2x_reset_mcp_prep(bp, &val);
7231
7232         /* PXP */
7233         bnx2x_pxp_prep(bp);
7234         barrier();
7235
7236         /* reset the chip */
7237         bnx2x_process_kill_chip_reset(bp);
7238         barrier();
7239
7240         /* Recover after reset: */
7241         /* MCP */
7242         if (bnx2x_reset_mcp_comp(bp, val))
7243                 return -EAGAIN;
7244
7245         /* PXP */
7246         bnx2x_pxp_prep(bp);
7247
7248         /* Open the gates #2, #3 and #4 */
7249         bnx2x_set_234_gates(bp, false);
7250
7251         /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7252          * reset state, re-enable attentions. */
7253
7254         return 0;
7255 }
7256
7257 static int bnx2x_leader_reset(struct bnx2x *bp)
7258 {
7259         int rc = 0;
7260         /* Try to recover after the failure */
7261         if (bnx2x_process_kill(bp)) {
7262                 printk(KERN_ERR "%s: Something bad has happened!\n",
7263                        bp->dev->name);
7264                 rc = -EAGAIN;
7265                 goto exit_leader_reset;
7266         }
7267
7268         /* Clear "reset is in progress" bit and update the driver state */
7269         bnx2x_set_reset_done(bp);
7270         bp->recovery_state = BNX2X_RECOVERY_DONE;
7271
7272 exit_leader_reset:
7273         bp->is_leader = 0;
7274         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7275         smp_wmb();
7276         return rc;
7277 }
7278
7279 /* Assumption: runs under rtnl lock. This together with the fact
7280  * that it's called only from bnx2x_reset_task() ensure that it
7281  * will never be called when netif_running(bp->dev) is false.
7282  */
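     /* Recovery flow in brief: the first function to handle the parity event
      * takes the LEADER_LOCK and becomes the leader.  All functions unload;
      * the leader then polls (rescheduling the reset task every HZ/10) until
      * the global load counter drops to zero, performs the "process kill"
      * chip reset and reloads.  Non-leaders reload once the reset-done flag
      * is set, or inherit leadership if the previous leader went away.
      */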
7283 static void bnx2x_parity_recover(struct bnx2x *bp)
7284 {
7285         DP(NETIF_MSG_HW, "Handling parity\n");
7286         while (1) {
7287                 switch (bp->recovery_state) {
7288                 case BNX2X_RECOVERY_INIT:
7289                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7290                         /* Try to get a LEADER_LOCK HW lock */
7291                         if (bnx2x_trylock_hw_lock(bp,
7292                                 HW_LOCK_RESOURCE_RESERVED_08))
7293                                 bp->is_leader = 1;
7294
7295                         /* Stop the driver */
7296                         /* If the interface has been removed - return */
7297                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7298                                 return;
7299
7300                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
7301                         /* Ensure "is_leader" and "recovery_state"
7302                          *  update values are seen on other CPUs
7303                          */
7304                         smp_wmb();
7305                         break;
7306
7307                 case BNX2X_RECOVERY_WAIT:
7308                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7309                         if (bp->is_leader) {
7310                                 u32 load_counter = bnx2x_get_load_cnt(bp);
7311                                 if (load_counter) {
7312                                         /* Wait until all other functions get
7313                                          * down.
7314                                          */
7315                                         schedule_delayed_work(&bp->reset_task,
7316                                                                 HZ/10);
7317                                         return;
7318                                 } else {
7319                                         /* If all other functions got down -
7320                                          * try to bring the chip back to
7321                                          * normal. In any case it's an exit
7322                                          * point for a leader.
7323                                          */
7324                                         if (bnx2x_leader_reset(bp) ||
7325                                         bnx2x_nic_load(bp, LOAD_NORMAL)) {
7326                                                 printk(KERN_ERR "%s: Recovery "
7327                                                 "has failed. Power cycle is "
7328                                                 "needed.\n", bp->dev->name);
7329                                                 /* Disconnect this device */
7330                                                 netif_device_detach(bp->dev);
7331                                                 /* Block ifup for all functions
7332                                                  * of this ASIC until
7333                                                  * "process kill" or power
7334                                                  * cycle.
7335                                                  */
7336                                                 bnx2x_set_reset_in_progress(bp);
7337                                                 /* Shut down the power */
7338                                                 bnx2x_set_power_state(bp,
7339                                                                 PCI_D3hot);
7340                                                 return;
7341                                         }
7342
7343                                         return;
7344                                 }
7345                         } else { /* non-leader */
7346                                 if (!bnx2x_reset_is_done(bp)) {
7347                                         /* Try to get the LEADER_LOCK HW
7348                                          * lock, since the former leader
7349                                          * may have been unloaded by the
7350                                          * user or may have released
7351                                          * leadership for another reason.
7352                                          */
7353                                         if (bnx2x_trylock_hw_lock(bp,
7354                                             HW_LOCK_RESOURCE_RESERVED_08)) {
7355                                                 /* I'm a leader now! Restart a
7356                                                  * switch case.
7357                                                  */
7358                                                 bp->is_leader = 1;
7359                                                 break;
7360                                         }
7361
7362                                         schedule_delayed_work(&bp->reset_task,
7363                                                                 HZ/10);
7364                                         return;
7365
7366                                 } else { /* A leader has completed
7367                                           * the "process kill". It's an exit
7368                                           * point for a non-leader.
7369                                           */
7370                                         bnx2x_nic_load(bp, LOAD_NORMAL);
7371                                         bp->recovery_state =
7372                                                 BNX2X_RECOVERY_DONE;
7373                                         smp_wmb();
7374                                         return;
7375                                 }
7376                         }
7377                 default:
7378                         return;
7379                 }
7380         }
7381 }
7382
7383 /* bnx2x_nic_unload() flushes the bnx2x_wq, so the reset task is
7384  * scheduled on a generic workqueue in order to prevent a deadlock.
7385  */
7386 static void bnx2x_reset_task(struct work_struct *work)
7387 {
7388         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
7389
7390 #ifdef BNX2X_STOP_ON_ERROR
7391         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7392                   " so reset not done to allow debug dump,\n"
7393                   " you will need to reboot when done\n");
7394         return;
7395 #endif
7396
7397         rtnl_lock();
7398
7399         if (!netif_running(bp->dev))
7400                 goto reset_task_exit;
7401
7402         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7403                 bnx2x_parity_recover(bp);
7404         else {
7405                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7406                 bnx2x_nic_load(bp, LOAD_NORMAL);
7407         }
7408
7409 reset_task_exit:
7410         rtnl_unlock();
7411 }
7412
7413 /* end of nic load/unload */
7414
7415 /*
7416  * Init service functions
7417  */
7418
7419 u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
7420 {
7421         u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7422         u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7423         return base + (BP_ABS_FUNC(bp)) * stride;
7424 }
7425
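     /* Writing a function number to the per-function PGL "pretend" register
      * makes subsequent GRC accesses from this PF appear to come from that
      * function.  The function below pretends to be function 0 in order to
      * disable the HC on its behalf, then restores its own identity.
      */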
7426 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
7427 {
7428         u32 reg = bnx2x_get_pretend_reg(bp);
7429
7430         /* Flush all outstanding writes */
7431         mmiowb();
7432
7433         /* Pretend to be function 0 */
7434         REG_WR(bp, reg, 0);
7435         REG_RD(bp, reg);        /* Flush the GRC transaction (in the chip) */
7436
7437         /* From now on we are in the "like-E1" mode */
7438         bnx2x_int_disable(bp);
7439
7440         /* Flush all outstanding writes */
7441         mmiowb();
7442
7443         /* Restore the original function */
7444         REG_WR(bp, reg, BP_ABS_FUNC(bp));
7445         REG_RD(bp, reg);
7446 }
7447
7448 static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
7449 {
7450         if (CHIP_IS_E1(bp))
7451                 bnx2x_int_disable(bp);
7452         else
7453                 bnx2x_undi_int_disable_e1h(bp);
7454 }
7455
7456 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7457 {
7458         u32 val;
7459
7460         /* Check if there is any driver already loaded */
7461         val = REG_RD(bp, MISC_REG_UNPREPARED);
7462         if (val == 0x1) {
7463                 /* Check if it is the UNDI driver
7464                  * UNDI driver initializes CID offset for normal bell to 0x7
7465                  */
7466                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7467                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7468                 if (val == 0x7) {
7469                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7470                         /* save our pf_num */
7471                         int orig_pf_num = bp->pf_num;
7472                         u32 swap_en;
7473                         u32 swap_val;
7474
7475                         /* clear the UNDI indication */
7476                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7477
7478                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7479
7480                         /* try to unload UNDI on port 0 */
7481                         bp->pf_num = 0;
7482                         bp->fw_seq =
7483                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7484                                 DRV_MSG_SEQ_NUMBER_MASK);
7485                         reset_code = bnx2x_fw_command(bp, reset_code, 0);
7486
7487                         /* if UNDI is loaded on the other port */
7488                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7489
7490                                 /* send "DONE" for previous unload */
7491                                 bnx2x_fw_command(bp,
7492                                                  DRV_MSG_CODE_UNLOAD_DONE, 0);
7493
7494                                 /* unload UNDI on port 1 */
7495                                 bp->pf_num = 1;
7496                                 bp->fw_seq =
7497                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7498                                         DRV_MSG_SEQ_NUMBER_MASK);
7499                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7500
7501                                 bnx2x_fw_command(bp, reset_code, 0);
7502                         }
7503
7504                         /* now it's safe to release the lock */
7505                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7506
7507                         bnx2x_undi_int_disable(bp);
7508
7509                         /* close input traffic and let it drain */
7510                         /* Do not rcv packets to BRB */
7511                         REG_WR(bp,
7512                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7513                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7514                         /* Do not direct rcv packets that are not for MCP to
7515                          * the BRB */
7516                         REG_WR(bp,
7517                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7518                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7519                         /* clear AEU */
7520                         REG_WR(bp,
7521                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7522                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7523                         msleep(10);
7524
7525                         /* save NIG port swap info */
7526                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7527                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7528                         /* reset device */
7529                         REG_WR(bp,
7530                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7531                                0xd3ffffff);
7532                         REG_WR(bp,
7533                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7534                                0x1403);
7535                         /* take the NIG out of reset and restore swap values */
7536                         REG_WR(bp,
7537                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7538                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7539                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7540                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7541
7542                         /* send unload done to the MCP */
7543                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7544
7545                         /* restore our func and fw_seq */
7546                         bp->pf_num = orig_pf_num;
7547                         bp->fw_seq =
7548                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7549                                 DRV_MSG_SEQ_NUMBER_MASK);
7550                 } else
7551                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7552         }
7553 }
7554
7555 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7556 {
7557         u32 val, val2, val3, val4, id;
7558         u16 pmc;
7559
7560         /* Get the chip revision id and number. */
7561         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7562         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7563         id = ((val & 0xffff) << 16);
7564         val = REG_RD(bp, MISC_REG_CHIP_REV);
7565         id |= ((val & 0xf) << 12);
7566         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7567         id |= ((val & 0xff) << 4);
7568         val = REG_RD(bp, MISC_REG_BOND_ID);
7569         id |= (val & 0xf);
7570         bp->common.chip_id = id;
7571
7572         /* Set doorbell size */
7573         bp->db_size = (1 << BNX2X_DB_SHIFT);
7574
7575         if (CHIP_IS_E2(bp)) {
7576                 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7577                 if ((val & 1) == 0)
7578                         val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7579                 else
7580                         val = (val >> 1) & 1;
7581                 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7582                                                        "2_PORT_MODE");
7583                 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7584                                                  CHIP_2_PORT_MODE;
7585
7586                 if (CHIP_MODE_IS_4_PORT(bp))
7587                         bp->pfid = (bp->pf_num >> 1);   /* 0..3 */
7588                 else
7589                         bp->pfid = (bp->pf_num & 0x6);  /* 0, 2, 4, 6 */
7590         } else {
7591                 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7592                 bp->pfid = bp->pf_num;                  /* 0..7 */
7593         }
7594
7595         /*
7596          * Set the base FW non-default (fast path) status block ID. This
7597          * value is used to initialize the fw_sb_id saved in the fp/queue
7598          * structure, which in turn determines the ID used by the FW.
7599          */
7600         if (CHIP_IS_E1x(bp))
7601                 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7602         else /* E2 */
7603                 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7604
7605         bp->link_params.chip_id = bp->common.chip_id;
7606         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7607
7608         val = (REG_RD(bp, 0x2874) & 0x55);
7609         if ((bp->common.chip_id & 0x1) ||
7610             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7611                 bp->flags |= ONE_PORT_FLAG;
7612                 BNX2X_DEV_INFO("single port device\n");
7613         }
7614
7615         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7616         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7617                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7618         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7619                        bp->common.flash_size, bp->common.flash_size);
7620
7621         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7622         bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7623                                         MISC_REG_GENERIC_CR_1 :
7624                                         MISC_REG_GENERIC_CR_0));
7625         bp->link_params.shmem_base = bp->common.shmem_base;
7626         bp->link_params.shmem2_base = bp->common.shmem2_base;
7627         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
7628                        bp->common.shmem_base, bp->common.shmem2_base);
7629
7630         if (!bp->common.shmem_base) {
7631                 BNX2X_DEV_INFO("MCP not active\n");
7632                 bp->flags |= NO_MCP_FLAG;
7633                 return;
7634         }
7635
7636         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7637         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7638                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7639                 BNX2X_ERR("BAD MCP validity signature\n");
7640
7641         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7642         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7643
7644         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7645                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7646                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7647
7648         bp->link_params.feature_config_flags = 0;
7649         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7650         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7651                 bp->link_params.feature_config_flags |=
7652                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7653         else
7654                 bp->link_params.feature_config_flags &=
7655                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7656
7657         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7658         bp->common.bc_ver = val;
7659         BNX2X_DEV_INFO("bc_ver %X\n", val);
7660         if (val < BNX2X_BC_VER) {
7661                 /* For now only warn;
7662                  * later we might need to enforce this. */
7663                 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7664                           "please upgrade BC\n", BNX2X_BC_VER, val);
7665         }
7666         bp->link_params.feature_config_flags |=
7667                                 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
7668                                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7669
7670         bp->link_params.feature_config_flags |=
7671                 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7672                 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
7673
7674         if (BP_E1HVN(bp) == 0) {
7675                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7676                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7677         } else {
7678                 /* no WOL capability for E1HVN != 0 */
7679                 bp->flags |= NO_WOL_FLAG;
7680         }
7681         BNX2X_DEV_INFO("%sWoL capable\n",
7682                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
7683
7684         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7685         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7686         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7687         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7688
7689         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7690                  val, val2, val3, val4);
7691 }
7692
7693 #define IGU_FID(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7694 #define IGU_VEC(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7695
7696 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7697 {
7698         int pfid = BP_FUNC(bp);
7699         int vn = BP_E1HVN(bp);
7700         int igu_sb_id;
7701         u32 val;
7702         u8 fid;
7703
7704         bp->igu_base_sb = 0xff;
7705         bp->igu_sb_cnt = 0;
7706         if (CHIP_INT_MODE_IS_BC(bp)) {
7707                 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7708                                        bp->l2_cid_count);
7709
7710                 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7711                         FP_SB_MAX_E1x;
7712
7713                 bp->igu_dsb_id =  E1HVN_MAX * FP_SB_MAX_E1x +
7714                         (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7715
7716                 return;
7717         }
7718
7719         /* IGU in normal mode - read CAM */
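             /* Each CAM entry maps one IGU status block to a function: the
              * FID field carries a PF/VF flag plus the function number, and
              * the VECTOR field the SB index within that function (vector 0
              * is the default status block).
              */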
7720         for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7721              igu_sb_id++) {
7722                 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7723                 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7724                         continue;
7725                 fid = IGU_FID(val);
7726                 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7727                         if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7728                                 continue;
7729                         if (IGU_VEC(val) == 0)
7730                                 /* default status block */
7731                                 bp->igu_dsb_id = igu_sb_id;
7732                         else {
7733                                 if (bp->igu_base_sb == 0xff)
7734                                         bp->igu_base_sb = igu_sb_id;
7735                                 bp->igu_sb_cnt++;
7736                         }
7737                 }
7738         }
7739         bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
7740         if (bp->igu_sb_cnt == 0)
7741                 BNX2X_ERR("CAM configuration error\n");
7742 }
7743
7744 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7745                                                     u32 switch_cfg)
7746 {
7747         int cfg_size = 0, idx, port = BP_PORT(bp);
7748
7749         /* Aggregation of supported attributes of all external phys */
7750         bp->port.supported[0] = 0;
7751         bp->port.supported[1] = 0;
7752         switch (bp->link_params.num_phys) {
7753         case 1:
7754                 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
7755                 cfg_size = 1;
7756                 break;
7757         case 2:
7758                 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
7759                 cfg_size = 1;
7760                 break;
7761         case 3:
7762                 if (bp->link_params.multi_phy_config &
7763                     PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7764                         bp->port.supported[1] =
7765                                 bp->link_params.phy[EXT_PHY1].supported;
7766                         bp->port.supported[0] =
7767                                 bp->link_params.phy[EXT_PHY2].supported;
7768                 } else {
7769                         bp->port.supported[0] =
7770                                 bp->link_params.phy[EXT_PHY1].supported;
7771                         bp->port.supported[1] =
7772                                 bp->link_params.phy[EXT_PHY2].supported;
7773                 }
7774                 cfg_size = 2;
7775                 break;
7776         }
7777
7778         if (!(bp->port.supported[0] || bp->port.supported[1])) {
7779                 BNX2X_ERR("NVRAM config error. BAD phy config. "
7780                           "PHY1 config 0x%x, PHY2 config 0x%x\n",
7781                            SHMEM_RD(bp,
7782                            dev_info.port_hw_config[port].external_phy_config),
7783                            SHMEM_RD(bp,
7784                            dev_info.port_hw_config[port].external_phy_config2));
7785                 return;
7786         }
7787
7788         switch (switch_cfg) {
7789         case SWITCH_CFG_1G:
7790                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7791                                            port*0x10);
7792                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7793                 break;
7794
7795         case SWITCH_CFG_10G:
7796                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7797                                            port*0x18);
7798                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7799                 break;
7800
7801         default:
7802                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7803                           bp->port.link_config[0]);
7804                 return;
7805         }
7806         /* mask what we support according to speed_cap_mask per configuration */
7807         for (idx = 0; idx < cfg_size; idx++) {
7808                 if (!(bp->link_params.speed_cap_mask[idx] &
7809                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7810                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
7811
7812                 if (!(bp->link_params.speed_cap_mask[idx] &
7813                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7814                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
7815
7816                 if (!(bp->link_params.speed_cap_mask[idx] &
7817                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7818                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
7819
7820                 if (!(bp->link_params.speed_cap_mask[idx] &
7821                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7822                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
7823
7824                 if (!(bp->link_params.speed_cap_mask[idx] &
7825                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7826                         bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
7827                                                      SUPPORTED_1000baseT_Full);
7828
7829                 if (!(bp->link_params.speed_cap_mask[idx] &
7830                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7831                         bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
7832
7833                 if (!(bp->link_params.speed_cap_mask[idx] &
7834                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7835                         bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
7836
7837         }
7838
7839         BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
7840                        bp->port.supported[1]);
7841 }
7842
7843 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7844 {
7845         u32 link_config, idx, cfg_size = 0;
7846         bp->port.advertising[0] = 0;
7847         bp->port.advertising[1] = 0;
7848         switch (bp->link_params.num_phys) {
7849         case 1:
7850         case 2:
7851                 cfg_size = 1;
7852                 break;
7853         case 3:
7854                 cfg_size = 2;
7855                 break;
7856         }
7857         for (idx = 0; idx < cfg_size; idx++) {
7858                 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
7859                 link_config = bp->port.link_config[idx];
7860                 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7861                 case PORT_FEATURE_LINK_SPEED_AUTO:
7862                         if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
7863                                 bp->link_params.req_line_speed[idx] =
7864                                         SPEED_AUTO_NEG;
7865                                 bp->port.advertising[idx] |=
7866                                         bp->port.supported[idx];
7867                         } else {
7868                                 /* force 10G, no AN */
7869                                 bp->link_params.req_line_speed[idx] =
7870                                         SPEED_10000;
7871                                 bp->port.advertising[idx] |=
7872                                         (ADVERTISED_10000baseT_Full |
7873                                          ADVERTISED_FIBRE);
7874                                 continue;
7875                         }
7876                         break;
7877
7878                 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7879                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
7880                                 bp->link_params.req_line_speed[idx] =
7881                                         SPEED_10;
7882                                 bp->port.advertising[idx] |=
7883                                         (ADVERTISED_10baseT_Full |
7884                                          ADVERTISED_TP);
7885                         } else {
7886                                 BNX2X_ERROR("NVRAM config error. "
7887                                             "Invalid link_config 0x%x"
7888                                             "  speed_cap_mask 0x%x\n",
7889                                             link_config,
7890                                     bp->link_params.speed_cap_mask[idx]);
7891                                 return;
7892                         }
7893                         break;
7894
7895                 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7896                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
7897                                 bp->link_params.req_line_speed[idx] =
7898                                         SPEED_10;
7899                                 bp->link_params.req_duplex[idx] =
7900                                         DUPLEX_HALF;
7901                                 bp->port.advertising[idx] |=
7902                                         (ADVERTISED_10baseT_Half |
7903                                          ADVERTISED_TP);
7904                         } else {
7905                                 BNX2X_ERROR("NVRAM config error. "
7906                                             "Invalid link_config 0x%x"
7907                                             "  speed_cap_mask 0x%x\n",
7908                                             link_config,
7909                                           bp->link_params.speed_cap_mask[idx]);
7910                                 return;
7911                         }
7912                         break;
7913
7914                 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7915                         if (bp->port.supported[idx] &
7916                             SUPPORTED_100baseT_Full) {
7917                                 bp->link_params.req_line_speed[idx] =
7918                                         SPEED_100;
7919                                 bp->port.advertising[idx] |=
7920                                         (ADVERTISED_100baseT_Full |
7921                                          ADVERTISED_TP);
7922                         } else {
7923                                 BNX2X_ERROR("NVRAM config error. "
7924                                             "Invalid link_config 0x%x"
7925                                             "  speed_cap_mask 0x%x\n",
7926                                             link_config,
7927                                           bp->link_params.speed_cap_mask[idx]);
7928                                 return;
7929                         }
7930                         break;
7931
7932                 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7933                         if (bp->port.supported[idx] &
7934                             SUPPORTED_100baseT_Half) {
7935                                 bp->link_params.req_line_speed[idx] =
7936                                                                 SPEED_100;
7937                                 bp->link_params.req_duplex[idx] =
7938                                                                 DUPLEX_HALF;
7939                                 bp->port.advertising[idx] |=
7940                                         (ADVERTISED_100baseT_Half |
7941                                          ADVERTISED_TP);
7942                         } else {
7943                                 BNX2X_ERROR("NVRAM config error. "
7944                                     "Invalid link_config 0x%x"
7945                                     "  speed_cap_mask 0x%x\n",
7946                                     link_config,
7947                                     bp->link_params.speed_cap_mask[idx]);
7948                                 return;
7949                         }
7950                         break;
7951
7952                 case PORT_FEATURE_LINK_SPEED_1G:
7953                         if (bp->port.supported[idx] &
7954                             SUPPORTED_1000baseT_Full) {
7955                                 bp->link_params.req_line_speed[idx] =
7956                                         SPEED_1000;
7957                                 bp->port.advertising[idx] |=
7958                                         (ADVERTISED_1000baseT_Full |
7959                                          ADVERTISED_TP);
7960                         } else {
7961                                 BNX2X_ERROR("NVRAM config error. "
7962                                     "Invalid link_config 0x%x"
7963                                     "  speed_cap_mask 0x%x\n",
7964                                     link_config,
7965                                     bp->link_params.speed_cap_mask[idx]);
7966                                 return;
7967                         }
7968                         break;
7969
7970                 case PORT_FEATURE_LINK_SPEED_2_5G:
7971                         if (bp->port.supported[idx] &
7972                             SUPPORTED_2500baseX_Full) {
7973                                 bp->link_params.req_line_speed[idx] =
7974                                         SPEED_2500;
7975                                 bp->port.advertising[idx] |=
7976                                         (ADVERTISED_2500baseX_Full |
7977                                                 ADVERTISED_TP);
7978                         } else {
7979                                 BNX2X_ERROR("NVRAM config error. "
7980                                     "Invalid link_config 0x%x"
7981                                     "  speed_cap_mask 0x%x\n",
7982                                     link_config,
7983                                     bp->link_params.speed_cap_mask[idx]);
7984                                 return;
7985                         }
7986                         break;
7987
7988                 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7989                 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7990                 case PORT_FEATURE_LINK_SPEED_10G_KR:
7991                         if (bp->port.supported[idx] &
7992                             SUPPORTED_10000baseT_Full) {
7993                                 bp->link_params.req_line_speed[idx] =
7994                                         SPEED_10000;
7995                                 bp->port.advertising[idx] |=
7996                                         (ADVERTISED_10000baseT_Full |
7997                                                 ADVERTISED_FIBRE);
7998                         } else {
7999                                 BNX2X_ERROR("NVRAM config error. "
8000                                     "Invalid link_config 0x%x"
8001                                     "  speed_cap_mask 0x%x\n",
8002                                     link_config,
8003                                     bp->link_params.speed_cap_mask[idx]);
8004                                 return;
8005                         }
8006                         break;
8007
8008                 default:
8009                         BNX2X_ERROR("NVRAM config error. "
8010                                     "BAD link speed link_config 0x%x\n",
8011                                     link_config);
8012                         bp->link_params.req_line_speed[idx] =
8013                                                 SPEED_AUTO_NEG;
8014                         bp->port.advertising[idx] =
8015                                         bp->port.supported[idx];
8016                         break;
8017                 }
8018
8019                 bp->link_params.req_flow_ctrl[idx] = (link_config &
8020                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8021                 if ((bp->link_params.req_flow_ctrl[idx] ==
8022                      BNX2X_FLOW_CTRL_AUTO) &&
8023                     !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8024                         bp->link_params.req_flow_ctrl[idx] =
8025                                 BNX2X_FLOW_CTRL_NONE;
8026                 }
8027
8028                 BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl"
8029                                " 0x%x advertising 0x%x\n",
8030                                bp->link_params.req_line_speed[idx],
8031                                bp->link_params.req_duplex[idx],
8032                                bp->link_params.req_flow_ctrl[idx],
8033                                bp->port.advertising[idx]);
8034         }
8035 }
8036
8037 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8038 {
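             /* The shmem keeps the MAC as a 16-bit upper and a 32-bit lower
              * word in CPU order; converting both to big-endian and storing
              * them back to back yields the 6-byte wire-order address.
              */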
8039         mac_hi = cpu_to_be16(mac_hi);
8040         mac_lo = cpu_to_be32(mac_lo);
8041         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8042         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8043 }
8044
8045 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8046 {
8047         int port = BP_PORT(bp);
8048         u32 val, val2;
8049         u32 config;
8050         u32 ext_phy_type, ext_phy_config;
8051
8052         bp->link_params.bp = bp;
8053         bp->link_params.port = port;
8054
8055         bp->link_params.lane_config =
8056                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8057
8058         bp->link_params.speed_cap_mask[0] =
8059                 SHMEM_RD(bp,
8060                          dev_info.port_hw_config[port].speed_capability_mask);
8061         bp->link_params.speed_cap_mask[1] =
8062                 SHMEM_RD(bp,
8063                          dev_info.port_hw_config[port].speed_capability_mask2);
8064         bp->port.link_config[0] =
8065                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8066
8067         bp->port.link_config[1] =
8068                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
8069
8070         bp->link_params.multi_phy_config =
8071                 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
8072         /* If the device is capable of WoL, set the default state according
8073          * to the HW
8074          */
8075         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8076         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8077                    (config & PORT_FEATURE_WOL_ENABLED));
8078
8079         BNX2X_DEV_INFO("lane_config 0x%08x  "
8080                        "speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
8081                        bp->link_params.lane_config,
8082                        bp->link_params.speed_cap_mask[0],
8083                        bp->port.link_config[0]);
8084
8085         bp->link_params.switch_cfg = (bp->port.link_config[0] &
8086                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
8087         bnx2x_phy_probe(&bp->link_params);
8088         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8089
8090         bnx2x_link_settings_requested(bp);
8091
8092         /*
8093          * If connected directly, work with the internal PHY, otherwise, work
8094          * with the external PHY
8095          */
8096         ext_phy_config =
8097                 SHMEM_RD(bp,
8098                          dev_info.port_hw_config[port].external_phy_config);
8099         ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
8100         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8101                 bp->mdio.prtad = bp->port.phy_addr;
8102
8103         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8104                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8105                 bp->mdio.prtad =
8106                         XGXS_EXT_PHY_ADDR(ext_phy_config);
8107
8108         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8109         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8110         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8111         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8112         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8113
8114 #ifdef BCM_CNIC
8115         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8116         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8117         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8118 #endif
8119 }
8120
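/* Gather chip-wide HW information: interrupt block type (HC on E1x,
 * IGU otherwise), IGU CAM layout, multi-function mode/configuration
 * and the function MAC address.
 */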
8121 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8122 {
8123         int func = BP_ABS_FUNC(bp);
8124         int vn;
8125         u32 val, val2;
8126         int rc = 0;
8127
8128         bnx2x_get_common_hwinfo(bp);
8129
8130         if (CHIP_IS_E1x(bp)) {
8131                 bp->common.int_block = INT_BLOCK_HC;
8132
8133                 bp->igu_dsb_id = DEF_SB_IGU_ID;
8134                 bp->igu_base_sb = 0;
8135                 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
8136         } else {
8137                 bp->common.int_block = INT_BLOCK_IGU;
8138                 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8139                 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8140                         DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8141                         bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8142                 } else
8143                         DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
8144
8145                 bnx2x_get_igu_cam_info(bp);
8146
8147         }
8148         DP(NETIF_MSG_PROBE, "igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n",
8149                              bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8150
8151         /*
8152          * Initialize MF configuration
8153          */
8154
8155         bp->mf_ov = 0;
8156         bp->mf_mode = 0;
8157         vn = BP_E1HVN(bp);
8158         if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8159                 if (SHMEM2_HAS(bp, mf_cfg_addr))
8160                         bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8161                 else
8162                         bp->common.mf_cfg_base = bp->common.shmem_base +
8163                                 offsetof(struct shmem_region, func_mb) +
8164                                 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
8165                 bp->mf_config[vn] =
8166                         MF_CFG_RD(bp, func_mf_config[func].config);
8167
8168                 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
8169                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8170                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8171                         bp->mf_mode = 1;
8172                 BNX2X_DEV_INFO("%s function mode\n",
8173                                IS_MF(bp) ? "multi" : "single");
8174
8175                 if (IS_MF(bp)) {
8176                         val = (MF_CFG_RD(bp, func_mf_config[func].
8177                                                                 e1hov_tag) &
8178                                FUNC_MF_CFG_E1HOV_TAG_MASK);
8179                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8180                                 bp->mf_ov = val;
8181                                 BNX2X_DEV_INFO("MF OV for func %d is %d "
8182                                                "(0x%04x)\n",
8183                                                func, bp->mf_ov, bp->mf_ov);
8184                         } else {
8185                                 BNX2X_ERROR("No valid MF OV for func %d,"
8186                                             "  aborting\n", func);
8187                                 rc = -EPERM;
8188                         }
8189                 } else {
8190                         if (BP_VN(bp)) {
8191                                 BNX2X_ERROR("VN %d in single function mode,"
8192                                             "  aborting\n", BP_E1HVN(bp));
8193                                 rc = -EPERM;
8194                         }
8195                 }
8196         }
8197
8198         /* adjust igu_sb_cnt to MF for E1x */
8199         if (CHIP_IS_E1x(bp) && IS_MF(bp))
8200                 bp->igu_sb_cnt /= E1HVN_MAX;
8201
8202         /*
8203          * Adjust the E2 SB count: to be removed once the FW supports
8204          * more than 16 L2 clients
8205          */
8206 #define MAX_L2_CLIENTS                          16
8207         if (CHIP_IS_E2(bp))
8208                 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8209                                        MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8210
8211         if (!BP_NOMCP(bp)) {
8212                 bnx2x_get_port_hwinfo(bp);
8213
8214                 bp->fw_seq =
8215                         (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8216                          DRV_MSG_SEQ_NUMBER_MASK);
8217                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8218         }
8219
8220         if (IS_MF(bp)) {
8221                 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8222                 val = MF_CFG_RD(bp,  func_mf_config[func].mac_lower);
8223                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8224                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8225                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8226                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8227                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8228                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8229                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8230                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8231                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8232                                ETH_ALEN);
8233                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8234                                ETH_ALEN);
8235                 }
8236
8237                 return rc;
8238         }
8239
8240         if (BP_NOMCP(bp)) {
8241                 /* only supposed to happen on emulation/FPGA */
8242                 BNX2X_ERROR("warning: random MAC workaround active\n");
8243                 random_ether_addr(bp->dev->dev_addr);
8244                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8245         }
8246
8247         return rc;
8248 }
8249
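/* Parse the PCI VPD read-only section; on Dell-branded boards the
 * vendor-specific keyword carries an extra FW version string which is
 * copied into bp->fw_ver.
 */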
8250 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8251 {
8252         int cnt, i, block_end, rodi;
8253         char vpd_data[BNX2X_VPD_LEN+1];
8254         char str_id_reg[VENDOR_ID_LEN+1];
8255         char str_id_cap[VENDOR_ID_LEN+1];
8256         u8 len;
8257
8258         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8259         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8260
8261         if (cnt < BNX2X_VPD_LEN)
8262                 goto out_not_found;
8263
8264         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8265                              PCI_VPD_LRDT_RO_DATA);
8266         if (i < 0)
8267                 goto out_not_found;
8268
8270         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8271                     pci_vpd_lrdt_size(&vpd_data[i]);
8272
8273         i += PCI_VPD_LRDT_TAG_SIZE;
8274
8275         if (block_end > BNX2X_VPD_LEN)
8276                 goto out_not_found;
8277
8278         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8279                                    PCI_VPD_RO_KEYWORD_MFR_ID);
8280         if (rodi < 0)
8281                 goto out_not_found;
8282
8283         len = pci_vpd_info_field_size(&vpd_data[rodi]);
8284
8285         if (len != VENDOR_ID_LEN)
8286                 goto out_not_found;
8287
8288         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8289
8290         /* vendor specific info */
8291         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8292         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8293         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8294             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8295
8296                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8297                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
8298                 if (rodi >= 0) {
8299                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
8300
8301                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8302
8303                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8304                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8305                                 bp->fw_ver[len] = ' ';
8306                         }
8307                 }
8308                 return;
8309         }
8310 out_not_found:
8311         return;
8312 }
8313
8314 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8315 {
8316         int func;
8317         int timer_interval;
8318         int rc;
8319
8320         /* Disable interrupt handling until HW is initialized */
8321         atomic_set(&bp->intr_sem, 1);
8322         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8323
8324         mutex_init(&bp->port.phy_mutex);
8325         mutex_init(&bp->fw_mb_mutex);
8326         spin_lock_init(&bp->stats_lock);
8327 #ifdef BCM_CNIC
8328         mutex_init(&bp->cnic_mutex);
8329 #endif
8330
8331         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8332         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8333
8334         rc = bnx2x_get_hwinfo(bp);
8335
8336         if (!rc)
8337                 rc = bnx2x_alloc_mem_bp(bp);
8338
8339         bnx2x_read_fwinfo(bp);
8340
8341         func = BP_FUNC(bp);
8342
8343         /* need to reset the chip if UNDI was active */
8344         if (!BP_NOMCP(bp))
8345                 bnx2x_undi_unload(bp);
8346
8347         if (CHIP_REV_IS_FPGA(bp))
8348                 dev_err(&bp->pdev->dev, "FPGA detected\n");
8349
8350         if (BP_NOMCP(bp) && (func == 0))
8351                 dev_err(&bp->pdev->dev, "MCP disabled, "
8352                                         "must load devices in order!\n");
8353
8354         /* Set multi queue mode */
8355         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8356             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8357                 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
8358                                         "requested is not MSI-X\n");
8359                 multi_mode = ETH_RSS_MODE_DISABLED;
8360         }
8361         bp->multi_mode = multi_mode;
8362         bp->int_mode = int_mode;
8363
8364         bp->dev->features |= NETIF_F_GRO;
8365
8366         /* Set TPA flags */
8367         if (disable_tpa) {
8368                 bp->flags &= ~TPA_ENABLE_FLAG;
8369                 bp->dev->features &= ~NETIF_F_LRO;
8370         } else {
8371                 bp->flags |= TPA_ENABLE_FLAG;
8372                 bp->dev->features |= NETIF_F_LRO;
8373         }
8374         bp->disable_tpa = disable_tpa;
8375
8376         if (CHIP_IS_E1(bp))
8377                 bp->dropless_fc = 0;
8378         else
8379                 bp->dropless_fc = dropless_fc;
8380
8381         bp->mrrs = mrrs;
8382
8383         bp->tx_ring_size = MAX_TX_AVAIL;
8384
8385         bp->rx_csum = 1;
8386
8387         /* make sure that the numbers are in the right granularity */
8388         bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8389         bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
8390
8391         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8392         bp->current_interval = (poll ? poll : timer_interval);
8393
8394         init_timer(&bp->timer);
8395         bp->timer.expires = jiffies + bp->current_interval;
8396         bp->timer.data = (unsigned long) bp;
8397         bp->timer.function = bnx2x_timer;
8398
8399         return rc;
8400 }
8401
8403 /****************************************************************************
8404 * General service functions
8405 ****************************************************************************/
8406
8407 /* called with rtnl_lock */
8408 static int bnx2x_open(struct net_device *dev)
8409 {
8410         struct bnx2x *bp = netdev_priv(dev);
8411
8412         netif_carrier_off(dev);
8413
8414         bnx2x_set_power_state(bp, PCI_D0);
8415
8416         if (!bnx2x_reset_is_done(bp)) {
8417                 do {
8418                         /* Reset the MCP mailbox sequence if there is an
8419                          * ongoing recovery
8420                          */
8421                         bp->fw_seq = 0;
8422
8423                         /* If it's the first function to load and reset done
8424                          * is still not cleared, a previous recovery may not
8425                          * have completed. We don't check the attention state
8426                          * here because it may have already been cleared by a
8427                          * "common" reset, but we shall proceed with "process
8428                          * kill" anyway.
8429                          */
8429                         if ((bnx2x_get_load_cnt(bp) == 0) &&
8430                                 bnx2x_trylock_hw_lock(bp,
8431                                 HW_LOCK_RESOURCE_RESERVED_08) &&
8432                                 (!bnx2x_leader_reset(bp))) {
8433                                 DP(NETIF_MSG_HW, "Recovered in open\n");
8434                                 break;
8435                         }
8436
8437                         bnx2x_set_power_state(bp, PCI_D3hot);
8438
8439                         printk(KERN_ERR "%s: Recovery flow hasn't been properly"
8440                         " completed yet. Try again later. If you still see this"
8441                         " message after a few retries then a power cycle is"
8442                         " required.\n", bp->dev->name);
8443
8444                         return -EAGAIN;
8445                 } while (0);
8446         }
8447
8448         bp->recovery_state = BNX2X_RECOVERY_DONE;
8449
8450         return bnx2x_nic_load(bp, LOAD_OPEN);
8451 }
8452
8453 /* called with rtnl_lock */
8454 static int bnx2x_close(struct net_device *dev)
8455 {
8456         struct bnx2x *bp = netdev_priv(dev);
8457
8458         /* Unload the driver, release IRQs */
8459         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
8460         bnx2x_set_power_state(bp, PCI_D3hot);
8461
8462         return 0;
8463 }
8464
8465 /* called with netif_tx_lock from dev_mcast.c */
8466 void bnx2x_set_rx_mode(struct net_device *dev)
8467 {
8468         struct bnx2x *bp = netdev_priv(dev);
8469         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8470         int port = BP_PORT(bp);
8471
8472         if (bp->state != BNX2X_STATE_OPEN) {
8473                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8474                 return;
8475         }
8476
8477         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8478
8479         if (dev->flags & IFF_PROMISC)
8480                 rx_mode = BNX2X_RX_MODE_PROMISC;
8481         else if ((dev->flags & IFF_ALLMULTI) ||
8482                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8483                   CHIP_IS_E1(bp)))
8484                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8485         else { /* some multicasts */
8486                 if (CHIP_IS_E1(bp)) {
8487                         /*
8488                          * set mc list, do not wait as wait implies sleep
8489                          * and set_rx_mode can be invoked from non-sleepable
8490                          * context
8491                          */
8492                         u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8493                                      BNX2X_MAX_EMUL_MULTI*(1 + port) :
8494                                      BNX2X_MAX_MULTICAST*(1 + port));
8495
8496                         bnx2x_set_e1_mc_list(bp, offset);
8497                 } else { /* E1H */
8498                         /* Accept one or more multicasts */
8499                         struct netdev_hw_addr *ha;
8500                         u32 mc_filter[MC_HASH_SIZE];
8501                         u32 crc, bit, regidx;
8502                         int i;
8503
8504                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8505
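                        /* Approximate multicast filtering: hash each
                         * address with CRC32c and set the matching bit
                         * in the 256-bit (8 x 32bit) MC_HASH table.
                         */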
8506                         netdev_for_each_mc_addr(ha, dev) {
8507                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
8508                                    bnx2x_mc_addr(ha));
8509
8510                                 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8511                                                 ETH_ALEN);
8512                                 bit = (crc >> 24) & 0xff;
8513                                 regidx = bit >> 5;
8514                                 bit &= 0x1f;
8515                                 mc_filter[regidx] |= (1 << bit);
8516                         }
8517
8518                         for (i = 0; i < MC_HASH_SIZE; i++)
8519                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8520                                        mc_filter[i]);
8521                 }
8522         }
8523
8524         bp->rx_mode = rx_mode;
8525         bnx2x_set_storm_rx_mode(bp);
8526 }
8527
8528 /* called with rtnl_lock */
8529 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8530                            int devad, u16 addr)
8531 {
8532         struct bnx2x *bp = netdev_priv(netdev);
8533         u16 value;
8534         int rc;
8535
8536         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8537            prtad, devad, addr);
8538
8539         /* The HW expects different devad if CL22 is used */
8540         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8541
8542         bnx2x_acquire_phy_lock(bp);
8543         rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
8544         bnx2x_release_phy_lock(bp);
8545         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
8546
8547         if (!rc)
8548                 rc = value;
8549         return rc;
8550 }
8551
8552 /* called with rtnl_lock */
8553 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8554                             u16 addr, u16 value)
8555 {
8556         struct bnx2x *bp = netdev_priv(netdev);
8557         int rc;
8558
8559         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8560                            " value 0x%x\n", prtad, devad, addr, value);
8561
8562         /* The HW expects different devad if CL22 is used */
8563         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8564
8565         bnx2x_acquire_phy_lock(bp);
8566         rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
8567         bnx2x_release_phy_lock(bp);
8568         return rc;
8569 }
8570
8571 /* called with rtnl_lock */
8572 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8573 {
8574         struct bnx2x *bp = netdev_priv(dev);
8575         struct mii_ioctl_data *mdio = if_mii(ifr);
8576
8577         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8578            mdio->phy_id, mdio->reg_num, mdio->val_in);
8579
8580         if (!netif_running(dev))
8581                 return -EAGAIN;
8582
8583         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
8584 }
8585
8586 #ifdef CONFIG_NET_POLL_CONTROLLER
8587 static void poll_bnx2x(struct net_device *dev)
8588 {
8589         struct bnx2x *bp = netdev_priv(dev);
8590
8591         disable_irq(bp->pdev->irq);
8592         bnx2x_interrupt(bp->pdev->irq, dev);
8593         enable_irq(bp->pdev->irq);
8594 }
8595 #endif
8596
8597 static const struct net_device_ops bnx2x_netdev_ops = {
8598         .ndo_open               = bnx2x_open,
8599         .ndo_stop               = bnx2x_close,
8600         .ndo_start_xmit         = bnx2x_start_xmit,
8601         .ndo_set_multicast_list = bnx2x_set_rx_mode,
8602         .ndo_set_mac_address    = bnx2x_change_mac_addr,
8603         .ndo_validate_addr      = eth_validate_addr,
8604         .ndo_do_ioctl           = bnx2x_ioctl,
8605         .ndo_change_mtu         = bnx2x_change_mtu,
8606         .ndo_tx_timeout         = bnx2x_tx_timeout,
8607 #ifdef BCM_VLAN
8608         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
8609 #endif
8610 #ifdef CONFIG_NET_POLL_CONTROLLER
8611         .ndo_poll_controller    = poll_bnx2x,
8612 #endif
8613 };
8614
8615 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8616                                     struct net_device *dev)
8617 {
8618         struct bnx2x *bp;
8619         int rc;
8620
8621         SET_NETDEV_DEV(dev, &pdev->dev);
8622         bp = netdev_priv(dev);
8623
8624         bp->dev = dev;
8625         bp->pdev = pdev;
8626         bp->flags = 0;
8627         bp->pf_num = PCI_FUNC(pdev->devfn);
8628
8629         rc = pci_enable_device(pdev);
8630         if (rc) {
8631                 dev_err(&bp->pdev->dev,
8632                         "Cannot enable PCI device, aborting\n");
8633                 goto err_out;
8634         }
8635
8636         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8637                 dev_err(&bp->pdev->dev,
8638                         "Cannot find PCI device base address, aborting\n");
8639                 rc = -ENODEV;
8640                 goto err_out_disable;
8641         }
8642
8643         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
8644                 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
8645                        " base address, aborting\n");
8646                 rc = -ENODEV;
8647                 goto err_out_disable;
8648         }
8649
8650         if (atomic_read(&pdev->enable_cnt) == 1) {
8651                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8652                 if (rc) {
8653                         dev_err(&bp->pdev->dev,
8654                                 "Cannot obtain PCI resources, aborting\n");
8655                         goto err_out_disable;
8656                 }
8657
8658                 pci_set_master(pdev);
8659                 pci_save_state(pdev);
8660         }
8661
8662         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8663         if (bp->pm_cap == 0) {
8664                 dev_err(&bp->pdev->dev,
8665                         "Cannot find power management capability, aborting\n");
8666                 rc = -EIO;
8667                 goto err_out_release;
8668         }
8669
8670         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
8671         if (bp->pcie_cap == 0) {
8672                 dev_err(&bp->pdev->dev,
8673                         "Cannot find PCI Express capability, aborting\n");
8674                 rc = -EIO;
8675                 goto err_out_release;
8676         }
8677
8678         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
8679                 bp->flags |= USING_DAC_FLAG;
8680                 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
8681                         dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
8682                                " failed, aborting\n");
8683                         rc = -EIO;
8684                         goto err_out_release;
8685                 }
8686
8687         } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
8688                 dev_err(&bp->pdev->dev,
8689                         "System does not support DMA, aborting\n");
8690                 rc = -EIO;
8691                 goto err_out_release;
8692         }
8693
8694         dev->mem_start = pci_resource_start(pdev, 0);
8695         dev->base_addr = dev->mem_start;
8696         dev->mem_end = pci_resource_end(pdev, 0);
8697
8698         dev->irq = pdev->irq;
8699
8700         bp->regview = pci_ioremap_bar(pdev, 0);
8701         if (!bp->regview) {
8702                 dev_err(&bp->pdev->dev,
8703                         "Cannot map register space, aborting\n");
8704                 rc = -ENOMEM;
8705                 goto err_out_release;
8706         }
8707
8708         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
8709                                         min_t(u64, BNX2X_DB_SIZE(bp),
8710                                               pci_resource_len(pdev, 2)));
8711         if (!bp->doorbells) {
8712                 dev_err(&bp->pdev->dev,
8713                         "Cannot map doorbell space, aborting\n");
8714                 rc = -ENOMEM;
8715                 goto err_out_unmap;
8716         }
8717
8718         bnx2x_set_power_state(bp, PCI_D0);
8719
8720         /* clean indirect addresses */
8721         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
8722                                PCICFG_VENDOR_ID_OFFSET);
8723         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
8724         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
8725         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
8726         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
8727
8728         /* Reset the load counter */
8729         bnx2x_clear_load_cnt(bp);
8730
8731         dev->watchdog_timeo = TX_TIMEOUT;
8732
8733         dev->netdev_ops = &bnx2x_netdev_ops;
8734         bnx2x_set_ethtool_ops(dev);
8735         dev->features |= NETIF_F_SG;
8736         dev->features |= NETIF_F_HW_CSUM;
8737         if (bp->flags & USING_DAC_FLAG)
8738                 dev->features |= NETIF_F_HIGHDMA;
8739         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8740         dev->features |= NETIF_F_TSO6;
8741 #ifdef BCM_VLAN
8742         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
8743         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
8744
8745         dev->vlan_features |= NETIF_F_SG;
8746         dev->vlan_features |= NETIF_F_HW_CSUM;
8747         if (bp->flags & USING_DAC_FLAG)
8748                 dev->vlan_features |= NETIF_F_HIGHDMA;
8749         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8750         dev->vlan_features |= NETIF_F_TSO6;
8751 #endif
8752
8753         /* bnx2x_get_port_hwinfo() will set prtad and mmds properly */
8754         bp->mdio.prtad = MDIO_PRTAD_NONE;
8755         bp->mdio.mmds = 0;
8756         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
8757         bp->mdio.dev = dev;
8758         bp->mdio.mdio_read = bnx2x_mdio_read;
8759         bp->mdio.mdio_write = bnx2x_mdio_write;
8760
8761         return 0;
8762
8763 err_out_unmap:
8764         if (bp->regview) {
8765                 iounmap(bp->regview);
8766                 bp->regview = NULL;
8767         }
8768         if (bp->doorbells) {
8769                 iounmap(bp->doorbells);
8770                 bp->doorbells = NULL;
8771         }
8772
8773 err_out_release:
8774         if (atomic_read(&pdev->enable_cnt) == 1)
8775                 pci_release_regions(pdev);
8776
8777 err_out_disable:
8778         pci_disable_device(pdev);
8779         pci_set_drvdata(pdev, NULL);
8780
8781 err_out:
8782         return rc;
8783 }
8784
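/* Decode the negotiated PCIe link width and speed from the PCI config
 * space link control register.
 */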
8785 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
8786                                                  int *width, int *speed)
8787 {
8788         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
8789
8790         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
8791
8792         /* return value of 1=2.5GHz 2=5GHz */
8793         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
8794 }
8795
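/* Validate a firmware file before using it: bounds-check every section
 * and every init_ops offset against the file size, then verify that the
 * FW version matches the one the driver was built against.
 */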
8796 static int bnx2x_check_firmware(struct bnx2x *bp)
8797 {
8798         const struct firmware *firmware = bp->firmware;
8799         struct bnx2x_fw_file_hdr *fw_hdr;
8800         struct bnx2x_fw_file_section *sections;
8801         u32 offset, len, num_ops;
8802         u16 *ops_offsets;
8803         int i;
8804         const u8 *fw_ver;
8805
8806         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
8807                 return -EINVAL;
8808
8809         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
8810         sections = (struct bnx2x_fw_file_section *)fw_hdr;
8811
8812         /* Make sure none of the offsets and sizes make us read beyond
8813          * the end of the firmware data */
8814         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
8815                 offset = be32_to_cpu(sections[i].offset);
8816                 len = be32_to_cpu(sections[i].len);
8817                 if (offset + len > firmware->size) {
8818                         dev_err(&bp->pdev->dev,
8819                                 "Section %d length is out of bounds\n", i);
8820                         return -EINVAL;
8821                 }
8822         }
8823
8824         /* Likewise for the init_ops offsets */
8825         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
8826         ops_offsets = (u16 *)(firmware->data + offset);
8827         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
8828
8829         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
8830                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
8831                         dev_err(&bp->pdev->dev,
8832                                 "Section offset %d is out of bounds\n", i);
8833                         return -EINVAL;
8834                 }
8835         }
8836
8837         /* Check FW version */
8838         offset = be32_to_cpu(fw_hdr->fw_version.offset);
8839         fw_ver = firmware->data + offset;
8840         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
8841             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
8842             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
8843             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
8844                 dev_err(&bp->pdev->dev,
8845                         "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
8846                        fw_ver[0], fw_ver[1], fw_ver[2],
8847                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
8848                        BCM_5710_FW_MINOR_VERSION,
8849                        BCM_5710_FW_REVISION_VERSION,
8850                        BCM_5710_FW_ENGINEERING_VERSION);
8851                 return -EINVAL;
8852         }
8853
8854         return 0;
8855 }
8856
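/* Endianness helpers: all multi-byte fields in the FW file are stored
 * big-endian; convert n bytes worth into host order.
 */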
8857 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
8858 {
8859         const __be32 *source = (const __be32 *)_source;
8860         u32 *target = (u32 *)_target;
8861         u32 i;
8862
8863         for (i = 0; i < n/4; i++)
8864                 target[i] = be32_to_cpu(source[i]);
8865 }
8866
8867 /*
8868  * Ops array is stored in the following format:
8869  * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
8870  */
8871 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
8872 {
8873         const __be32 *source = (const __be32 *)_source;
8874         struct raw_op *target = (struct raw_op *)_target;
8875         u32 i, j, tmp;
8876
8877         for (i = 0, j = 0; i < n/8; i++, j += 2) {
8878                 tmp = be32_to_cpu(source[j]);
8879                 target[i].op = (tmp >> 24) & 0xff;
8880                 target[i].offset = tmp & 0xffffff;
8881                 target[i].raw_data = be32_to_cpu(source[j + 1]);
8882         }
8883 }
8884
8885 /*
8886  * IRO array is stored in the following format:
8887  * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit)}
8888  */
8889 static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
8890 {
8891         const __be32 *source = (const __be32 *)_source;
8892         struct iro *target = (struct iro *)_target;
8893         u32 i, j, tmp;
8894
8895         for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
8896                 target[i].base = be32_to_cpu(source[j]);
8897                 j++;
8898                 tmp = be32_to_cpu(source[j]);
8899                 target[i].m1 = (tmp >> 16) & 0xffff;
8900                 target[i].m2 = tmp & 0xffff;
8901                 j++;
8902                 tmp = be32_to_cpu(source[j]);
8903                 target[i].m3 = (tmp >> 16) & 0xffff;
8904                 target[i].size = tmp & 0xffff;
8905                 j++;
8906         }
8907 }
8908
8909 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
8910 {
8911         const __be16 *source = (const __be16 *)_source;
8912         u16 *target = (u16 *)_target;
8913         u32 i;
8914
8915         for (i = 0; i < n/2; i++)
8916                 target[i] = be16_to_cpu(source[i]);
8917 }
8918
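/* Allocate bp->arr and fill it from the firmware section described by
 * fw_hdr->arr, converting from the file's big-endian layout with func;
 * jumps to lbl on allocation failure.
 */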
8919 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
8920 do {                                                                    \
8921         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
8922         bp->arr = kmalloc(len, GFP_KERNEL);                             \
8923         if (!bp->arr) {                                                 \
8924                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
8925                 goto lbl;                                               \
8926         }                                                               \
8927         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
8928              (u8 *)bp->arr, len);                                       \
8929 } while (0)
8930
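/* Request the chip-specific firmware file, validate it and set up the
 * init arrays (data blob, opcodes, offsets, IRO) and the per-STORM
 * interrupt table and PRAM pointers used during HW initialization.
 */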
8931 int bnx2x_init_firmware(struct bnx2x *bp)
8932 {
8933         const char *fw_file_name;
8934         struct bnx2x_fw_file_hdr *fw_hdr;
8935         int rc;
8936
8937         if (CHIP_IS_E1(bp))
8938                 fw_file_name = FW_FILE_NAME_E1;
8939         else if (CHIP_IS_E1H(bp))
8940                 fw_file_name = FW_FILE_NAME_E1H;
8941         else if (CHIP_IS_E2(bp))
8942                 fw_file_name = FW_FILE_NAME_E2;
8943         else {
8944                 BNX2X_ERR("Unsupported chip revision\n");
8945                 return -EINVAL;
8946         }
8947
8948         BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
8949
8950         rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
8951         if (rc) {
8952                 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
8953                 goto request_firmware_exit;
8954         }
8955
8956         rc = bnx2x_check_firmware(bp);
8957         if (rc) {
8958                 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
8959                 goto request_firmware_exit;
8960         }
8961
8962         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
8963
8964         /* Initialize the pointers to the init arrays */
8965         /* Blob */
8966         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
8967
8968         /* Opcodes */
8969         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
8970
8971         /* Offsets */
8972         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
8973                             be16_to_cpu_n);
8974
8975         /* STORMs firmware */
8976         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8977                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
8978         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
8979                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
8980         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8981                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
8982         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
8983                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
8984         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8985                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
8986         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
8987                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
8988         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8989                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
8990         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
8991                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
8992         /* IRO */
8993         BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
8994
8995         return 0;
8996
8997 iro_alloc_err:
8998         kfree(bp->init_ops_offsets);
8999 init_offsets_alloc_err:
9000         kfree(bp->init_ops);
9001 init_ops_alloc_err:
9002         kfree(bp->init_data);
9003 request_firmware_exit:
9004         release_firmware(bp->firmware);
9005
9006         return rc;
9007 }
9008
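/* Compute the number of QM connection IDs to configure: the L2 CIDs
 * (plus the CNIC CIDs when BCM_CNIC is set) rounded up to the QM
 * granularity.
 */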
9009 static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9010 {
9011         int cid_count = L2_FP_COUNT(l2_cid_count);
9012
9013 #ifdef BCM_CNIC
9014         cid_count += CNIC_CID_MAX;
9015 #endif
9016         return roundup(cid_count, QM_CID_ROUND);
9017 }
9018
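/* PCI probe entry point: allocate the net_device, map the device BARs,
 * initialize the bnx2x state and register with the networking core.
 */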
9019 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9020                                     const struct pci_device_id *ent)
9021 {
9022         struct net_device *dev = NULL;
9023         struct bnx2x *bp;
9024         int pcie_width, pcie_speed;
9025         int rc, cid_count;
9026
9027         switch (ent->driver_data) {
9028         case BCM57710:
9029         case BCM57711:
9030         case BCM57711E:
9031                 cid_count = FP_SB_MAX_E1x;
9032                 break;
9033
9034         case BCM57712:
9035         case BCM57712E:
9036                 cid_count = FP_SB_MAX_E2;
9037                 break;
9038
9039         default:
9040                 pr_err("Unknown board_type (%ld), aborting\n",
9041                        ent->driver_data);
9042                 return -ENODEV;
9043         }
9044
9045         cid_count += CNIC_CONTEXT_USE;
9046
9047         /* dev zeroed in init_etherdev */
9048         dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
9049         if (!dev) {
9050                 dev_err(&pdev->dev, "Cannot allocate net device\n");
9051                 return -ENOMEM;
9052         }
9053
9054         bp = netdev_priv(dev);
9055         bp->msg_enable = debug;
9056
9057         pci_set_drvdata(pdev, dev);
9058
9059         bp->l2_cid_count = cid_count;
9060
9061         rc = bnx2x_init_dev(pdev, dev);
9062         if (rc < 0) {
9063                 free_netdev(dev);
9064                 return rc;
9065         }
9066
9067         rc = bnx2x_init_bp(bp);
9068         if (rc)
9069                 goto init_one_exit;
9070
9071         /* calc qm_cid_count */
9072         bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9073
9074         rc = register_netdev(dev);
9075         if (rc) {
9076                 dev_err(&pdev->dev, "Cannot register net device\n");
9077                 goto init_one_exit;
9078         }
9079
9080         /* Configure interrupt mode: try to enable MSI-X/MSI if
9081          * needed, set bp->num_queues appropriately.
9082          */
9083         bnx2x_set_int_mode(bp);
9084
9085         /* Add all NAPI objects */
9086         bnx2x_add_all_napi(bp);
9087
9088         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
9089
9090         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9091                " IRQ %d, ", board_info[ent->driver_data].name,
9092                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
9093                pcie_width,
9094                ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9095                  (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9096                                                 "5GHz (Gen2)" : "2.5GHz",
9097                dev->base_addr, bp->pdev->irq);
9098         pr_cont("node addr %pM\n", dev->dev_addr);
9099
9100         return 0;
9101
9102 init_one_exit:
9103         if (bp->regview)
9104                 iounmap(bp->regview);
9105
9106         if (bp->doorbells)
9107                 iounmap(bp->doorbells);
9108
9109         free_netdev(dev);
9110
9111         if (atomic_read(&pdev->enable_cnt) == 1)
9112                 pci_release_regions(pdev);
9113
9114         pci_disable_device(pdev);
9115         pci_set_drvdata(pdev, NULL);
9116
9117         return rc;
9118 }
9119
9120 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9121 {
9122         struct net_device *dev = pci_get_drvdata(pdev);
9123         struct bnx2x *bp;
9124
9125         if (!dev) {
9126                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
9127                 return;
9128         }
9129         bp = netdev_priv(dev);
9130
9131         unregister_netdev(dev);
9132
9133         /* Delete all NAPI objects */
9134         bnx2x_del_all_napi(bp);
9135
9136         /* Disable MSI/MSI-X */
9137         bnx2x_disable_msi(bp);
9138
9139         /* Make sure RESET task is not scheduled before continuing */
9140         cancel_delayed_work_sync(&bp->reset_task);
9141
9142         if (bp->regview)
9143                 iounmap(bp->regview);
9144
9145         if (bp->doorbells)
9146                 iounmap(bp->doorbells);
9147
9148         bnx2x_free_mem_bp(bp);
9149
9150         free_netdev(dev);
9151
9152         if (atomic_read(&pdev->enable_cnt) == 1)
9153                 pci_release_regions(pdev);
9154
9155         pci_disable_device(pdev);
9156         pci_set_drvdata(pdev, NULL);
9157 }
9158
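/* Minimal unload path used for EEH error handling: stop the interface,
 * release IRQs and free SKBs, SGEs and driver memory.
 */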
9159 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9160 {
9161         int i;
9162
9163         bp->state = BNX2X_STATE_ERROR;
9164
9165         bp->rx_mode = BNX2X_RX_MODE_NONE;
9166
9167         bnx2x_netif_stop(bp, 0);
9168         netif_carrier_off(bp->dev);
9169
9170         del_timer_sync(&bp->timer);
9171         bp->stats_state = STATS_STATE_DISABLED;
9172         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9173
9174         /* Release IRQs */
9175         bnx2x_free_irq(bp);
9176
9177         /* Free SKBs, SGEs, TPA pool and driver internals */
9178         bnx2x_free_skbs(bp);
9179
9180         for_each_queue(bp, i)
9181                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
9182
9183         bnx2x_free_mem(bp);
9184
9185         bp->state = BNX2X_STATE_CLOSED;
9186
9187         return 0;
9188 }
9189
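/* Re-read the shared memory state after a slot reset: locate shmem,
 * check the MCP validity signature and re-sync the driver/MCP mailbox
 * sequence number.
 */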
9190 static void bnx2x_eeh_recover(struct bnx2x *bp)
9191 {
9192         u32 val;
9193
9194         mutex_init(&bp->port.phy_mutex);
9195
9196         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9197         bp->link_params.shmem_base = bp->common.shmem_base;
9198         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9199
9200         if (!bp->common.shmem_base ||
9201             (bp->common.shmem_base < 0xA0000) ||
9202             (bp->common.shmem_base >= 0xC0000)) {
9203                 BNX2X_DEV_INFO("MCP not active\n");
9204                 bp->flags |= NO_MCP_FLAG;
9205                 return;
9206         }
9207
9208         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9209         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9210                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9211                 BNX2X_ERR("BAD MCP validity signature\n");
9212
9213         if (!BP_NOMCP(bp)) {
9214                 bp->fw_seq =
9215                     (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9216                     DRV_MSG_SEQ_NUMBER_MASK);
9217                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9218         }
9219 }
9220
9221 /**
9222  * bnx2x_io_error_detected - called when PCI error is detected
9223  * @pdev: Pointer to PCI device
9224  * @state: The current pci connection state
9225  *
9226  * This function is called after a PCI bus error affecting
9227  * this device has been detected.
9228  */
9229 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9230                                                 pci_channel_state_t state)
9231 {
9232         struct net_device *dev = pci_get_drvdata(pdev);
9233         struct bnx2x *bp = netdev_priv(dev);
9234
9235         rtnl_lock();
9236
9237         netif_device_detach(dev);
9238
9239         if (state == pci_channel_io_perm_failure) {
9240                 rtnl_unlock();
9241                 return PCI_ERS_RESULT_DISCONNECT;
9242         }
9243
9244         if (netif_running(dev))
9245                 bnx2x_eeh_nic_unload(bp);
9246
9247         pci_disable_device(pdev);
9248
9249         rtnl_unlock();
9250
9251         /* Request a slot reset */
9252         return PCI_ERS_RESULT_NEED_RESET;
9253 }
9254
9255 /**
9256  * bnx2x_io_slot_reset - called after the PCI bus has been reset
9257  * @pdev: Pointer to PCI device
9258  *
9259  * Restart the card from scratch, as if from a cold-boot.
9260  */
9261 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9262 {
9263         struct net_device *dev = pci_get_drvdata(pdev);
9264         struct bnx2x *bp = netdev_priv(dev);
9265
9266         rtnl_lock();
9267
9268         if (pci_enable_device(pdev)) {
9269                 dev_err(&pdev->dev,
9270                         "Cannot re-enable PCI device after reset\n");
9271                 rtnl_unlock();
9272                 return PCI_ERS_RESULT_DISCONNECT;
9273         }
9274
9275         pci_set_master(pdev);
9276         pci_restore_state(pdev);
9277
9278         if (netif_running(dev))
9279                 bnx2x_set_power_state(bp, PCI_D0);
9280
9281         rtnl_unlock();
9282
9283         return PCI_ERS_RESULT_RECOVERED;
9284 }
9285
9286 /**
9287  * bnx2x_io_resume - called when traffic can start flowing again
9288  * @pdev: Pointer to PCI device
9289  *
9290  * This callback is called when the error recovery driver tells us that
9291  * it's OK to resume normal operation.
9292  */
9293 static void bnx2x_io_resume(struct pci_dev *pdev)
9294 {
9295         struct net_device *dev = pci_get_drvdata(pdev);
9296         struct bnx2x *bp = netdev_priv(dev);
9297
9298         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
9299                 printk(KERN_ERR "Handling parity error recovery. "
9300                                 "Try again later\n");
9301                 return;
9302         }
9303
9304         rtnl_lock();
9305
9306         bnx2x_eeh_recover(bp);
9307
9308         if (netif_running(dev))
9309                 bnx2x_nic_load(bp, LOAD_NORMAL);
9310
9311         netif_device_attach(dev);
9312
9313         rtnl_unlock();
9314 }
9315
9316 static struct pci_error_handlers bnx2x_err_handler = {
9317         .error_detected = bnx2x_io_error_detected,
9318         .slot_reset     = bnx2x_io_slot_reset,
9319         .resume         = bnx2x_io_resume,
9320 };
9321
9322 static struct pci_driver bnx2x_pci_driver = {
9323         .name        = DRV_MODULE_NAME,
9324         .id_table    = bnx2x_pci_tbl,
9325         .probe       = bnx2x_init_one,
9326         .remove      = __devexit_p(bnx2x_remove_one),
9327         .suspend     = bnx2x_suspend,
9328         .resume      = bnx2x_resume,
9329         .err_handler = &bnx2x_err_handler,
9330 };
9331
9332 static int __init bnx2x_init(void)
9333 {
9334         int ret;
9335
9336         pr_info("%s", version);
9337
9338         bnx2x_wq = create_singlethread_workqueue("bnx2x");
9339         if (bnx2x_wq == NULL) {
9340                 pr_err("Cannot create workqueue\n");
9341                 return -ENOMEM;
9342         }
9343
9344         ret = pci_register_driver(&bnx2x_pci_driver);
9345         if (ret) {
9346                 pr_err("Cannot register driver\n");
9347                 destroy_workqueue(bnx2x_wq);
9348         }
9349         return ret;
9350 }
9351
9352 static void __exit bnx2x_cleanup(void)
9353 {
9354         pci_unregister_driver(&bnx2x_pci_driver);
9355
9356         destroy_workqueue(bnx2x_wq);
9357 }
9358
9359 module_init(bnx2x_init);
9360 module_exit(bnx2x_cleanup);
9361
9362 #ifdef BCM_CNIC
9363
9364 /* count denotes the number of new completions we have seen */
9365 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9366 {
9367         struct eth_spe *spe;
9368
9369 #ifdef BNX2X_STOP_ON_ERROR
9370         if (unlikely(bp->panic))
9371                 return;
9372 #endif
9373
9374         spin_lock_bh(&bp->spq_lock);
9375         BUG_ON(bp->cnic_spq_pending < count);
9376         bp->cnic_spq_pending -= count;
9377
9379         for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
9380                 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
9381                                 & SPE_HDR_CONN_TYPE) >>
9382                                 SPE_HDR_CONN_TYPE_SHIFT;
9383
9384                 /* Set validation for iSCSI L2 client before sending SETUP
9385                  *  ramrod
9386                  */
9387                 if (type == ETH_CONNECTION_TYPE) {
9388                         u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
9389                                              hdr.conn_and_cmd_data) >>
9390                                 SPE_HDR_CMD_ID_SHIFT) & 0xff;
9391
9392                         if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
9393                                 bnx2x_set_ctx_validation(&bp->context.
9394                                                 vcxt[BNX2X_ISCSI_ETH_CID].eth,
9395                                         HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9396                 }
9397
9398                 /* There may be no more than 8 L2 and COMMON SPEs and no
9399                  * more than 8 L5 SPEs in flight.
9400                  */
9401                 if ((type == NONE_CONNECTION_TYPE) ||
9402                     (type == ETH_CONNECTION_TYPE)) {
9403                         if (!atomic_read(&bp->spq_left))
9404                                 break;
9405                         else
9406                                 atomic_dec(&bp->spq_left);
9407                 } else if (type == ISCSI_CONNECTION_TYPE) {
9408                         if (bp->cnic_spq_pending >=
9409                             bp->cnic_eth_dev.max_kwqe_pending)
9410                                 break;
9411                         else
9412                                 bp->cnic_spq_pending++;
9413                 } else {
9414                         BNX2X_ERR("Unknown SPE type: %d\n", type);
9415                         bnx2x_panic();
9416                         break;
9417                 }
9418
9419                 spe = bnx2x_sp_get_next(bp);
9420                 *spe = *bp->cnic_kwq_cons;
9421
9422                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
9423                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
9424
9425                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
9426                         bp->cnic_kwq_cons = bp->cnic_kwq;
9427                 else
9428                         bp->cnic_kwq_cons++;
9429         }
9430         bnx2x_sp_prod_update(bp);
9431         spin_unlock_bh(&bp->spq_lock);
9432 }
9433
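/* Queue CNIC 16-byte kwqes onto the driver's slow path work queue and
 * kick posting if there is room; returns the number of kwqes accepted.
 */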
9434 static int bnx2x_cnic_sp_queue(struct net_device *dev,
9435                                struct kwqe_16 *kwqes[], u32 count)
9436 {
9437         struct bnx2x *bp = netdev_priv(dev);
9438         int i;
9439
9440 #ifdef BNX2X_STOP_ON_ERROR
9441         if (unlikely(bp->panic))
9442                 return -EIO;
9443 #endif
9444
9445         spin_lock_bh(&bp->spq_lock);
9446
9447         for (i = 0; i < count; i++) {
9448                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
9449
9450                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
9451                         break;
9452
9453                 *bp->cnic_kwq_prod = *spe;
9454
9455                 bp->cnic_kwq_pending++;
9456
9457                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
9458                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
9459                    spe->data.update_data_addr.hi,
9460                    spe->data.update_data_addr.lo,
9461                    bp->cnic_kwq_pending);
9462
9463                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
9464                         bp->cnic_kwq_prod = bp->cnic_kwq;
9465                 else
9466                         bp->cnic_kwq_prod++;
9467         }
9468
9469         spin_unlock_bh(&bp->spq_lock);
9470
9471         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
9472                 bnx2x_cnic_sp_post(bp, 0);
9473
9474         return i;
9475 }
9476
9477 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9478 {
9479         struct cnic_ops *c_ops;
9480         int rc = 0;
9481
9482         mutex_lock(&bp->cnic_mutex);
9483         c_ops = bp->cnic_ops;
9484         if (c_ops)
9485                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9486         mutex_unlock(&bp->cnic_mutex);
9487
9488         return rc;
9489 }
9490
9491 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9492 {
9493         struct cnic_ops *c_ops;
9494         int rc = 0;
9495
9496         rcu_read_lock();
9497         c_ops = rcu_dereference(bp->cnic_ops);
9498         if (c_ops)
9499                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9500         rcu_read_unlock();
9501
9502         return rc;
9503 }
9504
9505 /*
9506  * for commands that have no data
9507  */
9508 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
9509 {
9510         struct cnic_ctl_info ctl = {0};
9511
9512         ctl.cmd = cmd;
9513
9514         return bnx2x_cnic_ctl_send(bp, &ctl);
9515 }
9516
9517 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9518 {
9519         struct cnic_ctl_info ctl;
9520
9521         /* first we tell CNIC and only then we count this as a completion */
9522         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9523         ctl.data.comp.cid = cid;
9524
9525         bnx2x_cnic_ctl_send_bh(bp, &ctl);
9526         bnx2x_cnic_sp_post(bp, 0);
9527 }
9528
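/* Dispatch control commands issued by CNIC: ILT writes, SPQ credit
 * returns and starting/stopping the iSCSI L2 ring.
 */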
9529 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
9530 {
9531         struct bnx2x *bp = netdev_priv(dev);
9532         int rc = 0;
9533
9534         switch (ctl->cmd) {
9535         case DRV_CTL_CTXTBL_WR_CMD: {
9536                 u32 index = ctl->data.io.offset;
9537                 dma_addr_t addr = ctl->data.io.dma_addr;
9538
9539                 bnx2x_ilt_wr(bp, index, addr);
9540                 break;
9541         }
9542
9543         case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
9544                 int count = ctl->data.credit.credit_count;
9545
9546                 bnx2x_cnic_sp_post(bp, count);
9547                 break;
9548         }
9549
9550         /* rtnl_lock is held.  */
9551         case DRV_CTL_START_L2_CMD: {
9552                 u32 cli = ctl->data.ring.client_id;
9553
9554                 /* Set iSCSI MAC address */
9555                 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
9556
9557                 mmiowb();
9558                 barrier();
9559
9560                 /* Start accepting on the iSCSI L2 ring. Accept all
9561                  * multicasts because it's the only way for the UIO Client
9562                  * to accept them (in non-promiscuous mode only one Client
9563                  * per function, the leading one in our case, receives
9564                  * multicast packets).
9565                  */
9566                 bnx2x_rxq_set_mac_filters(bp, cli,
9567                         BNX2X_ACCEPT_UNICAST |
9568                         BNX2X_ACCEPT_BROADCAST |
9569                         BNX2X_ACCEPT_ALL_MULTICAST);
9570                 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9571
9572                 break;
9573         }
9574
9575         /* rtnl_lock is held.  */
9576         case DRV_CTL_STOP_L2_CMD: {
9577                 u32 cli = ctl->data.ring.client_id;
9578
9579                 /* Stop accepting on iSCSI L2 ring */
9580                 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
9581                 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9582
9583                 mmiowb();
9584                 barrier();
9585
9586                 /* Unset iSCSI L2 MAC */
9587                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
9588                 break;
9589         }
9590         case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
9591                 int count = ctl->data.credit.credit_count;
9592
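                /* atomic_add() does not imply ordering; the barriers make
                 * the returned credits visible to subsequent slow-path
                 * posting.
                 */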
9593                 smp_mb__before_atomic_inc();
9594                 atomic_add(count, &bp->spq_left);
9595                 smp_mb__after_atomic_inc();
9596                 break;
9597         }
9598
9599         default:
9600                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
9601                 rc = -EINVAL;
9602         }
9603
9604         return rc;
9605 }
9606
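/* Describe the IRQ resources CNIC will use: entry 0 carries the CNIC
 * status block (driven by MSI-X vector 1 when MSI-X is enabled), and
 * entry 1 mirrors the default status block.
 */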
9607 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
9608 {
9609         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9610
9611         if (bp->flags & USING_MSIX_FLAG) {
9612                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
9613                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
9614                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
9615         } else {
9616                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
9617                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
9618         }
9619         if (CHIP_IS_E2(bp))
9620                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
9621         else
9622                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
9623
9624         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
9625         cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
9626         cp->irq_arr[1].status_blk = bp->def_status_blk;
9627         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
9628         cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
9629
9630         cp->num_irq = 2;
9631 }
9632
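/* CNIC registration entry point: allocate a page for the CNIC kernel
 * work queue ring, describe the IRQ layout and publish @ops under RCU
 * so the send paths above can start using it.
 */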
9633 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
9634                                void *data)
9635 {
9636         struct bnx2x *bp = netdev_priv(dev);
9637         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9638
9639         if (ops == NULL)
9640                 return -EINVAL;
9641
9642         if (atomic_read(&bp->intr_sem) != 0)
9643                 return -EBUSY;
9644
9645         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
9646         if (!bp->cnic_kwq)
9647                 return -ENOMEM;
9648
9649         bp->cnic_kwq_cons = bp->cnic_kwq;
9650         bp->cnic_kwq_prod = bp->cnic_kwq;
9651         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
9652
9653         bp->cnic_spq_pending = 0;
9654         bp->cnic_kwq_pending = 0;
9655
9656         bp->cnic_data = data;
9657
9658         cp->num_irq = 0;
9659         cp->drv_state = CNIC_DRV_STATE_REGD;
9660         cp->iro_arr = bp->iro_arr;
9661
9662         bnx2x_setup_cnic_irq_info(bp);
9663
9664         rcu_assign_pointer(bp->cnic_ops, ops);
9665
9666         return 0;
9667 }
9668
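/* Undo bnx2x_register_cnic(): drop the iSCSI MAC if it was set, clear
 * the ops pointer and wait for in-flight RCU readers before freeing the
 * kernel work queue ring.
 */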
9669 static int bnx2x_unregister_cnic(struct net_device *dev)
9670 {
9671         struct bnx2x *bp = netdev_priv(dev);
9672         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9673
9674         mutex_lock(&bp->cnic_mutex);
9675         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
9676                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
9677                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
9678         }
9679         cp->drv_state = 0;
9680         rcu_assign_pointer(bp->cnic_ops, NULL);
9681         mutex_unlock(&bp->cnic_mutex);
9682         synchronize_rcu();
9683         kfree(bp->cnic_kwq);
9684         bp->cnic_kwq = NULL;
9685
9686         return 0;
9687 }
9688
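/* Called by the CNIC module to discover this device: fill in the
 * cnic_eth_dev descriptor with chip resources, ILT/context geometry and
 * the driver callbacks defined above.
 */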
9689 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
9690 {
9691         struct bnx2x *bp = netdev_priv(dev);
9692         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9693
9694         cp->drv_owner = THIS_MODULE;
9695         cp->chip_id = CHIP_ID(bp);
9696         cp->pdev = bp->pdev;
9697         cp->io_base = bp->regview;
9698         cp->io_base2 = bp->doorbells;
9699         cp->max_kwqe_pending = 8;
9700         cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
9701         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
9702                              bnx2x_cid_ilt_lines(bp);
9703         cp->ctx_tbl_len = CNIC_ILT_LINES;
9704         cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
9705         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
9706         cp->drv_ctl = bnx2x_drv_ctl;
9707         cp->drv_register_cnic = bnx2x_register_cnic;
9708         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
9709         cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
9710         cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
9711
9712         DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
9713                          "starting cid %d\n",
9714            cp->ctx_blk_size,
9715            cp->ctx_tbl_offset,
9716            cp->ctx_tbl_len,
9717            cp->starting_cid);
9718         return cp;
9719 }
9720 EXPORT_SYMBOL(bnx2x_cnic_probe);
9721
9722 #endif /* BCM_CNIC */
9723