/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_err)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

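/* The doorbell helpers below all share one pattern: the queue id and the
 * count of posted (or popped) entries are packed into a single 32-bit value
 * and written to a per-queue-type offset within the doorbell BAR
 * (adapter->db). For example, for the RX queue:
 *
 *      val = (qid & DB_RQ_RING_ID_MASK) | (posted << DB_RQ_NUM_POSTED_SHIFT);
 *      iowrite32(val, adapter->db + DB_RQ_OFFSET);
 *
 * The wmb() before each write orders descriptor updates in host memory ahead
 * of the doorbell, so the device cannot observe the kick before the entries.
 */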
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        status = be_cmd_mac_addr_query(adapter, current_mac,
                        MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
        if (status)
                goto err;

        if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
                status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
                if (status)
                        goto err;

                be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
        }
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
        return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}
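
/* Worked example of the wrap handling above: suppose *acc == 0x0001FFFE,
 * i.e. the counter has wrapped once (hi word 0x0001) and the last HW value
 * seen was 0xFFFE. If the HW counter now reads val == 0x0003, then
 * val < lo(*acc), so another wrap is assumed:
 * newacc = 0x00010000 + 0x0003 + 65536 = 0x00020003. This only stays
 * correct if the 16-bit HW counter is sampled at least once per wrap.
 */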

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }

        /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
        for_all_rx_queues(adapter, rxo, i) {
                /* below erx HW counter can actually wrap around after
                 * 65535. Driver accumulates a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                (u16)erx->rx_drops_no_fragments[rxo->q.id]);
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
        struct net_device *netdev = adapter->netdev;

        /* when link status changes, link speed must be re-queried from card */
        adapter->link_speed = -1;
        if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
                netif_carrier_on(netdev);
                dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
        } else {
                netif_carrier_off(netdev);
                dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
        }
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}
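
/* Example of the count above: a linear skb (headlen > 0) with two page
 * frags needs 1 + 2 data WRBs plus the header WRB, i.e. cnt = 4, already
 * even, so no dummy is added. With one page frag, cnt = 3 and a dummy WRB
 * is appended on non-Lancer chips. The even-count requirement is implied
 * by this code rather than documented here, so treat it as a property of
 * the BE2/BE3 TX ring.
 */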

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
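
/* Sketch of the VLAN priority remap above: with VLAN_PRIO_SHIFT == 13, a
 * tag of 0x6005 carries PCP 3 and VID 0x005. If bit 3 of
 * adapter->vlan_prio_bmap is clear, the PCP field is replaced:
 *
 *      vlan_tag = (0x6005 & ~VLAN_PRIO_MASK) | adapter->recommended_prio;
 *
 * so the VID is preserved and only the priority bits change. This assumes
 * recommended_prio is stored pre-shifted into the PCP bit positions, as
 * the lack of a shift here implies.
 */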

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}
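
/* On the dma_err path above, txq->head is rewound to map_head and each WRB
 * that was filled is walked again so its fragment can be unmapped; copied
 * counts down to zero as frag_len bytes are peeled off per WRB. Only the
 * first mapping was dma_map_single() (the linear head), hence map_single is
 * cleared after the first unmap.
 */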

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter, false, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
                        netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = rx_stats(rxo);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) /
                        (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = stats->rx_pps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd) {
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
                rx_eq->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}
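
/* For example, an IPv6/TCP frame with a good L4 checksum passes even though
 * ip_csum is 0: IPv6 has no header checksum, so ipcksm is meaningless there
 * and the ipv6 flag short-circuits that part of the check. A non-TCP/UDP
 * frame never passes and is left for the stack to verify in software.
 */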

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}
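
/* Note on last_page_user above: a big page is mapped once for DMA and then
 * carved into rx_frag_size fragments by be_post_rx_frags(); only the
 * fragment flagged as the page's last user triggers dma_unmap_page(), so
 * the mapping lives until every fragment of that page has been consumed.
 */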

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0],
                                  curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}
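
/* The coalescing rule above: fragments are posted in page order, so a
 * fragment with page_offset == 0 must come from a fresh page and opens a
 * new skb frag slot (j++); any later fragment of the same page only grows
 * frags[j] and drops its extra page reference. E.g. with a 4K page and
 * rx_frag_size == 2048, the two halves of one page end up in a single skb
 * frag of up to 4K.
 */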

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                } else {
                        put_page(page_info->page);
                }
                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        napi_gro_frags(&eq_obj->napi);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
                                               vlan_tag, compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
                                               vlan_tag, compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is OK to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
                if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}
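
/* __GFP_COMP is needed above because a multi-order allocation is later
 * split into rx_frag_size fragments that each take page references via
 * get_page(); marking it as a compound page makes those references (and
 * the final put_page() calls) act on the allocation as a whole rather than
 * on individual tail pages.
 */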
1327
1328 /*
1329  * Allocate a page, split it to fragments of size rx_frag_size and post as
1330  * receive buffers to BE
1331  */
1332 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1333 {
1334         struct be_adapter *adapter = rxo->adapter;
1335         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1336         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1337         struct be_queue_info *rxq = &rxo->q;
1338         struct page *pagep = NULL;
1339         struct be_eth_rx_d *rxd;
1340         u64 page_dmaaddr = 0, frag_dmaaddr;
1341         u32 posted, page_offset = 0;
1342
1343         page_info = &rxo->page_info_tbl[rxq->head];
1344         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1345                 if (!pagep) {
1346                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1347                         if (unlikely(!pagep)) {
1348                                 rx_stats(rxo)->rx_post_fail++;
1349                                 break;
1350                         }
1351                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1352                                                     0, adapter->big_page_size,
1353                                                     DMA_FROM_DEVICE);
1354                         page_info->page_offset = 0;
1355                 } else {
1356                         get_page(pagep);
1357                         page_info->page_offset = page_offset + rx_frag_size;
1358                 }
1359                 page_offset = page_info->page_offset;
1360                 page_info->page = pagep;
1361                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1362                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1363
1364                 rxd = queue_head_node(rxq);
1365                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1366                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1367
1368                 /* Any space left in the current big page for another frag? */
1369                 if ((page_offset + rx_frag_size + rx_frag_size) >
1370                                         adapter->big_page_size) {
1371                         pagep = NULL;
1372                         page_info->last_page_user = true;
1373                 }
1374
1375                 prev_page_info = page_info;
1376                 queue_head_inc(rxq);
1377                 page_info = &page_info_tbl[rxq->head];
1378         }
1379         if (pagep)
1380                 prev_page_info->last_page_user = true;
1381
1382         if (posted) {
1383                 atomic_add(posted, &rxq->used);
1384                 be_rxq_notify(adapter, rxq->id, posted);
1385         } else if (atomic_read(&rxq->used) == 0) {
1386                 /* Let be_worker replenish when memory is available */
1387                 rxo->rx_post_starved = true;
1388         }
1389 }
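
/*
 * Illustrative sketch, not part of the driver: the fragment arithmetic
 * used by be_post_rx_frags() above. A "big page" is carved into
 * rx_frag_size chunks at offsets 0, rx_frag_size, 2 * rx_frag_size, ...;
 * a fragment is the page's last user when no room is left for one more
 * full fragment. demo_is_last_frag() is a hypothetical helper written
 * only for illustration.
 */
static inline bool __maybe_unused demo_is_last_frag(u32 page_offset,
                                                    u32 frag_size,
                                                    u32 big_page_size)
{
        /* Same test as the "any space left?" check in the loop above */
        return page_offset + 2 * frag_size > big_page_size;
}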
1390
1391 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1392 {
1393         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1394
1395         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1396                 return NULL;
1397
1398         rmb();
1399         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1400
1401         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1402
1403         queue_tail_inc(tx_cq);
1404         return txcp;
1405 }
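
/*
 * Illustrative sketch, not part of the driver: the reap pattern shared
 * by be_rx_compl_get() and be_tx_compl_get() above. The DMA'd 'valid'
 * bit is tested first; rmb() then orders that test before any read of
 * the entry body, and the caller clears 'valid' afterwards so the slot
 * is not reaped twice once the ring wraps. demo_compl_is_valid() is a
 * hypothetical helper written only for illustration.
 */
static inline bool __maybe_unused demo_compl_is_valid(const u32 *dw,
                                                      int valid_dword)
{
        if (dw[valid_dword] == 0)
                return false;
        rmb();          /* read 'valid' before the rest of the entry */
        return true;
}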
1406
1407 static u16 be_tx_compl_process(struct be_adapter *adapter,
1408                 struct be_tx_obj *txo, u16 last_index)
1409 {
1410         struct be_queue_info *txq = &txo->q;
1411         struct be_eth_wrb *wrb;
1412         struct sk_buff **sent_skbs = txo->sent_skb_list;
1413         struct sk_buff *sent_skb;
1414         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1415         bool unmap_skb_hdr = true;
1416
1417         sent_skb = sent_skbs[txq->tail];
1418         BUG_ON(!sent_skb);
1419         sent_skbs[txq->tail] = NULL;
1420
1421         /* skip header wrb */
1422         queue_tail_inc(txq);
1423
1424         do {
1425                 cur_index = txq->tail;
1426                 wrb = queue_tail_node(txq);
1427                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1428                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1429                 unmap_skb_hdr = false;
1430
1431                 num_wrbs++;
1432                 queue_tail_inc(txq);
1433         } while (cur_index != last_index);
1434
1435         kfree_skb(sent_skb);
1436         return num_wrbs;
1437 }
1438
1439 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1440 {
1441         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1442
1443         if (!eqe->evt)
1444                 return NULL;
1445
1446         rmb();
1447         eqe->evt = le32_to_cpu(eqe->evt);
1448         queue_tail_inc(&eq_obj->q);
1449         return eqe;
1450 }
1451
1452 static int event_handle(struct be_adapter *adapter,
1453                         struct be_eq_obj *eq_obj,
1454                         bool rearm)
1455 {
1456         struct be_eq_entry *eqe;
1457         u16 num = 0;
1458
1459         while ((eqe = event_get(eq_obj)) != NULL) {
1460                 eqe->evt = 0;
1461                 num++;
1462         }
1463
1464         /* Deal with any spurious interrupts that come
1465          * without events
1466          */
1467         if (!num)
1468                 rearm = true;
1469
1470         be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1471         if (num)
1472                 napi_schedule(&eq_obj->napi);
1473
1474         return num;
1475 }
1476
1477 /* Just read and notify events without processing them.
1478  * Used when destroying event queues */
1479 static void be_eq_clean(struct be_adapter *adapter,
1480                         struct be_eq_obj *eq_obj)
1481 {
1482         struct be_eq_entry *eqe;
1483         u16 num = 0;
1484
1485         while ((eqe = event_get(eq_obj)) != NULL) {
1486                 eqe->evt = 0;
1487                 num++;
1488         }
1489
1490         if (num)
1491                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1492 }
1493
1494 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1495 {
1496         struct be_rx_page_info *page_info;
1497         struct be_queue_info *rxq = &rxo->q;
1498         struct be_queue_info *rx_cq = &rxo->cq;
1499         struct be_rx_compl_info *rxcp;
1500         u16 tail;
1501
1502         /* First cleanup pending rx completions */
1503         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1504                 be_rx_compl_discard(adapter, rxo, rxcp);
1505                 be_cq_notify(adapter, rx_cq->id, false, 1);
1506         }
1507
1508         /* Then free posted rx buffers that were not used */
1509         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1510         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1511                 page_info = get_rx_page_info(adapter, rxo, tail);
1512                 put_page(page_info->page);
1513                 memset(page_info, 0, sizeof(*page_info));
1514         }
1515         BUG_ON(atomic_read(&rxq->used));
1516         rxq->tail = rxq->head = 0;
1517 }
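
/*
 * Illustrative sketch, not part of the driver: how be_rx_q_clean()
 * above derives the oldest posted index from head and the in-use
 * count. With len = 8, head = 2 and used = 3, the unconsumed entries
 * sit at indices 7, 0 and 1, so tail = (2 + 8 - 3) % 8 = 7.
 * demo_ring_tail() is a hypothetical helper written only for
 * illustration.
 */
static inline u16 __maybe_unused demo_ring_tail(u16 head, u16 len, u16 used)
{
        return (head + len - used) % len;
}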
1518
1519 static void be_tx_compl_clean(struct be_adapter *adapter,
1520                                 struct be_tx_obj *txo)
1521 {
1522         struct be_queue_info *tx_cq = &txo->cq;
1523         struct be_queue_info *txq = &txo->q;
1524         struct be_eth_tx_compl *txcp;
1525         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1526         struct sk_buff **sent_skbs = txo->sent_skb_list;
1527         struct sk_buff *sent_skb;
1528         bool dummy_wrb;
1529
1530         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1531         do {
1532                 while ((txcp = be_tx_compl_get(tx_cq))) {
1533                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1534                                         wrb_index, txcp);
1535                         num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1536                         cmpl++;
1537                 }
1538                 if (cmpl) {
1539                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1540                         atomic_sub(num_wrbs, &txq->used);
1541                         cmpl = 0;
1542                         num_wrbs = 0;
1543                 }
1544
1545                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1546                         break;
1547
1548                 mdelay(1);
1549         } while (true);
1550
1551         if (atomic_read(&txq->used))
1552                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1553                         atomic_read(&txq->used));
1554
1555         /* free posted tx for which compls will never arrive */
1556         while (atomic_read(&txq->used)) {
1557                 sent_skb = sent_skbs[txq->tail];
1558                 end_idx = txq->tail;
1559                 index_adv(&end_idx,
1560                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1561                         txq->len);
1562                 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1563                 atomic_sub(num_wrbs, &txq->used);
1564         }
1565 }
1566
1567 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1568 {
1569         struct be_queue_info *q;
1570
1571         q = &adapter->mcc_obj.q;
1572         if (q->created)
1573                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1574         be_queue_free(adapter, q);
1575
1576         q = &adapter->mcc_obj.cq;
1577         if (q->created)
1578                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1579         be_queue_free(adapter, q);
1580 }
1581
1582 /* Must be called only after TX qs are created as MCC shares TX EQ */
1583 static int be_mcc_queues_create(struct be_adapter *adapter)
1584 {
1585         struct be_queue_info *q, *cq;
1586
1587         /* Alloc MCC compl queue */
1588         cq = &adapter->mcc_obj.cq;
1589         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1590                         sizeof(struct be_mcc_compl)))
1591                 goto err;
1592
1593         /* Ask BE to create MCC compl queue; share TX's eq */
1594         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1595                 goto mcc_cq_free;
1596
1597         /* Alloc MCC queue */
1598         q = &adapter->mcc_obj.q;
1599         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1600                 goto mcc_cq_destroy;
1601
1602         /* Ask BE to create MCC queue */
1603         if (be_cmd_mccq_create(adapter, q, cq))
1604                 goto mcc_q_free;
1605
1606         return 0;
1607
1608 mcc_q_free:
1609         be_queue_free(adapter, q);
1610 mcc_cq_destroy:
1611         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1612 mcc_cq_free:
1613         be_queue_free(adapter, cq);
1614 err:
1615         return -1;
1616 }
1617
1618 static void be_tx_queues_destroy(struct be_adapter *adapter)
1619 {
1620         struct be_queue_info *q;
1621         struct be_tx_obj *txo;
1622         u8 i;
1623
1624         for_all_tx_queues(adapter, txo, i) {
1625                 q = &txo->q;
1626                 if (q->created)
1627                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1628                 be_queue_free(adapter, q);
1629
1630                 q = &txo->cq;
1631                 if (q->created)
1632                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1633                 be_queue_free(adapter, q);
1634         }
1635
1636         /* Clear any residual events */
1637         be_eq_clean(adapter, &adapter->tx_eq);
1638
1639         q = &adapter->tx_eq.q;
1640         if (q->created)
1641                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1642         be_queue_free(adapter, q);
1643 }
1644
1645 static int be_num_txqs_want(struct be_adapter *adapter)
1646 {
1647         if ((num_vfs && adapter->sriov_enabled) ||
1648                 be_is_mc(adapter) ||
1649                 lancer_chip(adapter) || !be_physfn(adapter) ||
1650                 adapter->generation == BE_GEN2)
1651                 return 1;
1652         else
1653                 return MAX_TX_QS;
1654 }
1655
1656 /* One TX event queue is shared by all TX compl qs */
1657 static int be_tx_queues_create(struct be_adapter *adapter)
1658 {
1659         struct be_queue_info *eq, *q, *cq;
1660         struct be_tx_obj *txo;
1661         u8 i;
1662
1663         adapter->num_tx_qs = be_num_txqs_want(adapter);
1664         if (adapter->num_tx_qs != MAX_TX_QS)
1665                 netif_set_real_num_tx_queues(adapter->netdev,
1666                         adapter->num_tx_qs);
1667
1668         adapter->tx_eq.max_eqd = 0;
1669         adapter->tx_eq.min_eqd = 0;
1670         adapter->tx_eq.cur_eqd = 96;
1671         adapter->tx_eq.enable_aic = false;
1672
1673         eq = &adapter->tx_eq.q;
1674         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1675                 sizeof(struct be_eq_entry)))
1676                 return -1;
1677
1678         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1679                 goto err;
1680         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1681
1682         for_all_tx_queues(adapter, txo, i) {
1683                 cq = &txo->cq;
1684                 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1685                         sizeof(struct be_eth_tx_compl)))
1686                         goto err;
1687
1688                 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1689                         goto err;
1690
1691                 q = &txo->q;
1692                 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1693                         sizeof(struct be_eth_wrb)))
1694                         goto err;
1695
1696                 if (be_cmd_txq_create(adapter, q, cq))
1697                         goto err;
1698         }
1699         return 0;
1700
1701 err:
1702         be_tx_queues_destroy(adapter);
1703         return -1;
1704 }
1705
1706 static void be_rx_queues_destroy(struct be_adapter *adapter)
1707 {
1708         struct be_queue_info *q;
1709         struct be_rx_obj *rxo;
1710         int i;
1711
1712         for_all_rx_queues(adapter, rxo, i) {
1713                 be_queue_free(adapter, &rxo->q);
1714
1715                 q = &rxo->cq;
1716                 if (q->created)
1717                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1718                 be_queue_free(adapter, q);
1719
1720                 q = &rxo->rx_eq.q;
1721                 if (q->created)
1722                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1723                 be_queue_free(adapter, q);
1724         }
1725 }
1726
1727 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1728 {
1729         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1730                 !adapter->sriov_enabled && be_physfn(adapter) &&
1731                 !be_is_mc(adapter)) {
1732                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1733         } else {
1734                 dev_warn(&adapter->pdev->dev,
1735                         "No support for multiple RX queues\n");
1736                 return 1;
1737         }
1738 }
1739
1740 static int be_rx_queues_create(struct be_adapter *adapter)
1741 {
1742         struct be_queue_info *eq, *q, *cq;
1743         struct be_rx_obj *rxo;
1744         int rc, i;
1745
1746         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1747                                 msix_enabled(adapter) ?
1748                                         adapter->num_msix_vec - 1 : 1);
1749         if (adapter->num_rx_qs != MAX_RX_QS)
1750                 dev_warn(&adapter->pdev->dev,
1751                         "Can create only %d RX queues\n", adapter->num_rx_qs);
1752
1753         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1754         for_all_rx_queues(adapter, rxo, i) {
1755                 rxo->adapter = adapter;
1756                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1757                 rxo->rx_eq.enable_aic = true;
1758
1759                 /* EQ */
1760                 eq = &rxo->rx_eq.q;
1761                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1762                                         sizeof(struct be_eq_entry));
1763                 if (rc)
1764                         goto err;
1765
1766                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1767                 if (rc)
1768                         goto err;
1769
1770                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1771
1772                 /* CQ */
1773                 cq = &rxo->cq;
1774                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1775                                 sizeof(struct be_eth_rx_compl));
1776                 if (rc)
1777                         goto err;
1778
1779                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1780                 if (rc)
1781                         goto err;
1782
1783                 /* Rx Q - will be created in be_open() */
1784                 q = &rxo->q;
1785                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1786                                 sizeof(struct be_eth_rx_d));
1787                 if (rc)
1788                         goto err;
1789
1790         }
1791
1792         return 0;
1793 err:
1794         be_rx_queues_destroy(adapter);
1795         return -1;
1796 }
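
/*
 * Illustrative sketch, not part of the driver: the big_page_size
 * computed in be_rx_queues_create() above. get_order() rounds a byte
 * count up to a power-of-two number of pages, so with 4K pages the
 * default rx_frag_size of 2048 gives order 0 and a 4096-byte big page
 * holding two fragments. demo_big_page_size() is a hypothetical helper
 * written only for illustration.
 */
static inline u32 __maybe_unused demo_big_page_size(u32 frag_size)
{
        return (1 << get_order(frag_size)) * PAGE_SIZE;
}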
1797
1798 static bool event_peek(struct be_eq_obj *eq_obj)
1799 {
1800         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1801         if (!eqe->evt)
1802                 return false;
1803         else
1804                 return true;
1805 }
1806
1807 static irqreturn_t be_intx(int irq, void *dev)
1808 {
1809         struct be_adapter *adapter = dev;
1810         struct be_rx_obj *rxo;
1811         int isr, i, tx = 0, rx = 0;
1812
1813         if (lancer_chip(adapter)) {
1814                 if (event_peek(&adapter->tx_eq))
1815                         tx = event_handle(adapter, &adapter->tx_eq, false);
1816                 for_all_rx_queues(adapter, rxo, i) {
1817                         if (event_peek(&rxo->rx_eq))
1818                                 rx |= event_handle(adapter, &rxo->rx_eq, true);
1819                 }
1820
1821                 if (!(tx || rx))
1822                         return IRQ_NONE;
1823
1824         } else {
1825                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1826                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1827                 if (!isr)
1828                         return IRQ_NONE;
1829
1830                 if ((1 << adapter->tx_eq.eq_idx) & isr)
1831                         event_handle(adapter, &adapter->tx_eq, false);
1832
1833                 for_all_rx_queues(adapter, rxo, i) {
1834                         if ((1 << rxo->rx_eq.eq_idx) & isr)
1835                                 event_handle(adapter, &rxo->rx_eq, true);
1836                 }
1837         }
1838
1839         return IRQ_HANDLED;
1840 }
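
/*
 * Illustrative sketch, not part of the driver: the per-EQ test used by
 * be_intx() above. Each event queue owns one bit of the CEV ISR
 * register, indexed by its eq_idx; an EQ has pending work iff its bit
 * is set in the value read from hardware. demo_eq_has_event() is a
 * hypothetical helper written only for illustration.
 */
static inline bool __maybe_unused demo_eq_has_event(u32 isr, u32 eq_idx)
{
        return (isr & (1 << eq_idx)) != 0;
}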
1841
1842 static irqreturn_t be_msix_rx(int irq, void *dev)
1843 {
1844         struct be_rx_obj *rxo = dev;
1845         struct be_adapter *adapter = rxo->adapter;
1846
1847         event_handle(adapter, &rxo->rx_eq, true);
1848
1849         return IRQ_HANDLED;
1850 }
1851
1852 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1853 {
1854         struct be_adapter *adapter = dev;
1855
1856         event_handle(adapter, &adapter->tx_eq, false);
1857
1858         return IRQ_HANDLED;
1859 }
1860
1861 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1862 {
1863         return rxcp->tcpf && !rxcp->err;
1864 }
1865
1866 static int be_poll_rx(struct napi_struct *napi, int budget)
1867 {
1868         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1869         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1870         struct be_adapter *adapter = rxo->adapter;
1871         struct be_queue_info *rx_cq = &rxo->cq;
1872         struct be_rx_compl_info *rxcp;
1873         u32 work_done;
1874
1875         rx_stats(rxo)->rx_polls++;
1876         for (work_done = 0; work_done < budget; work_done++) {
1877                 rxcp = be_rx_compl_get(rxo);
1878                 if (!rxcp)
1879                         break;
1880
1881                 /* Is it a flush compl that has no data? */
1882                 if (unlikely(rxcp->num_rcvd == 0))
1883                         goto loop_continue;
1884
1885                 /* Discard a compl with partial DMA (Lancer B0) */
1886                 if (unlikely(!rxcp->pkt_size)) {
1887                         be_rx_compl_discard(adapter, rxo, rxcp);
1888                         goto loop_continue;
1889                 }
1890
1891                 /* On BE, drop pkts that arrive due to imperfect filtering in
1892                  * promiscuous mode on some SKUs
1893                  */
1894                 if (unlikely(rxcp->port != adapter->port_num &&
1895                                 !lancer_chip(adapter))) {
1896                         be_rx_compl_discard(adapter, rxo, rxcp);
1897                         goto loop_continue;
1898                 }
1899
1900                 if (do_gro(rxcp))
1901                         be_rx_compl_process_gro(adapter, rxo, rxcp);
1902                 else
1903                         be_rx_compl_process(adapter, rxo, rxcp);
1904 loop_continue:
1905                 be_rx_stats_update(rxo, rxcp);
1906         }
1907
1908         be_cq_notify(adapter, rx_cq->id, false, work_done);
1909
1910         /* Refill the queue */
1911         if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1912                 be_post_rx_frags(rxo, GFP_ATOMIC);
1913
1914         /* All consumed */
1915         if (work_done < budget) {
1916                 napi_complete(napi);
1917                 /* Arm CQ */
1918                 be_cq_notify(adapter, rx_cq->id, true, 0);
1919         }
1920         return work_done;
1921 }
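
/*
 * Illustrative sketch, not part of the driver: the NAPI contract that
 * be_poll_rx() above follows. At most 'budget' completions are reaped;
 * only when fewer than 'budget' were found is the poll done, at which
 * point napi_complete() is called and the CQ re-armed so the next
 * completion raises an interrupt again. demo_poll() is a hypothetical
 * skeleton with the reaping elided.
 */
static int __maybe_unused demo_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        /* ... reap up to 'budget' completions, bumping work_done ... */

        if (work_done < budget) {
                napi_complete(napi);
                /* ... re-arm the completion queue interrupt here ... */
        }
        return work_done;
}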
1922
1923 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1924  * For TX/MCC we don't honour budget; consume everything.
1925  */
1926 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1927 {
1928         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1929         struct be_adapter *adapter =
1930                 container_of(tx_eq, struct be_adapter, tx_eq);
1931         struct be_tx_obj *txo;
1932         struct be_eth_tx_compl *txcp;
1933         int tx_compl, mcc_compl, status = 0;
1934         u8 i;
1935         u16 num_wrbs;
1936
1937         for_all_tx_queues(adapter, txo, i) {
1938                 tx_compl = 0;
1939                 num_wrbs = 0;
1940                 while ((txcp = be_tx_compl_get(&txo->cq))) {
1941                         num_wrbs += be_tx_compl_process(adapter, txo,
1942                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
1943                                         wrb_index, txcp));
1944                         tx_compl++;
1945                 }
1946                 if (tx_compl) {
1947                         be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1948
1949                         atomic_sub(num_wrbs, &txo->q.used);
1950
1951                         /* As Tx wrbs have been freed up, wake up netdev queue
1952                          * if it was stopped due to lack of tx wrbs.  */
1953                         if (__netif_subqueue_stopped(adapter->netdev, i) &&
1954                                 atomic_read(&txo->q.used) < txo->q.len / 2) {
1955                                 netif_wake_subqueue(adapter->netdev, i);
1956                         }
1957
1958                         u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1959                         tx_stats(txo)->tx_compl += tx_compl;
1960                         u64_stats_update_end(&tx_stats(txo)->sync_compl);
1961                 }
1962         }
1963
1964         mcc_compl = be_process_mcc(adapter, &status);
1965
1966         if (mcc_compl) {
1967                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1968                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1969         }
1970
1971         napi_complete(napi);
1972
1973         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1974         adapter->drv_stats.tx_events++;
1975         return 1;
1976 }
1977
1978 void be_detect_dump_ue(struct be_adapter *adapter)
1979 {
1980         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
1981         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
1982         u32 i;
1983
1984         if (lancer_chip(adapter)) {
1985                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
1986                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1987                         sliport_err1 = ioread32(adapter->db +
1988                                         SLIPORT_ERROR1_OFFSET);
1989                         sliport_err2 = ioread32(adapter->db +
1990                                         SLIPORT_ERROR2_OFFSET);
1991                 }
1992         } else {
1993                 pci_read_config_dword(adapter->pdev,
1994                                 PCICFG_UE_STATUS_LOW, &ue_lo);
1995                 pci_read_config_dword(adapter->pdev,
1996                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
1997                 pci_read_config_dword(adapter->pdev,
1998                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
1999                 pci_read_config_dword(adapter->pdev,
2000                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2001
2002                 ue_lo &= ~ue_lo_mask;
2003                 ue_hi &= ~ue_hi_mask;
2004         }
2005
2006         if (ue_lo || ue_hi ||
2007                 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2008                 adapter->ue_detected = true;
2009                 adapter->eeh_err = true;
2010                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
2011         }
2012
2013         if (ue_lo) {
2014                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2015                         if (ue_lo & 1)
2016                                 dev_err(&adapter->pdev->dev,
2017                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2018                 }
2019         }
2020         if (ue_hi) {
2021                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2022                         if (ue_hi & 1)
2023                                 dev_err(&adapter->pdev->dev,
2024                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2025                 }
2026         }
2027
2028         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2029                 dev_err(&adapter->pdev->dev,
2030                         "sliport status 0x%x\n", sliport_status);
2031                 dev_err(&adapter->pdev->dev,
2032                         "sliport error1 0x%x\n", sliport_err1);
2033                 dev_err(&adapter->pdev->dev,
2034                         "sliport error2 0x%x\n", sliport_err2);
2035         }
2036 }
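
/*
 * Illustrative sketch, not part of the driver: the bit scan used by
 * be_detect_dump_ue() above. Masked-off bits are cleared first
 * (status & ~mask); the loop then shifts through the remaining bits,
 * using each set bit's position as an index into the description
 * table. demo_dump_ue_bits() is a hypothetical helper written only
 * for illustration.
 */
static void __maybe_unused demo_dump_ue_bits(u32 status, u32 mask,
                                             const char * const *desc)
{
        u32 i;

        status &= ~mask;
        for (i = 0; status; status >>= 1, i++)
                if (status & 1)
                        pr_err("UE: %s bit set\n", desc[i]);
}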
2037
2038 static void be_worker(struct work_struct *work)
2039 {
2040         struct be_adapter *adapter =
2041                 container_of(work, struct be_adapter, work.work);
2042         struct be_rx_obj *rxo;
2043         int i;
2044
2045         if (!adapter->ue_detected)
2046                 be_detect_dump_ue(adapter);
2047
2048         /* When interrupts are not yet enabled, just reap any pending
2049          * MCC completions */
2050         if (!netif_running(adapter->netdev)) {
2051                 int mcc_compl, status = 0;
2052
2053                 mcc_compl = be_process_mcc(adapter, &status);
2054
2055                 if (mcc_compl) {
2056                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2057                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2058                 }
2059
2060                 goto reschedule;
2061         }
2062
2063         if (!adapter->stats_cmd_sent) {
2064                 if (lancer_chip(adapter))
2065                         lancer_cmd_get_pport_stats(adapter,
2066                                                 &adapter->stats_cmd);
2067                 else
2068                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
2069         }
2070
2071         for_all_rx_queues(adapter, rxo, i) {
2072                 be_rx_eqd_update(adapter, rxo);
2073
2074                 if (rxo->rx_post_starved) {
2075                         rxo->rx_post_starved = false;
2076                         be_post_rx_frags(rxo, GFP_KERNEL);
2077                 }
2078         }
2079
2080 reschedule:
2081         adapter->work_counter++;
2082         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2083 }
2084
2085 static void be_msix_disable(struct be_adapter *adapter)
2086 {
2087         if (msix_enabled(adapter)) {
2088                 pci_disable_msix(adapter->pdev);
2089                 adapter->num_msix_vec = 0;
2090         }
2091 }
2092
2093 static void be_msix_enable(struct be_adapter *adapter)
2094 {
2095 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2096         int i, status, num_vec;
2097
2098         num_vec = be_num_rxqs_want(adapter) + 1;
2099
2100         for (i = 0; i < num_vec; i++)
2101                 adapter->msix_entries[i].entry = i;
2102
2103         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2104         if (status == 0) {
2105                 goto done;
2106         } else if (status >= BE_MIN_MSIX_VECTORS) {
2107                 num_vec = status;
2108                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2109                                 num_vec) == 0)
2110                         goto done;
2111         }
2112         return;
2113 done:
2114         adapter->num_msix_vec = num_vec;
2115         return;
2116 }
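
/*
 * Illustrative sketch, not part of the driver: the retry idiom used by
 * be_msix_enable() above. The legacy pci_enable_msix() returns 0 on
 * success, a negative errno on hard failure, or a positive count of
 * vectors actually available; in that case the request is retried once
 * with the smaller count. demo_enable_msix() is a hypothetical helper
 * written only for illustration.
 */
static int __maybe_unused demo_enable_msix(struct pci_dev *pdev,
                                           struct msix_entry *entries,
                                           int want, int min_vec)
{
        int rc = pci_enable_msix(pdev, entries, want);

        if (rc > 0 && rc >= min_vec)    /* fewer vectors on offer */
                rc = pci_enable_msix(pdev, entries, rc);
        return rc;
}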
2117
2118 static int be_sriov_enable(struct be_adapter *adapter)
2119 {
2120         be_check_sriov_fn_type(adapter);
2121 #ifdef CONFIG_PCI_IOV
2122         if (be_physfn(adapter) && num_vfs) {
2123                 int status, pos;
2124                 u16 nvfs;
2125
2126                 pos = pci_find_ext_capability(adapter->pdev,
2127                                                 PCI_EXT_CAP_ID_SRIOV);
2128                 pci_read_config_word(adapter->pdev,
2129                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2130
2131                 if (num_vfs > nvfs) {
2132                         dev_info(&adapter->pdev->dev,
2133                                         "Device supports %d VFs, not %d\n",
2134                                         nvfs, num_vfs);
2135                         num_vfs = nvfs;
2136                 }
2137
2138                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2139         adapter->sriov_enabled = !status;
2140
2141                 if (adapter->sriov_enabled) {
2142                         adapter->vf_cfg = kcalloc(num_vfs,
2143                                                 sizeof(struct be_vf_cfg),
2144                                                 GFP_KERNEL);
2145                         if (!adapter->vf_cfg)
2146                                 return -ENOMEM;
2147                 }
2148         }
2149 #endif
2150         return 0;
2151 }
2152
2153 static void be_sriov_disable(struct be_adapter *adapter)
2154 {
2155 #ifdef CONFIG_PCI_IOV
2156         if (adapter->sriov_enabled) {
2157                 pci_disable_sriov(adapter->pdev);
2158                 kfree(adapter->vf_cfg);
2159                 adapter->sriov_enabled = false;
2160         }
2161 #endif
2162 }
2163
2164 static inline int be_msix_vec_get(struct be_adapter *adapter,
2165                                         struct be_eq_obj *eq_obj)
2166 {
2167         return adapter->msix_entries[eq_obj->eq_idx].vector;
2168 }
2169
2170 static int be_request_irq(struct be_adapter *adapter,
2171                 struct be_eq_obj *eq_obj,
2172                 void *handler, char *desc, void *context)
2173 {
2174         struct net_device *netdev = adapter->netdev;
2175         int vec;
2176
2177         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2178         vec = be_msix_vec_get(adapter, eq_obj);
2179         return request_irq(vec, handler, 0, eq_obj->desc, context);
2180 }
2181
2182 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2183                         void *context)
2184 {
2185         int vec = be_msix_vec_get(adapter, eq_obj);
2186         free_irq(vec, context);
2187 }
2188
2189 static int be_msix_register(struct be_adapter *adapter)
2190 {
2191         struct be_rx_obj *rxo;
2192         int status, i;
2193         char qname[10];
2194
2195         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2196                                 adapter);
2197         if (status)
2198                 goto err;
2199
2200         for_all_rx_queues(adapter, rxo, i) {
2201                 sprintf(qname, "rxq%d", i);
2202                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2203                                 qname, rxo);
2204                 if (status)
2205                         goto err_msix;
2206         }
2207
2208         return 0;
2209
2210 err_msix:
2211         be_free_irq(adapter, &adapter->tx_eq, adapter);
2212
2213         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2214                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2215
2216 err:
2217         dev_warn(&adapter->pdev->dev,
2218                 "MSIX Request IRQ failed - err %d\n", status);
2219         be_msix_disable(adapter);
2220         return status;
2221 }
2222
2223 static int be_irq_register(struct be_adapter *adapter)
2224 {
2225         struct net_device *netdev = adapter->netdev;
2226         int status;
2227
2228         if (msix_enabled(adapter)) {
2229                 status = be_msix_register(adapter);
2230                 if (status == 0)
2231                         goto done;
2232                 /* INTx is not supported for VF */
2233                 if (!be_physfn(adapter))
2234                         return status;
2235         }
2236
2237         /* INTx */
2238         netdev->irq = adapter->pdev->irq;
2239         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2240                         adapter);
2241         if (status) {
2242                 dev_err(&adapter->pdev->dev,
2243                         "INTx request IRQ failed - err %d\n", status);
2244                 return status;
2245         }
2246 done:
2247         adapter->isr_registered = true;
2248         return 0;
2249 }
2250
2251 static void be_irq_unregister(struct be_adapter *adapter)
2252 {
2253         struct net_device *netdev = adapter->netdev;
2254         struct be_rx_obj *rxo;
2255         int i;
2256
2257         if (!adapter->isr_registered)
2258                 return;
2259
2260         /* INTx */
2261         if (!msix_enabled(adapter)) {
2262                 free_irq(netdev->irq, adapter);
2263                 goto done;
2264         }
2265
2266         /* MSIx */
2267         be_free_irq(adapter, &adapter->tx_eq, adapter);
2268
2269         for_all_rx_queues(adapter, rxo, i)
2270                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2271
2272 done:
2273         adapter->isr_registered = false;
2274 }
2275
2276 static void be_rx_queues_clear(struct be_adapter *adapter)
2277 {
2278         struct be_queue_info *q;
2279         struct be_rx_obj *rxo;
2280         int i;
2281
2282         for_all_rx_queues(adapter, rxo, i) {
2283                 q = &rxo->q;
2284                 if (q->created) {
2285                         be_cmd_rxq_destroy(adapter, q);
2286                         /* After the rxq is invalidated, wait for a grace time
2287                          * of 1ms for all dma to end and the flush compl to
2288                          * arrive
2289                          */
2290                         mdelay(1);
2291                         be_rx_q_clean(adapter, rxo);
2292                 }
2293
2294                 /* Clear any residual events */
2295                 q = &rxo->rx_eq.q;
2296                 if (q->created)
2297                         be_eq_clean(adapter, &rxo->rx_eq);
2298         }
2299 }
2300
2301 static int be_close(struct net_device *netdev)
2302 {
2303         struct be_adapter *adapter = netdev_priv(netdev);
2304         struct be_rx_obj *rxo;
2305         struct be_tx_obj *txo;
2306         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2307         int vec, i;
2308
2309         be_async_mcc_disable(adapter);
2310
2311         if (!lancer_chip(adapter))
2312                 be_intr_set(adapter, false);
2313
2314         for_all_rx_queues(adapter, rxo, i)
2315                 napi_disable(&rxo->rx_eq.napi);
2316
2317         napi_disable(&tx_eq->napi);
2318
2319         if (lancer_chip(adapter)) {
2320                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2321                 for_all_rx_queues(adapter, rxo, i)
2322                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2323                 for_all_tx_queues(adapter, txo, i)
2324                         be_cq_notify(adapter, txo->cq.id, false, 0);
2325         }
2326
2327         if (msix_enabled(adapter)) {
2328                 vec = be_msix_vec_get(adapter, tx_eq);
2329                 synchronize_irq(vec);
2330
2331                 for_all_rx_queues(adapter, rxo, i) {
2332                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2333                         synchronize_irq(vec);
2334                 }
2335         } else {
2336                 synchronize_irq(netdev->irq);
2337         }
2338         be_irq_unregister(adapter);
2339
2340         /* Wait for all pending tx completions to arrive so that
2341          * all tx skbs are freed.
2342          */
2343         for_all_tx_queues(adapter, txo, i)
2344                 be_tx_compl_clean(adapter, txo);
2345
2346         be_rx_queues_clear(adapter);
2347         return 0;
2348 }
2349
2350 static int be_rx_queues_setup(struct be_adapter *adapter)
2351 {
2352         struct be_rx_obj *rxo;
2353         int rc, i;
2354         u8 rsstable[MAX_RSS_QS];
2355
2356         for_all_rx_queues(adapter, rxo, i) {
2357                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2358                         rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2359                         adapter->if_handle,
2360                         (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2361                 if (rc)
2362                         return rc;
2363         }
2364
2365         if (be_multi_rxq(adapter)) {
2366                 for_all_rss_queues(adapter, rxo, i)
2367                         rsstable[i] = rxo->rss_id;
2368
2369                 rc = be_cmd_rss_config(adapter, rsstable,
2370                         adapter->num_rx_qs - 1);
2371                 if (rc)
2372                         return rc;
2373         }
2374
2375         /* First time posting */
2376         for_all_rx_queues(adapter, rxo, i) {
2377                 be_post_rx_frags(rxo, GFP_KERNEL);
2378                 napi_enable(&rxo->rx_eq.napi);
2379         }
2380         return 0;
2381 }
2382
2383 static int be_open(struct net_device *netdev)
2384 {
2385         struct be_adapter *adapter = netdev_priv(netdev);
2386         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2387         struct be_rx_obj *rxo;
2388         int status, i;
2389
2390         status = be_rx_queues_setup(adapter);
2391         if (status)
2392                 goto err;
2393
2394         napi_enable(&tx_eq->napi);
2395
2396         be_irq_register(adapter);
2397
2398         if (!lancer_chip(adapter))
2399                 be_intr_set(adapter, true);
2400
2401         /* The evt queues are created in unarmed state; arm them */
2402         for_all_rx_queues(adapter, rxo, i) {
2403                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2404                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2405         }
2406         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2407
2408         /* Now that interrupts are on we can process async mcc */
2409         be_async_mcc_enable(adapter);
2410
2411         return 0;
2412 err:
2413         be_close(adapter->netdev);
2414         return -EIO;
2415 }
2416
2417 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2418 {
2419         struct be_dma_mem cmd;
2420         int status = 0;
2421         u8 mac[ETH_ALEN];
2422
2423         memset(mac, 0, ETH_ALEN);
2424
2425         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2426         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2427                                     GFP_KERNEL);
2428         if (cmd.va == NULL)
2429                 return -1;
2430         memset(cmd.va, 0, cmd.size);
2431
2432         if (enable) {
2433                 status = pci_write_config_dword(adapter->pdev,
2434                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2435                 if (status) {
2436                         dev_err(&adapter->pdev->dev,
2437                                 "Could not enable Wake-on-LAN\n");
2438                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2439                                           cmd.dma);
2440                         return status;
2441                 }
2442                 status = be_cmd_enable_magic_wol(adapter,
2443                                 adapter->netdev->dev_addr, &cmd);
2444                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2445                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2446         } else {
2447                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2448                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2449                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2450         }
2451
2452         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2453         return status;
2454 }
2455
2456 /*
2457  * Generate a seed MAC address from the PF MAC Address using jhash.
2458  * MAC addresses for VFs are assigned incrementally starting from the seed.
2459  * These addresses are programmed in the ASIC by the PF and the VF driver
2460  * queries for the MAC address during its probe.
2461  */
2462 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2463 {
2464         u32 vf;
2465         int status = 0;
2466         u8 mac[ETH_ALEN];
2467
2468         be_vf_eth_addr_generate(adapter, mac);
2469
2470         for (vf = 0; vf < num_vfs; vf++) {
2471                 status = be_cmd_pmac_add(adapter, mac,
2472                                         adapter->vf_cfg[vf].vf_if_handle,
2473                                         &adapter->vf_cfg[vf].vf_pmac_id,
2474                                         vf + 1);
2475                 if (status)
2476                         dev_err(&adapter->pdev->dev,
2477                                 "Mac address add failed for VF %d\n", vf);
2478                 else
2479                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2480
2481                 mac[5] += 1;
2482         }
2483         return status;
2484 }
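
/*
 * Illustrative sketch, not part of the driver: the derivation done by
 * be_vf_eth_addr_config() above. The seed MAC is hashed from the PF
 * MAC; VF n then receives seed + n in the last octet. Note that
 * mac[5] += 1 never carries into the higher octets, which is fine for
 * the small VF counts supported here. demo_vf_mac() is a hypothetical
 * helper written only for illustration.
 */
static inline void __maybe_unused demo_vf_mac(u8 mac[ETH_ALEN], u32 vf)
{
        mac[5] += vf;   /* assumes vf is small enough not to wrap the octet */
}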
2485
2486 static void be_vf_clear(struct be_adapter *adapter)
2487 {
2488         u32 vf;
2489
2490         for (vf = 0; vf < num_vfs; vf++) {
2491                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2492                         be_cmd_pmac_del(adapter,
2493                                         adapter->vf_cfg[vf].vf_if_handle,
2494                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2495         }
2496
2497         for (vf = 0; vf < num_vfs; vf++)
2498                 if (adapter->vf_cfg[vf].vf_if_handle)
2499                         be_cmd_if_destroy(adapter,
2500                                 adapter->vf_cfg[vf].vf_if_handle, vf + 1);
2501 }
2502
2503 static int be_clear(struct be_adapter *adapter)
2504 {
2505         if (be_physfn(adapter) && adapter->sriov_enabled)
2506                 be_vf_clear(adapter);
2507
2508         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2509
2510         be_mcc_queues_destroy(adapter);
2511         be_rx_queues_destroy(adapter);
2512         be_tx_queues_destroy(adapter);
2513         adapter->eq_next_idx = 0;
2514
2515         adapter->be3_native = false;
2516         adapter->promiscuous = false;
2517
2518         /* tell fw we're done with firing cmds */
2519         be_cmd_fw_clean(adapter);
2520         return 0;
2521 }
2522
2523 static int be_vf_setup(struct be_adapter *adapter)
2524 {
2525         u32 cap_flags, en_flags, vf;
2526         u16 lnk_speed;
2527         int status;
2528
2529         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2530         for (vf = 0; vf < num_vfs; vf++) {
2531                 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2532                                         &adapter->vf_cfg[vf].vf_if_handle,
2533                                         NULL, vf+1);
2534                 if (status)
2535                         goto err;
2536                 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2537         }
2538
2539         if (!lancer_chip(adapter)) {
2540                 status = be_vf_eth_addr_config(adapter);
2541                 if (status)
2542                         goto err;
2543         }
2544
2545         for (vf = 0; vf < num_vfs; vf++) {
2546                 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2547                                 vf + 1);
2548                 if (status)
2549                         goto err;
2550                 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
2551         }
2552         return 0;
2553 err:
2554         return status;
2555 }
2556
2557 static int be_setup(struct be_adapter *adapter)
2558 {
2559         struct net_device *netdev = adapter->netdev;
2560         u32 cap_flags, en_flags;
2561         u32 tx_fc, rx_fc;
2562         int status;
2563         u8 mac[ETH_ALEN];
2564
2565         /* Allow all priorities by default. A GRP5 evt may modify this */
2566         adapter->vlan_prio_bmap = 0xff;
2567         adapter->link_speed = -1;
2568
2569         be_cmd_req_native_mode(adapter);
2570
2571         status = be_tx_queues_create(adapter);
2572         if (status != 0)
2573                 goto err;
2574
2575         status = be_rx_queues_create(adapter);
2576         if (status != 0)
2577                 goto err;
2578
2579         status = be_mcc_queues_create(adapter);
2580         if (status != 0)
2581                 goto err;
2582
2583         memset(mac, 0, ETH_ALEN);
2584         status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2585                         true /*permanent */, 0);
2586         if (status)
2587                 return status;
2588         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2589         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2590
2591         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2592                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2593         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2594                         BE_IF_FLAGS_PROMISCUOUS;
2595         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2596                 cap_flags |= BE_IF_FLAGS_RSS;
2597                 en_flags |= BE_IF_FLAGS_RSS;
2598         }
2599         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2600                         netdev->dev_addr, &adapter->if_handle,
2601                         &adapter->pmac_id, 0);
2602         if (status != 0)
2603                 goto err;
2604
2605         /* For BEx, the VF's permanent mac queried from the card is incorrect.
2606          * Query the mac configured by the PF using if_handle
2607          */
2608         if (!be_physfn(adapter) && !lancer_chip(adapter)) {
2609                 status = be_cmd_mac_addr_query(adapter, mac,
2610                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2611                 if (!status) {
2612                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2613                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2614                 }
2615         }
2616
2617         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2618
2619         status = be_vid_config(adapter, false, 0);
2620         if (status)
2621                 goto err;
2622
2623         be_set_rx_mode(adapter->netdev);
2624
2625         status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2626         if (status)
2627                 goto err;
2628         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2629                 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2630                                         adapter->rx_fc);
2631                 if (status)
2632                         goto err;
2633         }
2634
2635         pcie_set_readrq(adapter->pdev, 4096);
2636
2637         if (be_physfn(adapter) && adapter->sriov_enabled) {
2638                 status = be_vf_setup(adapter);
2639                 if (status)
2640                         goto err;
2641         }
2642
2643         return 0;
2644 err:
2645         be_clear(adapter);
2646         return status;
2647 }
2648
2649 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2650 static bool be_flash_redboot(struct be_adapter *adapter,
2651                         const u8 *p, u32 img_start, int image_size,
2652                         int hdr_size)
2653 {
2654         u32 crc_offset;
2655         u8 flashed_crc[4];
2656         int status;
2657
2658         crc_offset = hdr_size + img_start + image_size - 4;
2659
2660         p += crc_offset;
2661
2662         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2663                         (image_size - 4));
2664         if (status) {
2665                 dev_err(&adapter->pdev->dev,
2666                 "could not get crc from flash, not flashing redboot\n");
2667                 return false;
2668         }
2669
2670         /* Update redboot only if the crc does not match */
2671         if (!memcmp(flashed_crc, p, 4))
2672                 return false;
2673         else
2674                 return true;
2675 }
2676
2677 static bool phy_flashing_required(struct be_adapter *adapter)
2678 {
2679         int status = 0;
2680         struct be_phy_info phy_info;
2681
2682         status = be_cmd_get_phy_info(adapter, &phy_info);
2683         if (status)
2684                 return false;
2685         if ((phy_info.phy_type == TN_8022) &&
2686                 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2687                 return true;
2688         }
2689         return false;
2690 }
2691
2692 static int be_flash_data(struct be_adapter *adapter,
2693                         const struct firmware *fw,
2694                         struct be_dma_mem *flash_cmd, int num_of_images)
2695
2696 {
2697         int status = 0, i, filehdr_size = 0;
2698         u32 total_bytes = 0, flash_op;
2699         int num_bytes;
2700         const u8 *p = fw->data;
2701         struct be_cmd_write_flashrom *req = flash_cmd->va;
2702         const struct flash_comp *pflashcomp;
2703         int num_comp;
2704
2705         static const struct flash_comp gen3_flash_types[10] = {
2706                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2707                         FLASH_IMAGE_MAX_SIZE_g3},
2708                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2709                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2710                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2711                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2712                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2713                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2714                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2715                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2716                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2717                         FLASH_IMAGE_MAX_SIZE_g3},
2718                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2719                         FLASH_IMAGE_MAX_SIZE_g3},
2720                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2721                         FLASH_IMAGE_MAX_SIZE_g3},
2722                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2723                         FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2724                 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2725                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2726         };
2727         static const struct flash_comp gen2_flash_types[8] = {
2728                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2729                         FLASH_IMAGE_MAX_SIZE_g2},
2730                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2731                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2732                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2733                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2734                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2735                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2736                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2737                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2738                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2739                         FLASH_IMAGE_MAX_SIZE_g2},
2740                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2741                         FLASH_IMAGE_MAX_SIZE_g2},
2742                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2743                          FLASH_IMAGE_MAX_SIZE_g2}
2744         };
2745
2746         if (adapter->generation == BE_GEN3) {
2747                 pflashcomp = gen3_flash_types;
2748                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2749                 num_comp = ARRAY_SIZE(gen3_flash_types);
2750         } else {
2751                 pflashcomp = gen2_flash_types;
2752                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2753                 num_comp = ARRAY_SIZE(gen2_flash_types);
2754         }
2755         for (i = 0; i < num_comp; i++) {
2756                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2757                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2758                         continue;
2759                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2760                         if (!phy_flashing_required(adapter))
2761                                 continue;
2762                 }
2763                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2764                         (!be_flash_redboot(adapter, fw->data,
2765                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2766                         (num_of_images * sizeof(struct image_hdr)))))
2767                         continue;
2768                 p = fw->data;
2769                 p += filehdr_size + pflashcomp[i].offset
2770                         + (num_of_images * sizeof(struct image_hdr));
2771                 if (p + pflashcomp[i].size > fw->data + fw->size)
2772                         return -1;
2773                 total_bytes = pflashcomp[i].size;
2774                 while (total_bytes) {
2775                         if (total_bytes > 32*1024)
2776                                 num_bytes = 32*1024;
2777                         else
2778                                 num_bytes = total_bytes;
2779                         total_bytes -= num_bytes;
2780                         if (!total_bytes) {
2781                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2782                                         flash_op = FLASHROM_OPER_PHY_FLASH;
2783                                 else
2784                                         flash_op = FLASHROM_OPER_FLASH;
2785                         } else {
2786                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2787                                         flash_op = FLASHROM_OPER_PHY_SAVE;
2788                                 else
2789                                         flash_op = FLASHROM_OPER_SAVE;
2790                         }
2791                         memcpy(req->params.data_buf, p, num_bytes);
2792                         p += num_bytes;
2793                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2794                                 pflashcomp[i].optype, flash_op, num_bytes);
2795                         if (status) {
2796                                 if ((status == ILLEGAL_IOCTL_REQ) &&
2797                                         (pflashcomp[i].optype ==
2798                                                 IMG_TYPE_PHY_FW))
2799                                         break;
2800                                 dev_err(&adapter->pdev->dev,
2801                                         "cmd to write to flash rom failed.\n");
2802                                 return -1;
2803                         }
2804                 }
2805         }
2806         return 0;
2807 }
2808
2809 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2810 {
2811         if (fhdr == NULL)
2812                 return 0;
2813         if (fhdr->build[0] == '3')
2814                 return BE_GEN3;
2815         else if (fhdr->build[0] == '2')
2816                 return BE_GEN2;
2817         else
2818                 return 0;
2819 }
2820
2821 static int lancer_fw_download(struct be_adapter *adapter,
2822                                 const struct firmware *fw)
2823 {
2824 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2825 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2826         struct be_dma_mem flash_cmd;
2827         const u8 *data_ptr = NULL;
2828         u8 *dest_image_ptr = NULL;
2829         size_t image_size = 0;
2830         u32 chunk_size = 0;
2831         u32 data_written = 0;
2832         u32 offset = 0;
2833         int status = 0;
2834         u8 add_status = 0;
2835
2836         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2837                 dev_err(&adapter->pdev->dev,
2838                         "FW image not properly aligned. "
2839                         "Length must be 4-byte aligned.\n");
2840                 status = -EINVAL;
2841                 goto lancer_fw_exit;
2842         }
2843
2844         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2845                                 + LANCER_FW_DOWNLOAD_CHUNK;
2846         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2847                                                 &flash_cmd.dma, GFP_KERNEL);
2848         if (!flash_cmd.va) {
2849                 status = -ENOMEM;
2850                 dev_err(&adapter->pdev->dev,
2851                         "Memory allocation failure while flashing\n");
2852                 goto lancer_fw_exit;
2853         }
2854
2855         dest_image_ptr = flash_cmd.va +
2856                                 sizeof(struct lancer_cmd_req_write_object);
2857         image_size = fw->size;
2858         data_ptr = fw->data;
2859
2860         while (image_size) {
2861                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2862
2863                 /* Copy the image chunk content. */
2864                 memcpy(dest_image_ptr, data_ptr, chunk_size);
2865
2866                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2867                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2868                                 &data_written, &add_status);
2869
2870                 if (status)
2871                         break;
2872
2873                 offset += data_written;
2874                 data_ptr += data_written;
2875                 image_size -= data_written;
2876         }
2877
2878         if (!status) {
2879                 /* Commit the FW written */
2880                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2881                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2882                                         &data_written, &add_status);
2883         }
2884
2885         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2886                                 flash_cmd.dma);
2887         if (status) {
2888                 dev_err(&adapter->pdev->dev,
2889                         "Firmware load error. "
2890                         "Status code: 0x%x Additional Status: 0x%x\n",
2891                         status, add_status);
2892                 goto lancer_fw_exit;
2893         }
2894
2895         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2896 lancer_fw_exit:
2897         return status;
2898 }
2899
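/* BE2/BE3 firmware download: the UFI file's generation must match the
 * adapter's. A gen3 UFI carries a table of image headers, of which only
 * entries with imageid 1 are flashed; gen2 UFIs are flashed directly
 * with no image table.
 */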
2900 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2901 {
2902         struct flash_file_hdr_g2 *fhdr;
2903         struct flash_file_hdr_g3 *fhdr3;
2904         struct image_hdr *img_hdr_ptr = NULL;
2905         struct be_dma_mem flash_cmd;
2906         const u8 *p;
2907         int status = 0, i = 0, num_imgs = 0;
2908
2909         p = fw->data;
2910         fhdr = (struct flash_file_hdr_g2 *) p;
2911
2912         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2913         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2914                                           &flash_cmd.dma, GFP_KERNEL);
2915         if (!flash_cmd.va) {
2916                 status = -ENOMEM;
2917                 dev_err(&adapter->pdev->dev,
2918                         "Memory allocation failure while flashing\n");
2919                 goto be_fw_exit;
2920         }
2921
2922         if ((adapter->generation == BE_GEN3) &&
2923                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2924                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2925                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2926                 for (i = 0; i < num_imgs; i++) {
2927                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2928                                         (sizeof(struct flash_file_hdr_g3) +
2929                                          i * sizeof(struct image_hdr)));
2930                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2931                                 status = be_flash_data(adapter, fw, &flash_cmd,
2932                                                         num_imgs);
2933                 }
2934         } else if ((adapter->generation == BE_GEN2) &&
2935                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2936                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2937         } else {
2938                 dev_err(&adapter->pdev->dev,
2939                         "UFI and Interface are not compatible for flashing\n");
2940                 status = -1;
2941         }
2942
2943         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2944                           flash_cmd.dma);
2945         if (status) {
2946                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2947                 goto be_fw_exit;
2948         }
2949
2950         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2951
2952 be_fw_exit:
2953         return status;
2954 }
2955
2956 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2957 {
2958         const struct firmware *fw;
2959         int status;
2960
2961         if (!netif_running(adapter->netdev)) {
2962                 dev_err(&adapter->pdev->dev,
2963                         "Firmware load not allowed (interface is down)\n");
2964                 return -ENETDOWN;
2965         }
2966
2967         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2968         if (status)
2969                 goto fw_exit;
2970
2971         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2972
2973         if (lancer_chip(adapter))
2974                 status = lancer_fw_download(adapter, fw);
2975         else
2976                 status = be_fw_download(adapter, fw);
2977
2978 fw_exit:
2979         release_firmware(fw);
2980         return status;
2981 }
2982
2983 static const struct net_device_ops be_netdev_ops = {
2984         .ndo_open               = be_open,
2985         .ndo_stop               = be_close,
2986         .ndo_start_xmit         = be_xmit,
2987         .ndo_set_rx_mode        = be_set_rx_mode,
2988         .ndo_set_mac_address    = be_mac_addr_set,
2989         .ndo_change_mtu         = be_change_mtu,
2990         .ndo_get_stats64        = be_get_stats64,
2991         .ndo_validate_addr      = eth_validate_addr,
2992         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2993         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2994         .ndo_set_vf_mac         = be_set_vf_mac,
2995         .ndo_set_vf_vlan        = be_set_vf_vlan,
2996         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2997         .ndo_get_vf_config      = be_get_vf_config
2998 };
2999
3000 static void be_netdev_init(struct net_device *netdev)
3001 {
3002         struct be_adapter *adapter = netdev_priv(netdev);
3003         struct be_rx_obj *rxo;
3004         int i;
3005
3006         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3007                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3008                 NETIF_F_HW_VLAN_TX;
3009         if (be_multi_rxq(adapter))
3010                 netdev->hw_features |= NETIF_F_RXHASH;
3011
3012         netdev->features |= netdev->hw_features |
3013                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3014
3015         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3016                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3017
3018         netdev->flags |= IFF_MULTICAST;
3019
3020         netif_set_gso_max_size(netdev, 65535);
3021
3022         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3023
3024         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3025
3026         for_all_rx_queues(adapter, rxo, i)
3027                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3028                                 BE_NAPI_WEIGHT);
3029
3030         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
3031                 BE_NAPI_WEIGHT);
3032 }
3033
3034 static void be_unmap_pci_bars(struct be_adapter *adapter)
3035 {
3036         if (adapter->csr)
3037                 iounmap(adapter->csr);
3038         if (adapter->db)
3039                 iounmap(adapter->db);
3040 }
3041
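/* Map the PCI BARs this chip needs: Lancer uses only BAR 0 (doorbells);
 * BE2/BE3 map the CSR BAR (2) on the PF plus a doorbell BAR - BAR 4,
 * except for BE3 VFs, which use BAR 0.
 */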
3042 static int be_map_pci_bars(struct be_adapter *adapter)
3043 {
3044         u8 __iomem *addr;
3045         int db_reg;
3046
3047         if (lancer_chip(adapter)) {
3048                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3049                         pci_resource_len(adapter->pdev, 0));
3050                 if (addr == NULL)
3051                         return -ENOMEM;
3052                 adapter->db = addr;
3053                 return 0;
3054         }
3055
3056         if (be_physfn(adapter)) {
3057                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3058                                 pci_resource_len(adapter->pdev, 2));
3059                 if (addr == NULL)
3060                         return -ENOMEM;
3061                 adapter->csr = addr;
3062         }
3063
3064         if (adapter->generation == BE_GEN2) {
3065                 db_reg = 4;
3066         } else {
3067                 if (be_physfn(adapter))
3068                         db_reg = 4;
3069                 else
3070                         db_reg = 0;
3071         }
3072         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3073                                 pci_resource_len(adapter->pdev, db_reg));
3074         if (addr == NULL)
3075                 goto pci_map_err;
3076         adapter->db = addr;
3077
3078         return 0;
3079 pci_map_err:
3080         be_unmap_pci_bars(adapter);
3081         return -ENOMEM;
3082 }
3083
3085 static void be_ctrl_cleanup(struct be_adapter *adapter)
3086 {
3087         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3088
3089         be_unmap_pci_bars(adapter);
3090
3091         if (mem->va)
3092                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3093                                   mem->dma);
3094
3095         mem = &adapter->rx_filter;
3096         if (mem->va)
3097                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3098                                   mem->dma);
3099 }
3100
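/* Set up mailbox and rx-filter DMA memory and the command locks. The
 * mailbox is over-allocated by 16 bytes so that both its virtual and
 * bus addresses can be aligned to a 16-byte boundary with PTR_ALIGN,
 * presumably a requirement of the hardware mailbox interface.
 */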
3101 static int be_ctrl_init(struct be_adapter *adapter)
3102 {
3103         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3104         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3105         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3106         int status;
3107
3108         status = be_map_pci_bars(adapter);
3109         if (status)
3110                 goto done;
3111
3112         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3113         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3114                                                 mbox_mem_alloc->size,
3115                                                 &mbox_mem_alloc->dma,
3116                                                 GFP_KERNEL);
3117         if (!mbox_mem_alloc->va) {
3118                 status = -ENOMEM;
3119                 goto unmap_pci_bars;
3120         }
3121         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3122         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3123         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3124         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3125
3126         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3127         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3128                                         &rx_filter->dma, GFP_KERNEL);
3129         if (rx_filter->va == NULL) {
3130                 status = -ENOMEM;
3131                 goto free_mbox;
3132         }
3133         memset(rx_filter->va, 0, rx_filter->size);
3134
3135         mutex_init(&adapter->mbox_lock);
3136         spin_lock_init(&adapter->mcc_lock);
3137         spin_lock_init(&adapter->mcc_cq_lock);
3138
3139         init_completion(&adapter->flash_compl);
3140         pci_save_state(adapter->pdev);
3141         return 0;
3142
3143 free_mbox:
3144         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3145                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3146
3147 unmap_pci_bars:
3148         be_unmap_pci_bars(adapter);
3149
3150 done:
3151         return status;
3152 }
3153
3154 static void be_stats_cleanup(struct be_adapter *adapter)
3155 {
3156         struct be_dma_mem *cmd = &adapter->stats_cmd;
3157
3158         if (cmd->va)
3159                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3160                                   cmd->va, cmd->dma);
3161 }
3162
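/* The stats command buffer size is generation specific: v0 stats for
 * BE2, pport stats for Lancer and v1 stats otherwise.
 */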
3163 static int be_stats_init(struct be_adapter *adapter)
3164 {
3165         struct be_dma_mem *cmd = &adapter->stats_cmd;
3166
3167         if (adapter->generation == BE_GEN2) {
3168                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3169         } else {
3170                 if (lancer_chip(adapter))
3171                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3172                 else
3173                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3174         }
3175         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3176                                      GFP_KERNEL);
3177         if (cmd->va == NULL)
3178                 return -ENOMEM;
3179         memset(cmd->va, 0, cmd->size);
3180         return 0;
3181 }
3182
3183 static void __devexit be_remove(struct pci_dev *pdev)
3184 {
3185         struct be_adapter *adapter = pci_get_drvdata(pdev);
3186
3187         if (!adapter)
3188                 return;
3189
3190         cancel_delayed_work_sync(&adapter->work);
3191
3192         unregister_netdev(adapter->netdev);
3193
3194         be_clear(adapter);
3195
3196         be_stats_cleanup(adapter);
3197
3198         be_ctrl_cleanup(adapter);
3199
3200         be_sriov_disable(adapter);
3201
3202         be_msix_disable(adapter);
3203
3204         pci_set_drvdata(pdev, NULL);
3205         pci_release_regions(pdev);
3206         pci_disable_device(pdev);
3207
3208         free_netdev(adapter->netdev);
3209 }
3210
3211 static int be_get_config(struct be_adapter *adapter)
3212 {
3213         int status;
3214
3215         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3216                         &adapter->function_mode, &adapter->function_caps);
3217         if (status)
3218                 return status;
3219
3220         if (adapter->function_mode & FLEX10_MODE)
3221                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3222         else
3223                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3224
3225         status = be_cmd_get_cntl_attributes(adapter);
3226         if (status)
3227                 return status;
3228
3229         return 0;
3230 }
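/* Identify the controller generation from the PCI device id. The
 * Lancer ids (OC_DEVICE_ID3/4) are additionally validated against the
 * SLI_INTF register, which also supplies the SLI family.
 */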
3231
3232 static int be_dev_family_check(struct be_adapter *adapter)
3233 {
3234         struct pci_dev *pdev = adapter->pdev;
3235         u32 sli_intf = 0, if_type;
3236
3237         switch (pdev->device) {
3238         case BE_DEVICE_ID1:
3239         case OC_DEVICE_ID1:
3240                 adapter->generation = BE_GEN2;
3241                 break;
3242         case BE_DEVICE_ID2:
3243         case OC_DEVICE_ID2:
3244                 adapter->generation = BE_GEN3;
3245                 break;
3246         case OC_DEVICE_ID3:
3247         case OC_DEVICE_ID4:
3248                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3249                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3250                                                 SLI_INTF_IF_TYPE_SHIFT;
3251
3252                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3253                         if_type != 0x02) {
3254                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3255                         return -EINVAL;
3256                 }
3257                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3258                                          SLI_INTF_FAMILY_SHIFT);
3259                 adapter->generation = BE_GEN3;
3260                 break;
3261         default:
3262                 adapter->generation = 0;
3263         }
3264         return 0;
3265 }
3266
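/* Poll the SLIPORT status register for the ready bit: up to 500
 * iterations of 20ms each, i.e. a timeout of roughly 10 seconds.
 */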
3267 static int lancer_wait_ready(struct be_adapter *adapter)
3268 {
3269 #define SLIPORT_READY_TIMEOUT 500
3270         u32 sliport_status;
3271         int status = 0, i;
3272
3273         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3274                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3275                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3276                         break;
3277
3278                 msleep(20);
3279         }
3280
3281         if (i == SLIPORT_READY_TIMEOUT)
3282                 status = -1;
3283
3284         return status;
3285 }
3286
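/* If the port reports an error with "reset needed" set, trigger a
 * port reset through the control register and re-poll until the port
 * is ready with the error bits cleared.
 */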
3287 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3288 {
3289         int status;
3290         u32 sliport_status, err, reset_needed;
3291         status = lancer_wait_ready(adapter);
3292         if (!status) {
3293                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3294                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3295                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3296                 if (err && reset_needed) {
3297                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3298                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3299
3300                         /* check if the adapter has corrected the error */
3301                         status = lancer_wait_ready(adapter);
3302                         sliport_status = ioread32(adapter->db +
3303                                                         SLIPORT_STATUS_OFFSET);
3304                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3305                                                 SLIPORT_STATUS_RN_MASK);
3306                         if (status || sliport_status)
3307                                 status = -1;
3308                 } else if (err || reset_needed) {
3309                         status = -1;
3310                 }
3311         }
3312         return status;
3313 }
3314
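/* PCI probe: resources are acquired in dependency order - PCI device
 * and regions, netdev, DMA mask, SR-IOV, control structures, FW
 * POST/init/reset, stats, MSI-X and finally be_setup() plus netdev
 * registration. The error labels below unwind in reverse order.
 */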
3315 static int __devinit be_probe(struct pci_dev *pdev,
3316                         const struct pci_device_id *pdev_id)
3317 {
3318         int status = 0;
3319         struct be_adapter *adapter;
3320         struct net_device *netdev;
3321
3322         status = pci_enable_device(pdev);
3323         if (status)
3324                 goto do_none;
3325
3326         status = pci_request_regions(pdev, DRV_NAME);
3327         if (status)
3328                 goto disable_dev;
3329         pci_set_master(pdev);
3330
3331         netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3332         if (netdev == NULL) {
3333                 status = -ENOMEM;
3334                 goto rel_reg;
3335         }
3336         adapter = netdev_priv(netdev);
3337         adapter->pdev = pdev;
3338         pci_set_drvdata(pdev, adapter);
3339
3340         status = be_dev_family_check(adapter);
3341         if (status)
3342                 goto free_netdev;
3343
3344         adapter->netdev = netdev;
3345         SET_NETDEV_DEV(netdev, &pdev->dev);
3346
3347         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3348         if (!status) {
3349                 netdev->features |= NETIF_F_HIGHDMA;
3350         } else {
3351                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3352                 if (status) {
3353                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3354                         goto free_netdev;
3355                 }
3356         }
3357
3358         status = be_sriov_enable(adapter);
3359         if (status)
3360                 goto free_netdev;
3361
3362         status = be_ctrl_init(adapter);
3363         if (status)
3364                 goto disable_sriov;
3365
3366         if (lancer_chip(adapter)) {
3367                 status = lancer_test_and_set_rdy_state(adapter);
3368                 if (status) {
3369                         dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3370                         goto ctrl_clean;
3371                 }
3372         }
3373
3374         /* sync up with fw's ready state */
3375         if (be_physfn(adapter)) {
3376                 status = be_cmd_POST(adapter);
3377                 if (status)
3378                         goto ctrl_clean;
3379         }
3380
3381         /* tell fw we're ready to fire cmds */
3382         status = be_cmd_fw_init(adapter);
3383         if (status)
3384                 goto ctrl_clean;
3385
3386         status = be_cmd_reset_function(adapter);
3387         if (status)
3388                 goto ctrl_clean;
3389
3390         status = be_stats_init(adapter);
3391         if (status)
3392                 goto ctrl_clean;
3393
3394         status = be_get_config(adapter);
3395         if (status)
3396                 goto stats_clean;
3397
3398         /* The INTR bit may be set in the card when probed by a kdump kernel
3399          * after a crash.
3400          */
3401         if (!lancer_chip(adapter))
3402                 be_intr_set(adapter, false);
3403
3404         be_msix_enable(adapter);
3405
3406         INIT_DELAYED_WORK(&adapter->work, be_worker);
3407         adapter->rx_fc = adapter->tx_fc = true;
3408
3409         status = be_setup(adapter);
3410         if (status)
3411                 goto msix_disable;
3412
3413         be_netdev_init(netdev);
3414         status = register_netdev(netdev);
3415         if (status != 0)
3416                 goto unsetup;
3417
3418         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3419
3420         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3421         return 0;
3422
3423 unsetup:
3424         be_clear(adapter);
3425 msix_disable:
3426         be_msix_disable(adapter);
3427 stats_clean:
3428         be_stats_cleanup(adapter);
3429 ctrl_clean:
3430         be_ctrl_cleanup(adapter);
3431 disable_sriov:
3432         be_sriov_disable(adapter);
3433 free_netdev:
3434         free_netdev(netdev);
3435         pci_set_drvdata(pdev, NULL);
3436 rel_reg:
3437         pci_release_regions(pdev);
3438 disable_dev:
3439         pci_disable_device(pdev);
3440 do_none:
3441         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3442         return status;
3443 }
3444
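/* Suspend: stop the worker, arm wake-on-lan if configured, close the
 * interface under rtnl and tear down rings and MSI-X before the device
 * is powered down.
 */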
3445 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3446 {
3447         struct be_adapter *adapter = pci_get_drvdata(pdev);
3448         struct net_device *netdev =  adapter->netdev;
3449
3450         cancel_delayed_work_sync(&adapter->work);
3451         if (adapter->wol)
3452                 be_setup_wol(adapter, true);
3453
3454         netif_device_detach(netdev);
3455         if (netif_running(netdev)) {
3456                 rtnl_lock();
3457                 be_close(netdev);
3458                 rtnl_unlock();
3459         }
3460         be_clear(adapter);
3461
3462         be_msix_disable(adapter);
3463         pci_save_state(pdev);
3464         pci_disable_device(pdev);
3465         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3466         return 0;
3467 }
3468
3469 static int be_resume(struct pci_dev *pdev)
3470 {
3471         int status = 0;
3472         struct be_adapter *adapter = pci_get_drvdata(pdev);
3473         struct net_device *netdev =  adapter->netdev;
3474
3475         netif_device_detach(netdev);
3476
3477         status = pci_enable_device(pdev);
3478         if (status)
3479                 return status;
3480
3481         pci_set_power_state(pdev, PCI_D0);
3482         pci_restore_state(pdev);
3483
3484         be_msix_enable(adapter);
3485         /* tell fw we're ready to fire cmds */
3486         status = be_cmd_fw_init(adapter);
3487         if (status)
3488                 return status;
3489
3490         status = be_setup(adapter);
             if (status)
                     return status;
3491         if (netif_running(netdev)) {
3492                 rtnl_lock();
3493                 be_open(netdev);
3494                 rtnl_unlock();
3495         }
3496         netif_device_attach(netdev);
3497
3498         if (adapter->wol)
3499                 be_setup_wol(adapter, false);
3500
3501         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3502         return 0;
3503 }
3504
3505 /*
3506  * An FLR will stop BE from DMAing any data.
3507  */
3508 static void be_shutdown(struct pci_dev *pdev)
3509 {
3510         struct be_adapter *adapter = pci_get_drvdata(pdev);
3511
3512         if (!adapter)
3513                 return;
3514
3515         cancel_delayed_work_sync(&adapter->work);
3516
3517         netif_device_detach(adapter->netdev);
3518
3519         if (adapter->wol)
3520                 be_setup_wol(adapter, true);
3521
3522         be_cmd_reset_function(adapter);
3523
3524         pci_disable_device(pdev);
3525 }
3526
3527 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3528                                 pci_channel_state_t state)
3529 {
3530         struct be_adapter *adapter = pci_get_drvdata(pdev);
3531         struct net_device *netdev =  adapter->netdev;
3532
3533         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3534
3535         adapter->eeh_err = true;
3536
3537         netif_device_detach(netdev);
3538
3539         if (netif_running(netdev)) {
3540                 rtnl_lock();
3541                 be_close(netdev);
3542                 rtnl_unlock();
3543         }
3544         be_clear(adapter);
3545
3546         if (state == pci_channel_io_perm_failure)
3547                 return PCI_ERS_RESULT_DISCONNECT;
3548
3549         pci_disable_device(pdev);
3550
3551         return PCI_ERS_RESULT_NEED_RESET;
3552 }
3553
3554 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3555 {
3556         struct be_adapter *adapter = pci_get_drvdata(pdev);
3557         int status;
3558
3559         dev_info(&adapter->pdev->dev, "EEH reset\n");
3560         adapter->eeh_err = false;
3561
3562         status = pci_enable_device(pdev);
3563         if (status)
3564                 return PCI_ERS_RESULT_DISCONNECT;
3565
3566         pci_set_master(pdev);
3567         pci_set_power_state(pdev, PCI_D0);
3568         pci_restore_state(pdev);
3569
3570         /* Check if card is ok and fw is ready */
3571         status = be_cmd_POST(adapter);
3572         if (status)
3573                 return PCI_ERS_RESULT_DISCONNECT;
3574
3575         return PCI_ERS_RESULT_RECOVERED;
3576 }
3577
3578 static void be_eeh_resume(struct pci_dev *pdev)
3579 {
3580         int status = 0;
3581         struct be_adapter *adapter = pci_get_drvdata(pdev);
3582         struct net_device *netdev =  adapter->netdev;
3583
3584         dev_info(&adapter->pdev->dev, "EEH resume\n");
3585
3586         pci_save_state(pdev);
3587
3588         /* tell fw we're ready to fire cmds */
3589         status = be_cmd_fw_init(adapter);
3590         if (status)
3591                 goto err;
3592
3593         status = be_setup(adapter);
3594         if (status)
3595                 goto err;
3596
3597         if (netif_running(netdev)) {
3598                 status = be_open(netdev);
3599                 if (status)
3600                         goto err;
3601         }
3602         netif_device_attach(netdev);
3603         return;
3604 err:
3605         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3606 }
3607
3608 static struct pci_error_handlers be_eeh_handlers = {
3609         .error_detected = be_eeh_err_detected,
3610         .slot_reset = be_eeh_reset,
3611         .resume = be_eeh_resume,
3612 };
3613
3614 static struct pci_driver be_driver = {
3615         .name = DRV_NAME,
3616         .id_table = be_dev_ids,
3617         .probe = be_probe,
3618         .remove = be_remove,
3619         .suspend = be_suspend,
3620         .resume = be_resume,
3621         .shutdown = be_shutdown,
3622         .err_handler = &be_eeh_handlers
3623 };
3624
3625 static int __init be_init_module(void)
3626 {
3627         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3628             rx_frag_size != 2048) {
3629                 printk(KERN_WARNING DRV_NAME
3630                         " : Module param rx_frag_size must be 2048/4096/8192."
3631                         " Using 2048\n");
3632                 rx_frag_size = 2048;
3633         }
3634
3635         return pci_register_driver(&be_driver);
3636 }
3637 module_init(be_init_module);
3638
3639 static void __exit be_exit_module(void)
3640 {
3641         pci_unregister_driver(&be_driver);
3642 }
3643 module_exit(be_exit_module);