/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

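/* Allocate/free the DMA-coherent memory that backs a BE queue; these
 * helpers are used for the event, completion, Rx and Tx rings alike.
 */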
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

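/* Toggle the HOSTINTR bit in the PCI-config MEMBAR control register to
 * enable or disable host interrupts; no-op if the bit already matches.
 */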
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_err)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

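/* Doorbell helpers: each iowrite32() tells the adapter how many entries
 * were posted to (or popped from) a ring; wmb() makes the ring contents
 * visible to the device before the doorbell is rung.
 */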
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

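/* ndo_set_mac_address handler. On a PF the old pmac entry is deleted
 * and the new one added in hardware; for a VF only the netdev copy is
 * updated, since VF MACs are programmed by the owning PF.
 */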
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

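/* The populate_*_stats() routines copy the chip-specific hardware stats
 * layouts (BE2 v0, BE3 v1, Lancer pport) into the common
 * adapter->drv_stats format consumed by be_get_stats64() below.
 */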
static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

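/* Fold a 16-bit HW counter that wraps at 65535 into a 32-bit software
 * accumulator: the low half tracks the HW value, the high half counts
 * observed wraps.
 */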
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }

        /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
        for_all_rx_queues(adapter, rxo, i) {
                /* below erx HW counter can actually wrap around after
                 * 65535. Driver accumulates a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                (u16)erx->rx_drops_no_fragments[rxo->q.id]);
        }
}

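/* ndo_get_stats64 handler: sum the per-queue Rx/Tx counters (the
 * u64_stats seqcount yields consistent 64-bit snapshots on 32-bit
 * hosts) and derive the rtnl error counters from drv_stats.
 */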
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
        struct net_device *netdev = adapter->netdev;

        /* when link status changes, link speed must be re-queried from card */
        adapter->link_speed = -1;
        if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
                netif_carrier_on(netdev);
                dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
        } else {
                netif_carrier_off(netdev);
                dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
        }
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

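/* DMA-map the skb head and frags and write one WRB per fragment (plus
 * the header WRB and an optional dummy WRB) into the Tx ring. Returns
 * the number of bytes queued, or 0 after unwinding all mappings on a
 * DMA mapping failure.
 */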
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

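/* ndo_start_xmit handler: build the WRBs for this skb, stop the
 * subqueue *before* ringing the doorbell if the ring may not fit
 * another max-fragmented skb, then notify the adapter.
 */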
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

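/* ndo_set_rx_mode handler: program promiscuous, all-multi or filtered
 * multicast reception to mirror the netdev flags.
 */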
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter, false, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
                        netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

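/* Adaptive interrupt coalescing: once a second, derive an event-queue
 * delay (EQD) from the measured Rx packet rate and program it with
 * be_cmd_modify_eqd(), clamped to the EQ's min/max bounds.
 */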
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = rx_stats(rxo);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = stats->rx_pps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd) {
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
                rx_eq->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

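/* Look up the page_info backing an Rx fragment; the last user of a
 * shared big page also unmaps it for DMA.
 */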
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                } else {
                        put_page(page_info->page);
                }
                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        napi_gro_frags(&eq_obj->napi);
}

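/* Decode a BE3-native (v1) Rx completion descriptor into the
 * firmware-independent be_rx_compl_info; the v0 variant below does the
 * same for the legacy layout.
 */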
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is OK to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
                if ((adapter->function_mode & 0x400) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}

1319 /*
1320  * Allocate a page, split it to fragments of size rx_frag_size and post as
1321  * receive buffers to BE
1322  */
1323 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1324 {
1325         struct be_adapter *adapter = rxo->adapter;
1326         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1327         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1328         struct be_queue_info *rxq = &rxo->q;
1329         struct page *pagep = NULL;
1330         struct be_eth_rx_d *rxd;
1331         u64 page_dmaaddr = 0, frag_dmaaddr;
1332         u32 posted, page_offset = 0;
1333
1334         page_info = &rxo->page_info_tbl[rxq->head];
1335         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1336                 if (!pagep) {
1337                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1338                         if (unlikely(!pagep)) {
1339                                 rx_stats(rxo)->rx_post_fail++;
1340                                 break;
1341                         }
1342                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1343                                                     0, adapter->big_page_size,
1344                                                     DMA_FROM_DEVICE);
1345                         page_info->page_offset = 0;
1346                 } else {
1347                         get_page(pagep);
1348                         page_info->page_offset = page_offset + rx_frag_size;
1349                 }
1350                 page_offset = page_info->page_offset;
1351                 page_info->page = pagep;
1352                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1353                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1354
1355                 rxd = queue_head_node(rxq);
1356                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1357                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1358
1359                 /* Any space left in the current big page for another frag? */
1360                 if ((page_offset + rx_frag_size + rx_frag_size) >
1361                                         adapter->big_page_size) {
1362                         pagep = NULL;
1363                         page_info->last_page_user = true;
1364                 }
1365
1366                 prev_page_info = page_info;
1367                 queue_head_inc(rxq);
1368                 page_info = &page_info_tbl[rxq->head];
1369         }
1370         if (pagep)
1371                 prev_page_info->last_page_user = true;
1372
1373         if (posted) {
1374                 atomic_add(posted, &rxq->used);
1375                 be_rxq_notify(adapter, rxq->id, posted);
1376         } else if (atomic_read(&rxq->used) == 0) {
1377                 /* Let be_worker replenish when memory is available */
1378                 rxo->rx_post_starved = true;
1379         }
1380 }
1381
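/* Return the TX completion at the CQ tail, or NULL if none is pending.
 * The rmb() ensures the rest of the entry is not read before the 'valid'
 * bit, which hardware writes last, has been observed.
 */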
1382 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1383 {
1384         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1385
1386         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1387                 return NULL;
1388
1389         rmb();
1390         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1391
1392         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1393
1394         queue_tail_inc(tx_cq);
1395         return txcp;
1396 }
1397
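/* Unmap and free the skb whose wrbs end at @last_index, and return the
 * number of wrbs consumed (header wrb included) so the caller can credit
 * them back to the TX queue.
 */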
1398 static u16 be_tx_compl_process(struct be_adapter *adapter,
1399                 struct be_tx_obj *txo, u16 last_index)
1400 {
1401         struct be_queue_info *txq = &txo->q;
1402         struct be_eth_wrb *wrb;
1403         struct sk_buff **sent_skbs = txo->sent_skb_list;
1404         struct sk_buff *sent_skb;
1405         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1406         bool unmap_skb_hdr = true;
1407
1408         sent_skb = sent_skbs[txq->tail];
1409         BUG_ON(!sent_skb);
1410         sent_skbs[txq->tail] = NULL;
1411
1412         /* skip header wrb */
1413         queue_tail_inc(txq);
1414
1415         do {
1416                 cur_index = txq->tail;
1417                 wrb = queue_tail_node(txq);
1418                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1419                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1420                 unmap_skb_hdr = false;
1421
1422                 num_wrbs++;
1423                 queue_tail_inc(txq);
1424         } while (cur_index != last_index);
1425
1426         kfree_skb(sent_skb);
1427         return num_wrbs;
1428 }
1429
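/* Consume and return the event at the EQ tail, or NULL if none is
 * pending; the rmb() orders the read of the entry after the check of
 * eqe->evt.
 */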
1430 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1431 {
1432         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1433
1434         if (!eqe->evt)
1435                 return NULL;
1436
1437         rmb();
1438         eqe->evt = le32_to_cpu(eqe->evt);
1439         queue_tail_inc(&eq_obj->q);
1440         return eqe;
1441 }
1442
1443 static int event_handle(struct be_adapter *adapter,
1444                         struct be_eq_obj *eq_obj,
1445                         bool rearm)
1446 {
1447         struct be_eq_entry *eqe;
1448         u16 num = 0;
1449
1450         while ((eqe = event_get(eq_obj)) != NULL) {
1451                 eqe->evt = 0;
1452                 num++;
1453         }
1454
1455         /* Deal with any spurious interrupts that come
1456          * without events
1457          */
1458         if (!num)
1459                 rearm = true;
1460
1461         be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1462         if (num)
1463                 napi_schedule(&eq_obj->napi);
1464
1465         return num;
1466 }
1467
1468 /* Just read and notify events without processing them.
1469  * Used at the time of destroying event queues */
1470 static void be_eq_clean(struct be_adapter *adapter,
1471                         struct be_eq_obj *eq_obj)
1472 {
1473         struct be_eq_entry *eqe;
1474         u16 num = 0;
1475
1476         while ((eqe = event_get(eq_obj)) != NULL) {
1477                 eqe->evt = 0;
1478                 num++;
1479         }
1480
1481         if (num)
1482                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1483 }
1484
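/* Drain the RX path at teardown: discard any pending completions, then
 * drop the page references of posted-but-unused RX buffers.
 */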
1485 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1486 {
1487         struct be_rx_page_info *page_info;
1488         struct be_queue_info *rxq = &rxo->q;
1489         struct be_queue_info *rx_cq = &rxo->cq;
1490         struct be_rx_compl_info *rxcp;
1491         u16 tail;
1492
1493         /* First, clean up pending rx completions */
1494         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1495                 be_rx_compl_discard(adapter, rxo, rxcp);
1496                 be_cq_notify(adapter, rx_cq->id, false, 1);
1497         }
1498
1499         /* Then free posted rx buffers that were not used */
1500         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1501         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1502                 page_info = get_rx_page_info(adapter, rxo, tail);
1503                 put_page(page_info->page);
1504                 memset(page_info, 0, sizeof(*page_info));
1505         }
1506         BUG_ON(atomic_read(&rxq->used));
1507         rxq->tail = rxq->head = 0;
1508 }
1509
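/* Reap TX completions at teardown; after the 200ms wait below expires,
 * forcibly free any posted skbs whose completions never arrived.
 */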
1510 static void be_tx_compl_clean(struct be_adapter *adapter,
1511                                 struct be_tx_obj *txo)
1512 {
1513         struct be_queue_info *tx_cq = &txo->cq;
1514         struct be_queue_info *txq = &txo->q;
1515         struct be_eth_tx_compl *txcp;
1516         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1517         struct sk_buff **sent_skbs = txo->sent_skb_list;
1518         struct sk_buff *sent_skb;
1519         bool dummy_wrb;
1520
1521         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1522         do {
1523                 while ((txcp = be_tx_compl_get(tx_cq))) {
1524                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1525                                         wrb_index, txcp);
1526                         num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1527                         cmpl++;
1528                 }
1529                 if (cmpl) {
1530                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1531                         atomic_sub(num_wrbs, &txq->used);
1532                         cmpl = 0;
1533                         num_wrbs = 0;
1534                 }
1535
1536                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1537                         break;
1538
1539                 mdelay(1);
1540         } while (true);
1541
1542         if (atomic_read(&txq->used))
1543                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1544                         atomic_read(&txq->used));
1545
1546         /* free posted tx for which compls will never arrive */
1547         while (atomic_read(&txq->used)) {
1548                 sent_skb = sent_skbs[txq->tail];
1549                 end_idx = txq->tail;
1550                 index_adv(&end_idx,
1551                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1552                         txq->len);
1553                 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1554                 atomic_sub(num_wrbs, &txq->used);
1555         }
1556 }
1557
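/* Destroy the MCC queue and its CQ, in the reverse of creation order */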
1558 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1559 {
1560         struct be_queue_info *q;
1561
1562         q = &adapter->mcc_obj.q;
1563         if (q->created)
1564                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1565         be_queue_free(adapter, q);
1566
1567         q = &adapter->mcc_obj.cq;
1568         if (q->created)
1569                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1570         be_queue_free(adapter, q);
1571 }
1572
1573 /* Must be called only after TX qs are created as MCC shares TX EQ */
1574 static int be_mcc_queues_create(struct be_adapter *adapter)
1575 {
1576         struct be_queue_info *q, *cq;
1577
1578         /* Alloc MCC compl queue */
1579         cq = &adapter->mcc_obj.cq;
1580         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1581                         sizeof(struct be_mcc_compl)))
1582                 goto err;
1583
1584         /* Ask BE to create MCC compl queue; share TX's eq */
1585         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1586                 goto mcc_cq_free;
1587
1588         /* Alloc MCC queue */
1589         q = &adapter->mcc_obj.q;
1590         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1591                 goto mcc_cq_destroy;
1592
1593         /* Ask BE to create MCC queue */
1594         if (be_cmd_mccq_create(adapter, q, cq))
1595                 goto mcc_q_free;
1596
1597         return 0;
1598
1599 mcc_q_free:
1600         be_queue_free(adapter, q);
1601 mcc_cq_destroy:
1602         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1603 mcc_cq_free:
1604         be_queue_free(adapter, cq);
1605 err:
1606         return -1;
1607 }
1608
1609 static void be_tx_queues_destroy(struct be_adapter *adapter)
1610 {
1611         struct be_queue_info *q;
1612         struct be_tx_obj *txo;
1613         u8 i;
1614
1615         for_all_tx_queues(adapter, txo, i) {
1616                 q = &txo->q;
1617                 if (q->created)
1618                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1619                 be_queue_free(adapter, q);
1620
1621                 q = &txo->cq;
1622                 if (q->created)
1623                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1624                 be_queue_free(adapter, q);
1625         }
1626
1627         /* Clear any residual events */
1628         be_eq_clean(adapter, &adapter->tx_eq);
1629
1630         q = &adapter->tx_eq.q;
1631         if (q->created)
1632                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1633         be_queue_free(adapter, q);
1634 }
1635
1636 /* One TX event queue is shared by all TX compl qs */
1637 static int be_tx_queues_create(struct be_adapter *adapter)
1638 {
1639         struct be_queue_info *eq, *q, *cq;
1640         struct be_tx_obj *txo;
1641         u8 i;
1642
1643         adapter->tx_eq.max_eqd = 0;
1644         adapter->tx_eq.min_eqd = 0;
1645         adapter->tx_eq.cur_eqd = 96;
1646         adapter->tx_eq.enable_aic = false;
1647
1648         eq = &adapter->tx_eq.q;
1649         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1650                 sizeof(struct be_eq_entry)))
1651                 return -1;
1652
1653         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1654                 goto err;
1655         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1656
1657         for_all_tx_queues(adapter, txo, i) {
1658                 cq = &txo->cq;
1659                 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1660                         sizeof(struct be_eth_tx_compl)))
1661                         goto err;
1662
1663                 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1664                         goto err;
1665
1666                 q = &txo->q;
1667                 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1668                         sizeof(struct be_eth_wrb)))
1669                         goto err;
1670
1671                 if (be_cmd_txq_create(adapter, q, cq))
1672                         goto err;
1673         }
1674         return 0;
1675
1676 err:
1677         be_tx_queues_destroy(adapter);
1678         return -1;
1679 }
1680
1681 static void be_rx_queues_destroy(struct be_adapter *adapter)
1682 {
1683         struct be_queue_info *q;
1684         struct be_rx_obj *rxo;
1685         int i;
1686
1687         for_all_rx_queues(adapter, rxo, i) {
1688                 be_queue_free(adapter, &rxo->q);
1689
1690                 q = &rxo->cq;
1691                 if (q->created)
1692                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1693                 be_queue_free(adapter, q);
1694
1695                 q = &rxo->rx_eq.q;
1696                 if (q->created)
1697                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1698                 be_queue_free(adapter, q);
1699         }
1700 }
1701
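/* Multiple RX queues are used only when the function is RSS-capable and
 * neither SR-IOV nor the 0x400 function mode is in effect.
 */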
1702 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1703 {
1704         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1705                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1706                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1707         } else {
1708                 dev_warn(&adapter->pdev->dev,
1709                         "No support for multiple RX queues\n");
1710                 return 1;
1711         }
1712 }
1713
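/* Create the per-RX-queue EQs and CQs; the RX queues themselves are
 * only allocated here and are created in hardware at be_open() time.
 */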
1714 static int be_rx_queues_create(struct be_adapter *adapter)
1715 {
1716         struct be_queue_info *eq, *q, *cq;
1717         struct be_rx_obj *rxo;
1718         int rc, i;
1719
1720         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1721                                 msix_enabled(adapter) ?
1722                                         adapter->num_msix_vec - 1 : 1);
1723         if (adapter->num_rx_qs != MAX_RX_QS)
1724                 dev_warn(&adapter->pdev->dev,
1725                         "Can create only %d RX queues\n", adapter->num_rx_qs);
1726
1727         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1728         for_all_rx_queues(adapter, rxo, i) {
1729                 rxo->adapter = adapter;
1730                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1731                 rxo->rx_eq.enable_aic = true;
1732
1733                 /* EQ */
1734                 eq = &rxo->rx_eq.q;
1735                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1736                                         sizeof(struct be_eq_entry));
1737                 if (rc)
1738                         goto err;
1739
1740                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1741                 if (rc)
1742                         goto err;
1743
1744                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1745
1746                 /* CQ */
1747                 cq = &rxo->cq;
1748                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1749                                 sizeof(struct be_eth_rx_compl));
1750                 if (rc)
1751                         goto err;
1752
1753                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1754                 if (rc)
1755                         goto err;
1756
1757                 /* Rx Q - will be created in be_open() */
1758                 q = &rxo->q;
1759                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1760                                 sizeof(struct be_eth_rx_d));
1761                 if (rc)
1762                         goto err;
1763
1764         }
1765
1766         return 0;
1767 err:
1768         be_rx_queues_destroy(adapter);
1769         return -1;
1770 }
1771
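/* Check for a pending event at the EQ tail without consuming it */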
1772 static bool event_peek(struct be_eq_obj *eq_obj)
1773 {
1774         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1775         if (!eqe->evt)
1776                 return false;
1777         else
1778                 return true;
1779 }
1780
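/* Legacy INTx handler: on Lancer pending events are peeked on each EQ
 * directly; on BE the CEV_ISR register identifies which EQs have fired.
 */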
1781 static irqreturn_t be_intx(int irq, void *dev)
1782 {
1783         struct be_adapter *adapter = dev;
1784         struct be_rx_obj *rxo;
1785         int isr, i, tx = 0, rx = 0;
1786
1787         if (lancer_chip(adapter)) {
1788                 if (event_peek(&adapter->tx_eq))
1789                         tx = event_handle(adapter, &adapter->tx_eq, false);
1790                 for_all_rx_queues(adapter, rxo, i) {
1791                         if (event_peek(&rxo->rx_eq))
1792                                 rx |= event_handle(adapter, &rxo->rx_eq, true);
1793                 }
1794
1795                 if (!(tx || rx))
1796                         return IRQ_NONE;
1797
1798         } else {
1799                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1800                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1801                 if (!isr)
1802                         return IRQ_NONE;
1803
1804                 if ((1 << adapter->tx_eq.eq_idx & isr))
1805                         event_handle(adapter, &adapter->tx_eq, false);
1806
1807                 for_all_rx_queues(adapter, rxo, i) {
1808                         if ((1 << rxo->rx_eq.eq_idx & isr))
1809                                 event_handle(adapter, &rxo->rx_eq, true);
1810                 }
1811         }
1812
1813         return IRQ_HANDLED;
1814 }
1815
1816 static irqreturn_t be_msix_rx(int irq, void *dev)
1817 {
1818         struct be_rx_obj *rxo = dev;
1819         struct be_adapter *adapter = rxo->adapter;
1820
1821         event_handle(adapter, &rxo->rx_eq, true);
1822
1823         return IRQ_HANDLED;
1824 }
1825
1826 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1827 {
1828         struct be_adapter *adapter = dev;
1829
1830         event_handle(adapter, &adapter->tx_eq, false);
1831
1832         return IRQ_HANDLED;
1833 }
1834
1835 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1836 {
1837         return rxcp->tcpf && !rxcp->err;
1838 }
1839
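/* NAPI poll for an RX queue: process up to @budget completions, refill
 * the RX queue when it runs low and re-arm the CQ only when all posted
 * work has been consumed.
 */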
1840 static int be_poll_rx(struct napi_struct *napi, int budget)
1841 {
1842         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1843         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1844         struct be_adapter *adapter = rxo->adapter;
1845         struct be_queue_info *rx_cq = &rxo->cq;
1846         struct be_rx_compl_info *rxcp;
1847         u32 work_done;
1848
1849         rx_stats(rxo)->rx_polls++;
1850         for (work_done = 0; work_done < budget; work_done++) {
1851                 rxcp = be_rx_compl_get(rxo);
1852                 if (!rxcp)
1853                         break;
1854
1855                 /* Is it a flush compl that has no data? */
1856                 if (unlikely(rxcp->num_rcvd == 0))
1857                         goto loop_continue;
1858
1859                 /* Discard a compl with partial DMA (Lancer B0) */
1860                 if (unlikely(!rxcp->pkt_size)) {
1861                         be_rx_compl_discard(adapter, rxo, rxcp);
1862                         goto loop_continue;
1863                 }
1864
1865                 /* On BE drop pkts that arrive due to imperfect filtering in
1866                  * promiscuous mode on some SKUs
1867                  */
1868                 if (unlikely(rxcp->port != adapter->port_num &&
1869                                 !lancer_chip(adapter))) {
1870                         be_rx_compl_discard(adapter, rxo, rxcp);
1871                         goto loop_continue;
1872                 }
1873
1874                 if (do_gro(rxcp))
1875                         be_rx_compl_process_gro(adapter, rxo, rxcp);
1876                 else
1877                         be_rx_compl_process(adapter, rxo, rxcp);
1878 loop_continue:
1879                 be_rx_stats_update(rxo, rxcp);
1880         }
1881
1882         /* Refill the queue */
1883         if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1884                 be_post_rx_frags(rxo, GFP_ATOMIC);
1885
1886         /* All consumed */
1887         if (work_done < budget) {
1888                 napi_complete(napi);
1889                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1890         } else {
1891                 /* More to be consumed; continue with interrupts disabled */
1892                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1893         }
1894         return work_done;
1895 }
1896
1897 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1898  * For TX/MCC we don't honour budget; consume everything
1899  */
1900 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1901 {
1902         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1903         struct be_adapter *adapter =
1904                 container_of(tx_eq, struct be_adapter, tx_eq);
1905         struct be_tx_obj *txo;
1906         struct be_eth_tx_compl *txcp;
1907         int tx_compl, mcc_compl, status = 0;
1908         u8 i;
1909         u16 num_wrbs;
1910
1911         for_all_tx_queues(adapter, txo, i) {
1912                 tx_compl = 0;
1913                 num_wrbs = 0;
1914                 while ((txcp = be_tx_compl_get(&txo->cq))) {
1915                         num_wrbs += be_tx_compl_process(adapter, txo,
1916                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
1917                                         wrb_index, txcp));
1918                         tx_compl++;
1919                 }
1920                 if (tx_compl) {
1921                         be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1922
1923                         atomic_sub(num_wrbs, &txo->q.used);
1924
1925                         /* As Tx wrbs have been freed up, wake up netdev queue
1926                          * if it was stopped due to lack of tx wrbs.  */
1927                         if (__netif_subqueue_stopped(adapter->netdev, i) &&
1928                                 atomic_read(&txo->q.used) < txo->q.len / 2) {
1929                                 netif_wake_subqueue(adapter->netdev, i);
1930                         }
1931
1932                         u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1933                         tx_stats(txo)->tx_compl += tx_compl;
1934                         u64_stats_update_end(&tx_stats(txo)->sync_compl);
1935                 }
1936         }
1937
1938         mcc_compl = be_process_mcc(adapter, &status);
1939
1940         if (mcc_compl) {
1941                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1942                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1943         }
1944
1945         napi_complete(napi);
1946
1947         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1948         adapter->drv_stats.tx_events++;
1949         return 1;
1950 }
1951
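/* Read the UE (unrecoverable error) status registers from PCI config
 * space, apply the corresponding mask registers and log every error bit
 * that remains set.
 */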
1952 void be_detect_dump_ue(struct be_adapter *adapter)
1953 {
1954         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1955         u32 i;
1956
1957         pci_read_config_dword(adapter->pdev,
1958                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1959         pci_read_config_dword(adapter->pdev,
1960                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1961         pci_read_config_dword(adapter->pdev,
1962                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1963         pci_read_config_dword(adapter->pdev,
1964                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1965
1966         ue_status_lo &= ~ue_status_lo_mask;
1967         ue_status_hi &= ~ue_status_hi_mask;
1968
1969         if (ue_status_lo || ue_status_hi) {
1970                 adapter->ue_detected = true;
1971                 adapter->eeh_err = true;
1972                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1973         }
1974
1975         if (ue_status_lo) {
1976                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1977                         if (ue_status_lo & 1)
1978                                 dev_err(&adapter->pdev->dev,
1979                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1980                 }
1981         }
1982         if (ue_status_hi) {
1983                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1984                         if (ue_status_hi & 1)
1985                                 dev_err(&adapter->pdev->dev,
1986                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1987                 }
1988         }
1989
1990 }
1991
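/* Worker scheduled every second: checks for UEs, kicks off the async
 * stats cmd, updates RX EQ delays and replenishes starved RX queues.
 * While the interface is down, only MCC completions are reaped.
 */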
1992 static void be_worker(struct work_struct *work)
1993 {
1994         struct be_adapter *adapter =
1995                 container_of(work, struct be_adapter, work.work);
1996         struct be_rx_obj *rxo;
1997         int i;
1998
1999         if (!adapter->ue_detected && !lancer_chip(adapter))
2000                 be_detect_dump_ue(adapter);
2001
2002         /* When interrupts are not yet enabled, just reap any pending
2003          * mcc completions */
2004         if (!netif_running(adapter->netdev)) {
2005                 int mcc_compl, status = 0;
2006
2007                 mcc_compl = be_process_mcc(adapter, &status);
2008
2009                 if (mcc_compl) {
2010                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2011                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2012                 }
2013
2014                 goto reschedule;
2015         }
2016
2017         if (!adapter->stats_cmd_sent) {
2018                 if (lancer_chip(adapter))
2019                         lancer_cmd_get_pport_stats(adapter,
2020                                                 &adapter->stats_cmd);
2021                 else
2022                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
2023         }
2024
2025         for_all_rx_queues(adapter, rxo, i) {
2026                 be_rx_eqd_update(adapter, rxo);
2027
2028                 if (rxo->rx_post_starved) {
2029                         rxo->rx_post_starved = false;
2030                         be_post_rx_frags(rxo, GFP_KERNEL);
2031                 }
2032         }
2033
2034 reschedule:
2035         adapter->work_counter++;
2036         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2037 }
2038
2039 static void be_msix_disable(struct be_adapter *adapter)
2040 {
2041         if (msix_enabled(adapter)) {
2042                 pci_disable_msix(adapter->pdev);
2043                 adapter->num_msix_vec = 0;
2044         }
2045 }
2046
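/* With the old pci_enable_msix() semantics, a positive return value is
 * the number of vectors actually available; retry once with that count.
 */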
2047 static void be_msix_enable(struct be_adapter *adapter)
2048 {
2049 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2050         int i, status, num_vec;
2051
2052         num_vec = be_num_rxqs_want(adapter) + 1;
2053
2054         for (i = 0; i < num_vec; i++)
2055                 adapter->msix_entries[i].entry = i;
2056
2057         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2058         if (status == 0) {
2059                 goto done;
2060         } else if (status >= BE_MIN_MSIX_VECTORS) {
2061                 num_vec = status;
2062                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2063                                 num_vec) == 0)
2064                         goto done;
2065         }
2066         return;
2067 done:
2068         adapter->num_msix_vec = num_vec;
2069         return;
2070 }
2071
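/* Enable SR-IOV when this is a PF and num_vfs was requested, capping
 * num_vfs at the TotalVFs value from the SR-IOV capability.
 */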
2072 static int be_sriov_enable(struct be_adapter *adapter)
2073 {
2074         be_check_sriov_fn_type(adapter);
2075 #ifdef CONFIG_PCI_IOV
2076         if (be_physfn(adapter) && num_vfs) {
2077                 int status, pos;
2078                 u16 nvfs;
2079
2080                 pos = pci_find_ext_capability(adapter->pdev,
2081                                                 PCI_EXT_CAP_ID_SRIOV);
2082                 pci_read_config_word(adapter->pdev,
2083                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2084
2085                 if (num_vfs > nvfs) {
2086                         dev_info(&adapter->pdev->dev,
2087                                         "Device supports %d VFs and not %d\n",
2088                                         nvfs, num_vfs);
2089                         num_vfs = nvfs;
2090                 }
2091
2092                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2093                 adapter->sriov_enabled = !status;
2094
2095                 if (adapter->sriov_enabled) {
2096                         adapter->vf_cfg = kcalloc(num_vfs,
2097                                                 sizeof(struct be_vf_cfg),
2098                                                 GFP_KERNEL);
2099                         if (!adapter->vf_cfg)
2100                                 return -ENOMEM;
2101                 }
2102         }
2103 #endif
2104         return 0;
2105 }
2106
2107 static void be_sriov_disable(struct be_adapter *adapter)
2108 {
2109 #ifdef CONFIG_PCI_IOV
2110         if (adapter->sriov_enabled) {
2111                 pci_disable_sriov(adapter->pdev);
2112                 kfree(adapter->vf_cfg);
2113                 adapter->sriov_enabled = false;
2114         }
2115 #endif
2116 }
2117
2118 static inline int be_msix_vec_get(struct be_adapter *adapter,
2119                                         struct be_eq_obj *eq_obj)
2120 {
2121         return adapter->msix_entries[eq_obj->eq_idx].vector;
2122 }
2123
2124 static int be_request_irq(struct be_adapter *adapter,
2125                 struct be_eq_obj *eq_obj,
2126                 void *handler, char *desc, void *context)
2127 {
2128         struct net_device *netdev = adapter->netdev;
2129         int vec;
2130
2131         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2132         vec = be_msix_vec_get(adapter, eq_obj);
2133         return request_irq(vec, handler, 0, eq_obj->desc, context);
2134 }
2135
2136 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2137                         void *context)
2138 {
2139         int vec = be_msix_vec_get(adapter, eq_obj);
2140         free_irq(vec, context);
2141 }
2142
2143 static int be_msix_register(struct be_adapter *adapter)
2144 {
2145         struct be_rx_obj *rxo;
2146         int status, i;
2147         char qname[10];
2148
2149         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2150                                 adapter);
2151         if (status)
2152                 goto err;
2153
2154         for_all_rx_queues(adapter, rxo, i) {
2155                 sprintf(qname, "rxq%d", i);
2156                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2157                                 qname, rxo);
2158                 if (status)
2159                         goto err_msix;
2160         }
2161
2162         return 0;
2163
2164 err_msix:
2165         be_free_irq(adapter, &adapter->tx_eq, adapter);
2166
2167         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2168                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2169
2170 err:
2171         dev_warn(&adapter->pdev->dev,
2172                 "MSIX Request IRQ failed - err %d\n", status);
2173         be_msix_disable(adapter);
2174         return status;
2175 }
2176
2177 static int be_irq_register(struct be_adapter *adapter)
2178 {
2179         struct net_device *netdev = adapter->netdev;
2180         int status;
2181
2182         if (msix_enabled(adapter)) {
2183                 status = be_msix_register(adapter);
2184                 if (status == 0)
2185                         goto done;
2186                 /* INTx is not supported for VF */
2187                 if (!be_physfn(adapter))
2188                         return status;
2189         }
2190
2191         /* INTx */
2192         netdev->irq = adapter->pdev->irq;
2193         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2194                         adapter);
2195         if (status) {
2196                 dev_err(&adapter->pdev->dev,
2197                         "INTx request IRQ failed - err %d\n", status);
2198                 return status;
2199         }
2200 done:
2201         adapter->isr_registered = true;
2202         return 0;
2203 }
2204
2205 static void be_irq_unregister(struct be_adapter *adapter)
2206 {
2207         struct net_device *netdev = adapter->netdev;
2208         struct be_rx_obj *rxo;
2209         int i;
2210
2211         if (!adapter->isr_registered)
2212                 return;
2213
2214         /* INTx */
2215         if (!msix_enabled(adapter)) {
2216                 free_irq(netdev->irq, adapter);
2217                 goto done;
2218         }
2219
2220         /* MSIx */
2221         be_free_irq(adapter, &adapter->tx_eq, adapter);
2222
2223         for_all_rx_queues(adapter, rxo, i)
2224                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2225
2226 done:
2227         adapter->isr_registered = false;
2228 }
2229
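/* Destroy the RX queues in hardware and flush out any buffers and
 * events still held by them.
 */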
2230 static void be_rx_queues_clear(struct be_adapter *adapter)
2231 {
2232         struct be_queue_info *q;
2233         struct be_rx_obj *rxo;
2234         int i;
2235
2236         for_all_rx_queues(adapter, rxo, i) {
2237                 q = &rxo->q;
2238                 if (q->created) {
2239                         be_cmd_rxq_destroy(adapter, q);
2240                         /* After the rxq is invalidated, wait for a grace time
2241                          * of 1ms for all dma to end and the flush compl to
2242                          * arrive
2243                          */
2244                         mdelay(1);
2245                         be_rx_q_clean(adapter, rxo);
2246                 }
2247
2248                 /* Clear any residual events */
2249                 q = &rxo->rx_eq.q;
2250                 if (q->created)
2251                         be_eq_clean(adapter, &rxo->rx_eq);
2252         }
2253 }
2254
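/* ndo_stop: quiesce async MCC, disable interrupts and NAPI, drain all
 * per-queue completions and finally clear the RX queues.
 */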
2255 static int be_close(struct net_device *netdev)
2256 {
2257         struct be_adapter *adapter = netdev_priv(netdev);
2258         struct be_rx_obj *rxo;
2259         struct be_tx_obj *txo;
2260         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2261         int vec, i;
2262
2263         be_async_mcc_disable(adapter);
2264
2265         if (!lancer_chip(adapter))
2266                 be_intr_set(adapter, false);
2267
2268         for_all_rx_queues(adapter, rxo, i)
2269                 napi_disable(&rxo->rx_eq.napi);
2270
2271         napi_disable(&tx_eq->napi);
2272
2273         if (lancer_chip(adapter)) {
2274                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2275                 for_all_rx_queues(adapter, rxo, i)
2276                          be_cq_notify(adapter, rxo->cq.id, false, 0);
2277                 for_all_tx_queues(adapter, txo, i)
2278                          be_cq_notify(adapter, txo->cq.id, false, 0);
2279         }
2280
2281         if (msix_enabled(adapter)) {
2282                 vec = be_msix_vec_get(adapter, tx_eq);
2283                 synchronize_irq(vec);
2284
2285                 for_all_rx_queues(adapter, rxo, i) {
2286                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2287                         synchronize_irq(vec);
2288                 }
2289         } else {
2290                 synchronize_irq(netdev->irq);
2291         }
2292         be_irq_unregister(adapter);
2293
2294         /* Wait for all pending tx completions to arrive so that
2295          * all tx skbs are freed.
2296          */
2297         for_all_tx_queues(adapter, txo, i)
2298                 be_tx_compl_clean(adapter, txo);
2299
2300         be_rx_queues_clear(adapter);
2301         return 0;
2302 }
2303
2304 static int be_rx_queues_setup(struct be_adapter *adapter)
2305 {
2306         struct be_rx_obj *rxo;
2307         int rc, i;
2308         u8 rsstable[MAX_RSS_QS];
2309
2310         for_all_rx_queues(adapter, rxo, i) {
2311                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2312                         rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2313                         adapter->if_handle,
2314                         (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2315                 if (rc)
2316                         return rc;
2317         }
2318
2319         if (be_multi_rxq(adapter)) {
2320                 for_all_rss_queues(adapter, rxo, i)
2321                         rsstable[i] = rxo->rss_id;
2322
2323                 rc = be_cmd_rss_config(adapter, rsstable,
2324                         adapter->num_rx_qs - 1);
2325                 if (rc)
2326                         return rc;
2327         }
2328
2329         /* First time posting */
2330         for_all_rx_queues(adapter, rxo, i) {
2331                 be_post_rx_frags(rxo, GFP_KERNEL);
2332                 napi_enable(&rxo->rx_eq.napi);
2333         }
2334         return 0;
2335 }
2336
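/* ndo_open: create and fill the RX queues, register IRQs, arm the event
 * and completion queues, then re-enable async MCC processing.
 */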
2337 static int be_open(struct net_device *netdev)
2338 {
2339         struct be_adapter *adapter = netdev_priv(netdev);
2340         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2341         struct be_rx_obj *rxo;
2342         int status, i;
2343
2344         status = be_rx_queues_setup(adapter);
2345         if (status)
2346                 goto err;
2347
2348         napi_enable(&tx_eq->napi);
2349
2350         be_irq_register(adapter);
2351
2352         if (!lancer_chip(adapter))
2353                 be_intr_set(adapter, true);
2354
2355         /* The evt queues are created in unarmed state; arm them */
2356         for_all_rx_queues(adapter, rxo, i) {
2357                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2358                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2359         }
2360         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2361
2362         /* Now that interrupts are on we can process async mcc */
2363         be_async_mcc_enable(adapter);
2364
2365         return 0;
2366 err:
2367         be_close(adapter->netdev);
2368         return -EIO;
2369 }
2370
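/* Program (or clear) the magic-packet WoL filter in FW and set the
 * matching PCI D3hot/D3cold wake capability.
 */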
2371 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2372 {
2373         struct be_dma_mem cmd;
2374         int status = 0;
2375         u8 mac[ETH_ALEN];
2376
2377         memset(mac, 0, ETH_ALEN);
2378
2379         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2380         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2381                                     GFP_KERNEL);
2382         if (cmd.va == NULL)
2383                 return -1;
2384         memset(cmd.va, 0, cmd.size);
2385
2386         if (enable) {
2387                 status = pci_write_config_dword(adapter->pdev,
2388                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2389                 if (status) {
2390                         dev_err(&adapter->pdev->dev,
2391                                 "Could not enable Wake-on-LAN\n");
2392                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2393                                           cmd.dma);
2394                         return status;
2395                 }
2396                 status = be_cmd_enable_magic_wol(adapter,
2397                                 adapter->netdev->dev_addr, &cmd);
2398                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2399                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2400         } else {
2401                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2402                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2403                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2404         }
2405
2406         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2407         return status;
2408 }
2409
2410 /*
2411  * Generate a seed MAC address from the PF MAC Address using jhash.
2412  * MAC addresses for VFs are assigned incrementally starting from the seed.
2413  * These addresses are programmed in the ASIC by the PF and the VF driver
2414  * queries for the MAC address during its probe.
2415  */
2416 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2417 {
2418         u32 vf;
2419         int status = 0;
2420         u8 mac[ETH_ALEN];
2421
2422         be_vf_eth_addr_generate(adapter, mac);
2423
2424         for (vf = 0; vf < num_vfs; vf++) {
2425                 status = be_cmd_pmac_add(adapter, mac,
2426                                         adapter->vf_cfg[vf].vf_if_handle,
2427                                         &adapter->vf_cfg[vf].vf_pmac_id,
2428                                         vf + 1);
2429                 if (status)
2430                         dev_err(&adapter->pdev->dev,
2431                                 "MAC address add failed for VF %d\n", vf);
2432                 else
2433                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2434
2435                 mac[5] += 1;
2436         }
2437         return status;
2438 }
2439
2440 static void be_vf_clear(struct be_adapter *adapter)
2441 {
2442         u32 vf;
2443
2444         for (vf = 0; vf < num_vfs; vf++) {
2445                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2446                         be_cmd_pmac_del(adapter,
2447                                         adapter->vf_cfg[vf].vf_if_handle,
2448                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2449         }
2450
2451         for (vf = 0; vf < num_vfs; vf++)
2452                 if (adapter->vf_cfg[vf].vf_if_handle)
2453                         be_cmd_if_destroy(adapter,
2454                                 adapter->vf_cfg[vf].vf_if_handle, vf + 1);
2455 }
2456
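/* Undo be_setup(): tear down VF state, the interface and all queues,
 * then tell FW the driver is done issuing cmds.
 */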
2457 static int be_clear(struct be_adapter *adapter)
2458 {
2459         if (be_physfn(adapter) && adapter->sriov_enabled)
2460                 be_vf_clear(adapter);
2461
2462         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2463
2464         be_mcc_queues_destroy(adapter);
2465         be_rx_queues_destroy(adapter);
2466         be_tx_queues_destroy(adapter);
2467         adapter->eq_next_idx = 0;
2468
2469         adapter->be3_native = false;
2470         adapter->promiscuous = false;
2471
2472         /* tell fw we're done with firing cmds */
2473         be_cmd_fw_clean(adapter);
2474         return 0;
2475 }
2476
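/* Create an interface for each VF, program their MAC addresses (except
 * on Lancer) and derive each VF's default TX rate from its link speed.
 */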
2477 static int be_vf_setup(struct be_adapter *adapter)
2478 {
2479         u32 cap_flags, en_flags, vf;
2480         u16 lnk_speed;
2481         int status;
2482
2483         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2484         for (vf = 0; vf < num_vfs; vf++) {
2485                 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2486                                         &adapter->vf_cfg[vf].vf_if_handle,
2487                                         NULL, vf + 1);
2488                 if (status)
2489                         goto err;
2490                 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2491         }
2492
2493         if (!lancer_chip(adapter)) {
2494                 status = be_vf_eth_addr_config(adapter);
2495                 if (status)
2496                         goto err;
2497         }
2498
2499         for (vf = 0; vf < num_vfs; vf++) {
2500                 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2501                                 vf + 1);
2502                 if (status)
2503                         goto err;
2504                 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
2505         }
2506         return 0;
2507 err:
2508         return status;
2509 }
2510
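/* One-time function-level init: create all queues, query the permanent
 * MAC, create the interface and apply VLAN, RX-mode and flow-control
 * configuration.
 */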
2511 static int be_setup(struct be_adapter *adapter)
2512 {
2513         struct net_device *netdev = adapter->netdev;
2514         u32 cap_flags, en_flags;
2515         u32 tx_fc, rx_fc;
2516         int status;
2517         u8 mac[ETH_ALEN];
2518
2519         /* Allow all priorities by default. A GRP5 evt may modify this */
2520         adapter->vlan_prio_bmap = 0xff;
2521         adapter->link_speed = -1;
2522
2523         be_cmd_req_native_mode(adapter);
2524
2525         status = be_tx_queues_create(adapter);
2526         if (status != 0)
2527                 goto err;
2528
2529         status = be_rx_queues_create(adapter);
2530         if (status != 0)
2531                 goto err;
2532
2533         status = be_mcc_queues_create(adapter);
2534         if (status != 0)
2535                 goto err;
2536
2537         memset(mac, 0, ETH_ALEN);
2538         status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2539                         true /*permanent */, 0);
2540         if (status)
2541                 goto err;
2542         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2543         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2544
2545         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2546                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2547         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2548                         BE_IF_FLAGS_PROMISCUOUS;
2549         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2550                 cap_flags |= BE_IF_FLAGS_RSS;
2551                 en_flags |= BE_IF_FLAGS_RSS;
2552         }
2553         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2554                         netdev->dev_addr, &adapter->if_handle,
2555                         &adapter->pmac_id, 0);
2556         if (status != 0)
2557                 goto err;
2558
2559         /* For BEx, the VF's permanent MAC queried from the card is incorrect.
2560          * Query the MAC configured by the PF using if_handle
2561          */
2562         if (!be_physfn(adapter) && !lancer_chip(adapter)) {
2563                 status = be_cmd_mac_addr_query(adapter, mac,
2564                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2565                 if (!status) {
2566                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2567                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2568                 }
2569         }
2570
2571         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2572
2573         status = be_vid_config(adapter, false, 0);
2574         if (status)
2575                 goto err;
2576
2577         be_set_rx_mode(adapter->netdev);
2578
2579         status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2580         if (status)
2581                 goto err;
2582         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2583                 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2584                                         adapter->rx_fc);
2585                 if (status)
2586                         goto err;
2587         }
2588
2589         pcie_set_readrq(adapter->pdev, 4096);
2590
2591         if (be_physfn(adapter) && adapter->sriov_enabled) {
2592                 status = be_vf_setup(adapter);
2593                 if (status)
2594                         goto err;
2595         }
2596
2597         return 0;
2598 err:
2599         be_clear(adapter);
2600         return status;
2601 }
2602
2603 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
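/* Flash redboot only when the CRC at the end of the new image differs
 * from the CRC currently stored in flash.
 */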
2604 static bool be_flash_redboot(struct be_adapter *adapter,
2605                         const u8 *p, u32 img_start, int image_size,
2606                         int hdr_size)
2607 {
2608         u32 crc_offset;
2609         u8 flashed_crc[4];
2610         int status;
2611
2612         crc_offset = hdr_size + img_start + image_size - 4;
2613
2614         p += crc_offset;
2615
2616         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2617                         (image_size - 4));
2618         if (status) {
2619                 dev_err(&adapter->pdev->dev,
2620                 "could not get crc from flash, not flashing redboot\n");
2621                 return false;
2622         }
2623
2624         /* update redboot only if crc does not match */
2625         if (!memcmp(flashed_crc, p, 4))
2626                 return false;
2627         else
2628                 return true;
2629 }
2630
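/* PHY FW is flashed only for the TN_8022 10GBase-T PHY */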
2631 static bool phy_flashing_required(struct be_adapter *adapter)
2632 {
2633         int status = 0;
2634         struct be_phy_info phy_info;
2635
2636         status = be_cmd_get_phy_info(adapter, &phy_info);
2637         if (status)
2638                 return false;
2639         if ((phy_info.phy_type == TN_8022) &&
2640                 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2641                 return true;
2642         }
2643         return false;
2644 }
2645
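/* Walk the per-generation flash layout table and write each applicable
 * image from the UFI file in 32KB chunks; intermediate chunks use a
 * SAVE op and the final chunk a FLASH op that commits the region.
 */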
2646 static int be_flash_data(struct be_adapter *adapter,
2647                         const struct firmware *fw,
2648                         struct be_dma_mem *flash_cmd, int num_of_images)
2649
2650 {
2651         int status = 0, i, filehdr_size = 0;
2652         u32 total_bytes = 0, flash_op;
2653         int num_bytes;
2654         const u8 *p = fw->data;
2655         struct be_cmd_write_flashrom *req = flash_cmd->va;
2656         const struct flash_comp *pflashcomp;
2657         int num_comp;
2658
2659         static const struct flash_comp gen3_flash_types[10] = {
2660                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2661                         FLASH_IMAGE_MAX_SIZE_g3},
2662                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2663                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2664                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2665                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2666                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2667                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2668                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2669                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2670                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2671                         FLASH_IMAGE_MAX_SIZE_g3},
2672                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2673                         FLASH_IMAGE_MAX_SIZE_g3},
2674                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2675                         FLASH_IMAGE_MAX_SIZE_g3},
2676                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2677                         FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2678                 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2679                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2680         };
2681         static const struct flash_comp gen2_flash_types[8] = {
2682                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2683                         FLASH_IMAGE_MAX_SIZE_g2},
2684                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2685                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2686                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2687                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2688                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2689                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2690                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2691                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2692                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2693                         FLASH_IMAGE_MAX_SIZE_g2},
2694                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2695                         FLASH_IMAGE_MAX_SIZE_g2},
2696                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2697                          FLASH_IMAGE_MAX_SIZE_g2}
2698         };
2699
2700         if (adapter->generation == BE_GEN3) {
2701                 pflashcomp = gen3_flash_types;
2702                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2703                 num_comp = ARRAY_SIZE(gen3_flash_types);
2704         } else {
2705                 pflashcomp = gen2_flash_types;
2706                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2707                 num_comp = ARRAY_SIZE(gen2_flash_types);
2708         }
2709         for (i = 0; i < num_comp; i++) {
2710                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2711                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2712                         continue;
2713                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2714                         if (!phy_flashing_required(adapter))
2715                                 continue;
2716                 }
2717                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2718                         (!be_flash_redboot(adapter, fw->data,
2719                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2720                         (num_of_images * sizeof(struct image_hdr)))))
2721                         continue;
2722                 p = fw->data;
2723                 p += filehdr_size + pflashcomp[i].offset
2724                         + (num_of_images * sizeof(struct image_hdr));
2725                 if (p + pflashcomp[i].size > fw->data + fw->size)
2726                         return -1;
2727                 total_bytes = pflashcomp[i].size;
2728                 while (total_bytes) {
2729                         if (total_bytes > 32*1024)
2730                                 num_bytes = 32*1024;
2731                         else
2732                                 num_bytes = total_bytes;
2733                         total_bytes -= num_bytes;
2734                         if (!total_bytes) {
2735                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2736                                         flash_op = FLASHROM_OPER_PHY_FLASH;
2737                                 else
2738                                         flash_op = FLASHROM_OPER_FLASH;
2739                         } else {
2740                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2741                                         flash_op = FLASHROM_OPER_PHY_SAVE;
2742                                 else
2743                                         flash_op = FLASHROM_OPER_SAVE;
2744                         }
2745                         memcpy(req->params.data_buf, p, num_bytes);
2746                         p += num_bytes;
2747                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2748                                 pflashcomp[i].optype, flash_op, num_bytes);
2749                         if (status) {
2750                                 if ((status == ILLEGAL_IOCTL_REQ) &&
2751                                         (pflashcomp[i].optype ==
2752                                                 IMG_TYPE_PHY_FW))
2753                                         break;
2754                                 dev_err(&adapter->pdev->dev,
2755                                         "cmd to write to flash rom failed.\n");
2756                                 return -1;
2757                         }
2758                 }
2759         }
2760         return 0;
2761 }
2762
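/* Infer the UFI generation from the first character of the build string */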
2763 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2764 {
2765         if (fhdr == NULL)
2766                 return 0;
2767         if (fhdr->build[0] == '3')
2768                 return BE_GEN3;
2769         else if (fhdr->build[0] == '2')
2770                 return BE_GEN2;
2771         else
2772                 return 0;
2773 }
2774
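/* Lancer flashing: stream the image in 32KB chunks to the "/prg" object
 * via write_object cmds, then issue a zero-length write to commit it.
 */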
2775 static int lancer_fw_download(struct be_adapter *adapter,
2776                                 const struct firmware *fw)
2777 {
2778 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2779 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2780         struct be_dma_mem flash_cmd;
2781         const u8 *data_ptr = NULL;
2782         u8 *dest_image_ptr = NULL;
2783         size_t image_size = 0;
2784         u32 chunk_size = 0;
2785         u32 data_written = 0;
2786         u32 offset = 0;
2787         int status = 0;
2788         u8 add_status = 0;
2789
2790         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2791                 dev_err(&adapter->pdev->dev,
2792                         "FW Image not properly aligned. "
2793                         "Length must be 4-byte aligned.\n");
2794                 status = -EINVAL;
2795                 goto lancer_fw_exit;
2796         }
2797
2798         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2799                                 + LANCER_FW_DOWNLOAD_CHUNK;
2800         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2801                                                 &flash_cmd.dma, GFP_KERNEL);
2802         if (!flash_cmd.va) {
2803                 status = -ENOMEM;
2804                 dev_err(&adapter->pdev->dev,
2805                         "Memory allocation failure while flashing\n");
2806                 goto lancer_fw_exit;
2807         }
2808
2809         dest_image_ptr = flash_cmd.va +
2810                                 sizeof(struct lancer_cmd_req_write_object);
2811         image_size = fw->size;
2812         data_ptr = fw->data;
2813
2814         while (image_size) {
2815                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2816
2817                 /* Copy the image chunk content. */
2818                 memcpy(dest_image_ptr, data_ptr, chunk_size);
2819
2820                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2821                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2822                                 &data_written, &add_status);
2823
2824                 if (status)
2825                         break;
2826
2827                 offset += data_written;
2828                 data_ptr += data_written;
2829                 image_size -= data_written;
2830         }
2831
2832         if (!status) {
2833                 /* Commit the FW written */
2834                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2835                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2836                                         &data_written, &add_status);
2837         }
2838
2839         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2840                                 flash_cmd.dma);
2841         if (status) {
2842                 dev_err(&adapter->pdev->dev,
2843                         "Firmware load error. "
2844                         "Status code: 0x%x Additional Status: 0x%x\n",
2845                         status, add_status);
2846                 goto lancer_fw_exit;
2847         }
2848
2849         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2850 lancer_fw_exit:
2851         return status;
2852 }
2853
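/* Flash a UFI image on BE2/BE3 adapters. The generation encoded in the
 * UFI header must match the adapter generation; Gen3 UFIs carry a
 * table of image headers that is walked before flashing the data.
 */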
static int be_fw_download(struct be_adapter *adapter,
                          const struct firmware *fw)
2855 {
2856         struct flash_file_hdr_g2 *fhdr;
2857         struct flash_file_hdr_g3 *fhdr3;
2858         struct image_hdr *img_hdr_ptr = NULL;
2859         struct be_dma_mem flash_cmd;
2860         const u8 *p;
2861         int status = 0, i = 0, num_imgs = 0;
2862
2863         p = fw->data;
2864         fhdr = (struct flash_file_hdr_g2 *) p;
2865
2866         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2867         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2868                                           &flash_cmd.dma, GFP_KERNEL);
2869         if (!flash_cmd.va) {
2870                 status = -ENOMEM;
2871                 dev_err(&adapter->pdev->dev,
2872                         "Memory allocation failure while flashing\n");
2873                 goto be_fw_exit;
2874         }
2875
2876         if ((adapter->generation == BE_GEN3) &&
2877                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2878                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2879                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2880                 for (i = 0; i < num_imgs; i++) {
2881                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2882                                         (sizeof(struct flash_file_hdr_g3) +
2883                                          i * sizeof(struct image_hdr)));
2884                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2885                                 status = be_flash_data(adapter, fw, &flash_cmd,
2886                                                         num_imgs);
2887                 }
2888         } else if ((adapter->generation == BE_GEN2) &&
2889                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2890                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2891         } else {
2892                 dev_err(&adapter->pdev->dev,
2893                         "UFI and Interface are not compatible for flashing\n");
                status = -EINVAL;
2895         }
2896
2897         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2898                           flash_cmd.dma);
2899         if (status) {
2900                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2901                 goto be_fw_exit;
2902         }
2903
2904         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2905
2906 be_fw_exit:
2907         return status;
2908 }
2909
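/* Load a new firmware image onto the adapter (e.g. via the ethtool
 * flash path). The image is fetched with request_firmware() and handed
 * to the chip-specific download routine.
 */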
2910 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2911 {
2912         const struct firmware *fw;
2913         int status;
2914
2915         if (!netif_running(adapter->netdev)) {
2916                 dev_err(&adapter->pdev->dev,
2917                         "Firmware load not allowed (interface is down)\n");
                return -ENETDOWN;
2919         }
2920
2921         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2922         if (status)
2923                 goto fw_exit;
2924
2925         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2926
2927         if (lancer_chip(adapter))
2928                 status = lancer_fw_download(adapter, fw);
2929         else
2930                 status = be_fw_download(adapter, fw);
2931
2932 fw_exit:
2933         release_firmware(fw);
2934         return status;
2935 }
2936
static const struct net_device_ops be_netdev_ops = {
2938         .ndo_open               = be_open,
2939         .ndo_stop               = be_close,
2940         .ndo_start_xmit         = be_xmit,
2941         .ndo_set_rx_mode        = be_set_rx_mode,
2942         .ndo_set_mac_address    = be_mac_addr_set,
2943         .ndo_change_mtu         = be_change_mtu,
2944         .ndo_get_stats64        = be_get_stats64,
2945         .ndo_validate_addr      = eth_validate_addr,
2946         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2947         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2948         .ndo_set_vf_mac         = be_set_vf_mac,
2949         .ndo_set_vf_vlan        = be_set_vf_vlan,
2950         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
        .ndo_get_vf_config      = be_get_vf_config,
2952 };
2953
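/* Publish the netdev feature set (checksum offloads, TSO, VLAN
 * acceleration, RX hashing when multiple RX rings are in use) and
 * register one NAPI context per RX queue plus one for TX/MCC events.
 */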
2954 static void be_netdev_init(struct net_device *netdev)
2955 {
2956         struct be_adapter *adapter = netdev_priv(netdev);
2957         struct be_rx_obj *rxo;
2958         int i;
2959
2960         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2961                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2962                 NETIF_F_HW_VLAN_TX;
2963         if (be_multi_rxq(adapter))
2964                 netdev->hw_features |= NETIF_F_RXHASH;
2965
2966         netdev->features |= netdev->hw_features |
2967                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2968
2969         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2970                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2971
2972         netdev->flags |= IFF_MULTICAST;
2973
2974         netif_set_gso_max_size(netdev, 65535);
2975
2976         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2977
2978         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2979
2980         for_all_rx_queues(adapter, rxo, i)
2981                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2982                                 BE_NAPI_WEIGHT);
2983
2984         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2985                 BE_NAPI_WEIGHT);
2986 }
2987
2988 static void be_unmap_pci_bars(struct be_adapter *adapter)
2989 {
2990         if (adapter->csr)
2991                 iounmap(adapter->csr);
2992         if (adapter->db)
2993                 iounmap(adapter->db);
2994 }
2995
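/* Map the PCI BARs used by the driver. Lancer exposes everything
 * through BAR 0. On BE2/BE3 the PF maps its CSR space from BAR 2, and
 * the doorbell area comes from BAR 4, except for Gen3 VFs which use
 * BAR 0 (see the db_reg selection below).
 */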
2996 static int be_map_pci_bars(struct be_adapter *adapter)
2997 {
2998         u8 __iomem *addr;
2999         int db_reg;
3000
3001         if (lancer_chip(adapter)) {
3002                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3003                         pci_resource_len(adapter->pdev, 0));
3004                 if (addr == NULL)
3005                         return -ENOMEM;
3006                 adapter->db = addr;
3007                 return 0;
3008         }
3009
3010         if (be_physfn(adapter)) {
3011                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3012                                 pci_resource_len(adapter->pdev, 2));
3013                 if (addr == NULL)
3014                         return -ENOMEM;
3015                 adapter->csr = addr;
3016         }
3017
        if (adapter->generation == BE_GEN2 || be_physfn(adapter))
                db_reg = 4;
        else
                db_reg = 0;
3026         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3027                                 pci_resource_len(adapter->pdev, db_reg));
3028         if (addr == NULL)
3029                 goto pci_map_err;
3030         adapter->db = addr;
3031
3032         return 0;
3033 pci_map_err:
3034         be_unmap_pci_bars(adapter);
3035         return -ENOMEM;
3036 }
3037
3039 static void be_ctrl_cleanup(struct be_adapter *adapter)
3040 {
3041         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3042
3043         be_unmap_pci_bars(adapter);
3044
3045         if (mem->va)
3046                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3047                                   mem->dma);
3048
3049         mem = &adapter->rx_filter;
3050         if (mem->va)
3051                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3052                                   mem->dma);
3053 }
3054
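/* Map the BARs and allocate the DMA memory used for mailbox commands
 * and RX filtering. The mailbox must be 16-byte aligned, so a slightly
 * larger buffer is allocated and an aligned address carved out of it.
 */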
3055 static int be_ctrl_init(struct be_adapter *adapter)
3056 {
3057         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3058         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3059         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3060         int status;
3061
3062         status = be_map_pci_bars(adapter);
3063         if (status)
3064                 goto done;
3065
3066         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3067         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3068                                                 mbox_mem_alloc->size,
3069                                                 &mbox_mem_alloc->dma,
3070                                                 GFP_KERNEL);
3071         if (!mbox_mem_alloc->va) {
3072                 status = -ENOMEM;
3073                 goto unmap_pci_bars;
3074         }
3075         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3076         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3077         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3078         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3079
3080         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3081         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3082                                         &rx_filter->dma, GFP_KERNEL);
3083         if (rx_filter->va == NULL) {
3084                 status = -ENOMEM;
3085                 goto free_mbox;
3086         }
3087         memset(rx_filter->va, 0, rx_filter->size);
3088
3089         mutex_init(&adapter->mbox_lock);
3090         spin_lock_init(&adapter->mcc_lock);
3091         spin_lock_init(&adapter->mcc_cq_lock);
3092
3093         init_completion(&adapter->flash_compl);
3094         pci_save_state(adapter->pdev);
3095         return 0;
3096
3097 free_mbox:
3098         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3099                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3100
3101 unmap_pci_bars:
3102         be_unmap_pci_bars(adapter);
3103
3104 done:
3105         return status;
3106 }
3107
3108 static void be_stats_cleanup(struct be_adapter *adapter)
3109 {
3110         struct be_dma_mem *cmd = &adapter->stats_cmd;
3111
3112         if (cmd->va)
3113                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3114                                   cmd->va, cmd->dma);
3115 }
3116
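/* Allocate the DMA buffer for hardware stats, sized for the stats
 * request this chip understands: v0 on BE2, pport stats on Lancer,
 * v1 otherwise.
 */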
3117 static int be_stats_init(struct be_adapter *adapter)
3118 {
3119         struct be_dma_mem *cmd = &adapter->stats_cmd;
3120
        if (adapter->generation == BE_GEN2)
                cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
        else if (lancer_chip(adapter))
                cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
        else
                cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3129         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3130                                      GFP_KERNEL);
3131         if (cmd->va == NULL)
                return -ENOMEM;
3133         memset(cmd->va, 0, cmd->size);
3134         return 0;
3135 }
3136
3137 static void __devexit be_remove(struct pci_dev *pdev)
3138 {
3139         struct be_adapter *adapter = pci_get_drvdata(pdev);
3140
3141         if (!adapter)
3142                 return;
3143
3144         cancel_delayed_work_sync(&adapter->work);
3145
3146         unregister_netdev(adapter->netdev);
3147
3148         be_clear(adapter);
3149
3150         be_stats_cleanup(adapter);
3151
3152         be_ctrl_cleanup(adapter);
3153
3154         be_sriov_disable(adapter);
3155
3156         be_msix_disable(adapter);
3157
3158         pci_set_drvdata(pdev, NULL);
3159         pci_release_regions(pdev);
3160         pci_disable_device(pdev);
3161
3162         free_netdev(adapter->netdev);
3163 }
3164
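/* Query the firmware for port configuration. Bit 0x400 of
 * function_mode appears to flag multi-channel (FLEX10) operation, in
 * which only a quarter of the VLAN table belongs to this function and
 * only a single TX queue is used; multiple TX queues are reserved for
 * a non-Lancer PF with no VFs enabled.
 */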
3165 static int be_get_config(struct be_adapter *adapter)
3166 {
3167         int status;
3168
3169         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3170                         &adapter->function_mode, &adapter->function_caps);
3171         if (status)
3172                 return status;
3173
3174         if (adapter->function_mode & 0x400)
3175                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3176         else
3177                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3178
3179         status = be_cmd_get_cntl_attributes(adapter);
3180         if (status)
3181                 return status;
3182
3183         if ((num_vfs && adapter->sriov_enabled) ||
3184                 (adapter->function_mode & 0x400) ||
3185                 lancer_chip(adapter) || !be_physfn(adapter)) {
3186                 adapter->num_tx_qs = 1;
3187                 netif_set_real_num_tx_queues(adapter->netdev,
3188                         adapter->num_tx_qs);
3189         } else {
3190                 adapter->num_tx_qs = MAX_TX_QS;
3191         }
3192
3193         return 0;
3194 }
3195
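/* Record the ASIC generation implied by the PCI device id. For the
 * SLI-based ids (OC_DEVICE_ID3/4) the SLI_INTF register is also sanity
 * checked and the SLI family is saved.
 */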
3196 static int be_dev_family_check(struct be_adapter *adapter)
3197 {
3198         struct pci_dev *pdev = adapter->pdev;
3199         u32 sli_intf = 0, if_type;
3200
3201         switch (pdev->device) {
3202         case BE_DEVICE_ID1:
3203         case OC_DEVICE_ID1:
3204                 adapter->generation = BE_GEN2;
3205                 break;
3206         case BE_DEVICE_ID2:
3207         case OC_DEVICE_ID2:
3208                 adapter->generation = BE_GEN3;
3209                 break;
3210         case OC_DEVICE_ID3:
3211         case OC_DEVICE_ID4:
3212                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3213                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3214                                                 SLI_INTF_IF_TYPE_SHIFT;
3215
3216                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3217                         if_type != 0x02) {
3218                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3219                         return -EINVAL;
3220                 }
3221                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3222                                          SLI_INTF_FAMILY_SHIFT);
3223                 adapter->generation = BE_GEN3;
3224                 break;
3225         default:
3226                 adapter->generation = 0;
3227         }
3228         return 0;
3229 }
3230
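/* Poll the SLIPORT status register until the firmware reports ready,
 * giving up after 500 polls of 20ms (roughly 10 seconds).
 */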
3231 static int lancer_wait_ready(struct be_adapter *adapter)
3232 {
3233 #define SLIPORT_READY_TIMEOUT 500
3234         u32 sliport_status;
3235         int status = 0, i;
3236
3237         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3238                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3239                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3240                         break;
3241
3242                 msleep(20);
3243         }
3244
3245         if (i == SLIPORT_READY_TIMEOUT)
                status = -ETIMEDOUT;
3247
3248         return status;
3249 }
3250
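/* Wait for the Lancer firmware to become ready. If it reports an error
 * state that is flagged as recoverable (reset-needed), trigger a port
 * reset through SLIPORT_CONTROL and re-check; an error without
 * reset-needed is fatal.
 */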
3251 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3252 {
3253         int status;
3254         u32 sliport_status, err, reset_needed;
3255         status = lancer_wait_ready(adapter);
3256         if (!status) {
3257                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3258                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3259                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3260                 if (err && reset_needed) {
3261                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3262                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3263
3264                         /* check adapter has corrected the error */
3265                         status = lancer_wait_ready(adapter);
3266                         sliport_status = ioread32(adapter->db +
3267                                                         SLIPORT_STATUS_OFFSET);
3268                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3269                                                 SLIPORT_STATUS_RN_MASK);
3270                         if (status || sliport_status)
3271                                 status = -1;
3272                 } else if (err || reset_needed) {
3273                         status = -1;
3274                 }
3275         }
3276         return status;
3277 }
3278
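/* PCI probe: map the BARs, bring the function to a known state (ready
 * check / POST, fw_init, function reset), discover the configuration,
 * set up rings and interrupts, and register the net device.
 */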
3279 static int __devinit be_probe(struct pci_dev *pdev,
3280                         const struct pci_device_id *pdev_id)
3281 {
3282         int status = 0;
3283         struct be_adapter *adapter;
3284         struct net_device *netdev;
3285
3286         status = pci_enable_device(pdev);
3287         if (status)
3288                 goto do_none;
3289
3290         status = pci_request_regions(pdev, DRV_NAME);
3291         if (status)
3292                 goto disable_dev;
3293         pci_set_master(pdev);
3294
3295         netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3296         if (netdev == NULL) {
3297                 status = -ENOMEM;
3298                 goto rel_reg;
3299         }
3300         adapter = netdev_priv(netdev);
3301         adapter->pdev = pdev;
3302         pci_set_drvdata(pdev, adapter);
3303
3304         status = be_dev_family_check(adapter);
3305         if (status)
3306                 goto free_netdev;
3307
3308         adapter->netdev = netdev;
3309         SET_NETDEV_DEV(netdev, &pdev->dev);
3310
3311         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3312         if (!status) {
3313                 netdev->features |= NETIF_F_HIGHDMA;
3314         } else {
3315                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3316                 if (status) {
3317                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3318                         goto free_netdev;
3319                 }
3320         }
3321
3322         status = be_sriov_enable(adapter);
3323         if (status)
3324                 goto free_netdev;
3325
3326         status = be_ctrl_init(adapter);
3327         if (status)
3328                 goto disable_sriov;
3329
3330         if (lancer_chip(adapter)) {
3331                 status = lancer_test_and_set_rdy_state(adapter);
3332                 if (status) {
                        dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3334                         goto ctrl_clean;
3335                 }
3336         }
3337
3338         /* sync up with fw's ready state */
3339         if (be_physfn(adapter)) {
3340                 status = be_cmd_POST(adapter);
3341                 if (status)
3342                         goto ctrl_clean;
3343         }
3344
3345         /* tell fw we're ready to fire cmds */
3346         status = be_cmd_fw_init(adapter);
3347         if (status)
3348                 goto ctrl_clean;
3349
3350         status = be_cmd_reset_function(adapter);
3351         if (status)
3352                 goto ctrl_clean;
3353
3354         status = be_stats_init(adapter);
3355         if (status)
3356                 goto ctrl_clean;
3357
3358         status = be_get_config(adapter);
3359         if (status)
3360                 goto stats_clean;
3361
3362         /* The INTR bit may be set in the card when probed by a kdump kernel
3363          * after a crash.
3364          */
3365         if (!lancer_chip(adapter))
3366                 be_intr_set(adapter, false);
3367
3368         be_msix_enable(adapter);
3369
3370         INIT_DELAYED_WORK(&adapter->work, be_worker);
3371         adapter->rx_fc = adapter->tx_fc = true;
3372
3373         status = be_setup(adapter);
3374         if (status)
3375                 goto msix_disable;
3376
3377         be_netdev_init(netdev);
3378         status = register_netdev(netdev);
3379         if (status != 0)
3380                 goto unsetup;
3381
3382         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3383
3384         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3385         return 0;
3386
3387 unsetup:
3388         be_clear(adapter);
3389 msix_disable:
3390         be_msix_disable(adapter);
3391 stats_clean:
3392         be_stats_cleanup(adapter);
3393 ctrl_clean:
3394         be_ctrl_cleanup(adapter);
3395 disable_sriov:
3396         be_sriov_disable(adapter);
3397 free_netdev:
3398         free_netdev(netdev);
3399         pci_set_drvdata(pdev, NULL);
3400 rel_reg:
3401         pci_release_regions(pdev);
3402 disable_dev:
3403         pci_disable_device(pdev);
3404 do_none:
3405         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3406         return status;
3407 }
3408
3409 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3410 {
3411         struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
3413
3414         cancel_delayed_work_sync(&adapter->work);
3415         if (adapter->wol)
3416                 be_setup_wol(adapter, true);
3417
3418         netif_device_detach(netdev);
3419         if (netif_running(netdev)) {
3420                 rtnl_lock();
3421                 be_close(netdev);
3422                 rtnl_unlock();
3423         }
3424         be_clear(adapter);
3425
3426         be_msix_disable(adapter);
3427         pci_save_state(pdev);
3428         pci_disable_device(pdev);
3429         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3430         return 0;
3431 }
3432
3433 static int be_resume(struct pci_dev *pdev)
3434 {
3435         int status = 0;
3436         struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
3438
3439         netif_device_detach(netdev);
3440
3441         status = pci_enable_device(pdev);
3442         if (status)
3443                 return status;
3444
        pci_set_power_state(pdev, PCI_D0);
3446         pci_restore_state(pdev);
3447
3448         be_msix_enable(adapter);
3449         /* tell fw we're ready to fire cmds */
3450         status = be_cmd_fw_init(adapter);
3451         if (status)
3452                 return status;
3453
3454         be_setup(adapter);
3455         if (netif_running(netdev)) {
3456                 rtnl_lock();
3457                 be_open(netdev);
3458                 rtnl_unlock();
3459         }
3460         netif_device_attach(netdev);
3461
3462         if (adapter->wol)
3463                 be_setup_wol(adapter, false);
3464
3465         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3466         return 0;
3467 }
3468
/*
 * Shutdown hook: resetting the function stops the adapter from DMAing
 * any further data.
 */
3472 static void be_shutdown(struct pci_dev *pdev)
3473 {
3474         struct be_adapter *adapter = pci_get_drvdata(pdev);
3475
3476         if (!adapter)
3477                 return;
3478
3479         cancel_delayed_work_sync(&adapter->work);
3480
3481         netif_device_detach(adapter->netdev);
3482
3483         if (adapter->wol)
3484                 be_setup_wol(adapter, true);
3485
3486         be_cmd_reset_function(adapter);
3487
3488         pci_disable_device(pdev);
3489 }
3490
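/* PCI error recovery (EEH) callbacks: quiesce and detach the device
 * when an error is detected, re-enable and re-POST it on slot reset,
 * and rebuild the rings on resume.
 */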
3491 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3492                                 pci_channel_state_t state)
3493 {
3494         struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
3496
3497         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3498
3499         adapter->eeh_err = true;
3500
3501         netif_device_detach(netdev);
3502
3503         if (netif_running(netdev)) {
3504                 rtnl_lock();
3505                 be_close(netdev);
3506                 rtnl_unlock();
3507         }
3508         be_clear(adapter);
3509
3510         if (state == pci_channel_io_perm_failure)
3511                 return PCI_ERS_RESULT_DISCONNECT;
3512
3513         pci_disable_device(pdev);
3514
3515         return PCI_ERS_RESULT_NEED_RESET;
3516 }
3517
3518 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3519 {
3520         struct be_adapter *adapter = pci_get_drvdata(pdev);
3521         int status;
3522
3523         dev_info(&adapter->pdev->dev, "EEH reset\n");
3524         adapter->eeh_err = false;
3525
3526         status = pci_enable_device(pdev);
3527         if (status)
3528                 return PCI_ERS_RESULT_DISCONNECT;
3529
3530         pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
3532         pci_restore_state(pdev);
3533
3534         /* Check if card is ok and fw is ready */
3535         status = be_cmd_POST(adapter);
3536         if (status)
3537                 return PCI_ERS_RESULT_DISCONNECT;
3538
3539         return PCI_ERS_RESULT_RECOVERED;
3540 }
3541
3542 static void be_eeh_resume(struct pci_dev *pdev)
3543 {
3544         int status = 0;
3545         struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
3547
3548         dev_info(&adapter->pdev->dev, "EEH resume\n");
3549
3550         pci_save_state(pdev);
3551
3552         /* tell fw we're ready to fire cmds */
3553         status = be_cmd_fw_init(adapter);
3554         if (status)
3555                 goto err;
3556
3557         status = be_setup(adapter);
3558         if (status)
3559                 goto err;
3560
3561         if (netif_running(netdev)) {
3562                 status = be_open(netdev);
3563                 if (status)
3564                         goto err;
3565         }
3566         netif_device_attach(netdev);
3567         return;
3568 err:
3569         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3570 }
3571
3572 static struct pci_error_handlers be_eeh_handlers = {
3573         .error_detected = be_eeh_err_detected,
3574         .slot_reset = be_eeh_reset,
3575         .resume = be_eeh_resume,
3576 };
3577
3578 static struct pci_driver be_driver = {
3579         .name = DRV_NAME,
3580         .id_table = be_dev_ids,
3581         .probe = be_probe,
3582         .remove = be_remove,
3583         .suspend = be_suspend,
3584         .resume = be_resume,
3585         .shutdown = be_shutdown,
3586         .err_handler = &be_eeh_handlers
3587 };
3588
3589 static int __init be_init_module(void)
3590 {
3591         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3592             rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        ": module param rx_frag_size must be 2048, 4096 or 8192;"
                        " using 2048\n");
3596                 rx_frag_size = 2048;
3597         }
3598
3599         return pci_register_driver(&be_driver);
3600 }
3601 module_init(be_init_module);
3602
3603 static void __exit be_exit_module(void)
3604 {
3605         pci_unregister_driver(&be_driver);
3606 }
3607 module_exit(be_exit_module);